ldlm_extent.c
From "lustre 1.6.5 source code" · C code · 882 lines total · page 1/3
C
882 lines
/* NOTE(review): this chunk opens mid-function; the two statements below are
 * the tail of a function that begins on an earlier page — presumably
 * ldlm_extent_compat_queue(), given the calls to it further down. TODO:
 * confirm against page boundaries. */
        *err = compat;
        RETURN(compat);
}

/* Empty @bl_list of locks queued for blocking-AST work without actually
 * sending the ASTs: clear LDLM_FL_AST_SENT on each lock and drop the
 * references (on both the blocking lock and the lock itself) that were
 * taken when the lock was queued. */
static void discard_bl_list(struct list_head *bl_list)
{
        struct list_head *tmp, *pos;
        ENTRY;

        list_for_each_safe(pos, tmp, bl_list) {
                struct ldlm_lock *lock =
                        list_entry(pos, struct ldlm_lock, l_bl_ast);

                list_del_init(&lock->l_bl_ast);
                LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                lock->l_flags &= ~LDLM_FL_AST_SENT;
                /* the blocking AST must never have actually run for a
                 * discarded entry */
                LASSERT(lock->l_bl_ast_run == 0);
                LASSERT(lock->l_blocking_lock);
                LDLM_LOCK_PUT(lock->l_blocking_lock);
                lock->l_blocking_lock = NULL;
                LDLM_LOCK_PUT(lock);
        }
        EXIT;
}

/* If first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - must call this function with the ns lock held once */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = CFS_LIST_HEAD_INIT(rpc_list);
        int rc, rc2;
        int contended_locks = 0;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !(lock->l_flags & LDLM_AST_DISCARD_DATA));
        check_res_locked(res);
        *err = ELDLM_OK;

        if (!first_enq) {
                /* Careful observers will note that we don't handle -EWOULDBLOCK
                 * here, but it's ok for a non-obvious reason -- compat_queue
                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
                 * flags should always be zero here, and if that ever stops
                 * being true, we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                              err, NULL, &contended_locks);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, err, NULL,
                                                      &contended_locks);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
                        ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }

 restart:
        contended_locks = 0;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
                                      &rpc_list, &contended_locks);
        if (rc < 0)
                GOTO(out, rc); /* lock was destroyed */
        if (rc == 2)
                goto grant;

        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
                                       &rpc_list, &contended_locks);
        if (rc2 < 0)
                GOTO(out, rc = rc2); /* lock was destroyed */

        if (rc + rc2 == 2) {
 grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                /* the resource lock is dropped while the blocking ASTs are
                 * sent, so the queues may change under us */
                unlock_res(res);
                rc = ldlm_run_bl_ast_work(&rpc_list);
                lock_res(res);
                if (rc == -ERESTART) {
                        /* lock was granted while resource was unlocked. */
                        if (lock->l_granted_mode == lock->l_req_mode) {
                                /* bug 11300: if the lock has been granted,
                                 * break earlier because otherwise, we will go
                                 * to restart and ldlm_resource_unlink will be
                                 * called and it causes the interval node to be
                                 * freed.  Then we will fail at
                                 * ldlm_extent_add_lock() */
                                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                                            LDLM_FL_BLOCK_CONV |
                                            LDLM_FL_BLOCK_WAIT);
                                GOTO(out, rc = 0);
                        }
                        GOTO(restart, -ERESTART);
                }

                *flags |= LDLM_FL_BLOCK_GRANTED;
                /* this way we force client to wait for the lock
                 * endlessly once the lock is enqueued -bzzz */
                *flags |= LDLM_FL_NO_TIMEOUT;
        }
        RETURN(0);
out:
        if (!list_empty(&rpc_list)) {
                LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
                discard_bl_list(&rpc_list);
        }
        RETURN(rc);
}

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold ns_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        lock->l_flags |= LDLM_FL_KMS_IGNORE;

        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;

                /* any remaining granted extent reaching old_kms means the
                 * KMS cannot shrink */
                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked above)
                 * so kms can only ever be smaller or the same as old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);

        RETURN(kms);
}

/* slab cache for struct ldlm_interval nodes */
cfs_mem_cache_t *ldlm_interval_slab;

/* Allocate an interval-tree node from the slab and attach it to @lock.
 * Returns NULL on allocation failure.  Extent locks only. */
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
        struct ldlm_interval *node;
        ENTRY;

        LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
        OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO, sizeof(*node));
        if (node == NULL)
                RETURN(NULL);

        CFS_INIT_LIST_HEAD(&node->li_group);
        ldlm_interval_attach(node, lock);
        RETURN(node);
}

/* Free an interval node; must already be detached from every lock
 * (li_group empty).  free(NULL)-style: a NULL @node is a no-op. */
void ldlm_interval_free(struct ldlm_interval *node)
{
        if (node) {
                LASSERT(list_empty(&node->li_group));
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        }
}

/* interval tree, for LDLM_EXTENT. */

/* Attach lock @l to interval node @n; a node groups locks that share the
 * same extent and mode (linked through l_sl_policy). */
void ldlm_interval_attach(struct ldlm_interval *n,
                          struct ldlm_lock *l)
{
        LASSERT(l->l_tree_node == NULL);
        LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

        list_add_tail(&l->l_sl_policy, &n->li_group);
        l->l_tree_node = n;
}

/* Detach @l from its interval node.  Returns the node if @l was its last
 * member (caller owns and should free it), otherwise NULL. */
struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
        struct ldlm_interval *n = l->l_tree_node;

        if (n == NULL)
                return NULL;

        LASSERT(!list_empty(&n->li_group));
        l->l_tree_node = NULL;
        list_del_init(&l->l_sl_policy);

        return (list_empty(&n->li_group) ? n : NULL);
}

/* Map a power-of-two lock mode to its interval-tree index: the position of
 * the single set bit, i.e. mode == 1 << index. */
static inline int lock_mode_to_index(ldlm_mode_t mode)
{
        int index;

        LASSERT(mode != 0);
        LASSERT(IS_PO2(mode));
        for (index = -1; mode; index++, mode >>= 1) ;
        LASSERT(index < LCK_MODE_NUM);
        return index;
}

/* Insert a granted extent lock into the per-mode interval tree of its
 * resource (and, for visibility/debugging, the lr_granted list).  If an
 * identical interval already exists, the lock joins that node's policy
 * group and its own node is freed. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
                          struct ldlm_lock *lock)
{
        struct interval_node *found, **root;
        struct ldlm_interval *node;
        struct ldlm_extent *extent;
        int idx;

        LASSERT(lock->l_granted_mode == lock->l_req_mode);

        node = lock->l_tree_node;
        LASSERT(node != NULL);

        idx = lock_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == 1 << idx);
        LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

        /* node extent initialize */
        extent = &lock->l_policy_data.l_extent;
        interval_set(&node->li_node, extent->start, extent->end);

        root = &res->lr_itree[idx].lit_root;
        found = interval_insert(&node->li_node, root);
        if (found) { /* The policy group found. */
                struct ldlm_interval *tmp = ldlm_interval_detach(lock);
                LASSERT(tmp != NULL);
                ldlm_interval_free(tmp);
                ldlm_interval_attach(to_ldlm_interval(found), lock);
        }
        res->lr_itree[idx].lit_size++;

        /* even though we use interval tree to manage the extent lock, we also
         * add the locks into grant list, for debug purpose, .. */
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
}

/* Remove a granted extent lock from its resource's interval tree; a no-op
 * for locks that are not granted.  Frees the interval node if this lock
 * was its last member. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_interval *node;
        struct ldlm_interval_tree *tree;
        int idx;

        if (lock->l_granted_mode != lock->l_req_mode)
                return;

        LASSERT(lock->l_tree_node != NULL);
        idx = lock_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == 1 << idx);
        tree = &res->lr_itree[idx];

        LASSERT(tree->lit_root != NULL); /* assure the tree is not null */

        tree->lit_size--;
        node = ldlm_interval_detach(lock);
        if (node) {
                interval_erase(&node->li_node, &tree->lit_root);
                ldlm_interval_free(node);
        }
}
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?