ldlm_extent.c
From "lustre 1.6.5 source code" · C code · 882 lines total · page 1/3
C
882 lines
LASSERT(!list_empty(&node->li_group)); list_for_each_entry(lock, &node->li_group, l_sl_policy) { /* interval tree is for granted lock */ LASSERTF(mode == lock->l_granted_mode, "mode = %s, lock->l_granted_mode = %s\n", ldlm_lockname[mode], ldlm_lockname[lock->l_granted_mode]); count++; if (lock->l_blocking_ast) ldlm_add_ast_work_item(lock, enq, work_list); } LASSERT(count > 0); /* don't count conflicting glimpse locks */ extent = ldlm_interval_extent(node); if (!(mode == LCK_PR && extent->start == 0 && extent->end == OBD_OBJECT_EOF)) *priv->locks += count; if (priv->compat) *priv->compat = 0; RETURN(INTERVAL_ITER_CONT);}/* Determine if the lock is compatible with all locks on the queue. * We stop walking the queue if we hit ourselves so we don't take * conflicting locks enqueued after us into accound, or we'd wait forever. * * 0 if the lock is not compatible * 1 if the lock is compatible * 2 if this group lock is compatible and requires no further checking * negative error, such as EWOULDBLOCK for group locks */static intldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req, int *flags, ldlm_error_t *err, struct list_head *work_list, int *contended_locks){ struct list_head *tmp; struct ldlm_lock *lock; struct ldlm_resource *res = req->l_resource; ldlm_mode_t req_mode = req->l_req_mode; __u64 req_start = req->l_req_extent.start; __u64 req_end = req->l_req_extent.end; int compat = 1; int scan = 0; int check_contention; ENTRY; lockmode_verify(req_mode); /* Using interval tree for granted lock */ if (queue == &res->lr_granted) { struct ldlm_interval_tree *tree; struct ldlm_extent_compat_args data = {.work_list = work_list, .lock = req, .locks = contended_locks, .compat = &compat }; struct interval_node_extent ex = { .start = req_start, .end = req_end }; int idx, rc; for (idx = 0; idx < LCK_MODE_NUM; idx++) { tree = &res->lr_itree[idx]; if (tree->lit_root == NULL) /* empty tree, skipped */ continue; data.mode = tree->lit_mode; if 
(lockmode_compat(req_mode, tree->lit_mode)) { struct ldlm_interval *node; struct ldlm_extent *extent; if (req_mode != LCK_GROUP) continue; /* group lock, grant it immediately if * compatible */ node = to_ldlm_interval(tree->lit_root); extent = ldlm_interval_extent(node); if (req->l_policy_data.l_extent.gid == extent->gid) RETURN(2); } if (tree->lit_mode == LCK_GROUP) { if (*flags & LDLM_FL_BLOCK_NOWAIT) { compat = -EWOULDBLOCK; goto destroylock; } *flags |= LDLM_FL_NO_TIMEOUT; if (!work_list) RETURN(0); /* if work list is not NULL,add all locks in the tree to work list */ compat = 0; interval_iterate(tree->lit_root, ldlm_extent_compat_cb, &data); continue; } if (!work_list) { rc = interval_is_overlapped(tree->lit_root,&ex); if (rc) RETURN(0); } else { interval_search(tree->lit_root, &ex, ldlm_extent_compat_cb, &data); if (!list_empty(work_list) && compat) compat = 0; } } RETURN(compat); } /* for waiting queue */ list_for_each(tmp, queue) { check_contention = 1; lock = list_entry(tmp, struct ldlm_lock, l_res_link); if (req == lock) break; if (unlikely(scan)) { /* We only get here if we are queuing GROUP lock and met some incompatible one. 
The main idea of this code is to insert GROUP lock past compatible GROUP lock in the waiting queue or if there is not any, then in front of first non-GROUP lock */ if (lock->l_req_mode != LCK_GROUP) { /* Ok, we hit non-GROUP lock, there should be no more GROUP locks later on, queue in front of first non-GROUP lock */ ldlm_resource_insert_lock_after(lock, req); list_del_init(&lock->l_res_link); ldlm_resource_insert_lock_after(req, lock); compat = 0; break; } if (req->l_policy_data.l_extent.gid == lock->l_policy_data.l_extent.gid) { /* found it */ ldlm_resource_insert_lock_after(lock, req); compat = 0; break; } continue; } /* locks are compatible, overlap doesn't matter */ if (lockmode_compat(lock->l_req_mode, req_mode)) { if (req_mode == LCK_PR && ((lock->l_policy_data.l_extent.start <= req->l_policy_data.l_extent.start) && (lock->l_policy_data.l_extent.end >= req->l_policy_data.l_extent.end))) { /* If we met a PR lock just like us or wider, and nobody down the list conflicted with it, that means we can skip processing of the rest of the list and safely place ourselves at the end of the list, or grant (dependent if we met an conflicting locks before in the list). In case of 1st enqueue only we continue traversing if there is something conflicting down the list because we need to make sure that something is marked as AST_SENT as well, in cse of empy worklist we would exit on first conflict met. */ /* There IS a case where such flag is not set for a lock, yet it blocks something. Luckily for us this is only during destroy, so lock is exclusive. 
So here we are safe */ if (!(lock->l_flags & LDLM_FL_AST_SENT)) { RETURN(compat); } } /* non-group locks are compatible, overlap doesn't matter */ if (likely(req_mode != LCK_GROUP)) continue; /* If we are trying to get a GROUP lock and there is another one of this kind, we need to compare gid */ if (req->l_policy_data.l_extent.gid == lock->l_policy_data.l_extent.gid) { /* If existing lock with matched gid is granted, we grant new one too. */ if (lock->l_req_mode == lock->l_granted_mode) RETURN(2); /* Otherwise we are scanning queue of waiting * locks and it means current request would * block along with existing lock (that is * already blocked. * If we are in nonblocking mode - return * immediately */ if (*flags & LDLM_FL_BLOCK_NOWAIT) { compat = -EWOULDBLOCK; goto destroylock; } /* If this group lock is compatible with another * group lock on the waiting list, they must be * together in the list, so they can be granted * at the same time. Otherwise the later lock * can get stuck behind another, incompatible, * lock. */ ldlm_resource_insert_lock_after(lock, req); /* Because 'lock' is not granted, we can stop * processing this queue and return immediately. * There is no need to check the rest of the * list. 
*/ RETURN(0); } } if (unlikely(req_mode == LCK_GROUP && (lock->l_req_mode != lock->l_granted_mode))) { scan = 1; compat = 0; if (lock->l_req_mode != LCK_GROUP) { /* Ok, we hit non-GROUP lock, there should * be no more GROUP locks later on, queue in * front of first non-GROUP lock */ ldlm_resource_insert_lock_after(lock, req); list_del_init(&lock->l_res_link); ldlm_resource_insert_lock_after(req, lock); break; } if (req->l_policy_data.l_extent.gid == lock->l_policy_data.l_extent.gid) { /* found it */ ldlm_resource_insert_lock_after(lock, req); break; } continue; } if (unlikely(lock->l_req_mode == LCK_GROUP)) { /* If compared lock is GROUP, then requested is PR/PW/ * so this is not compatible; extent range does not * matter */ if (*flags & LDLM_FL_BLOCK_NOWAIT) { compat = -EWOULDBLOCK; goto destroylock; } else { *flags |= LDLM_FL_NO_TIMEOUT; } } else if (lock->l_policy_data.l_extent.end < req_start || lock->l_policy_data.l_extent.start > req_end) { /* if a non group lock doesn't overlap skip it */ continue; } else if (lock->l_req_extent.end < req_start || lock->l_req_extent.start > req_end) /* false contention, the requests doesn't really overlap */ check_contention = 0; if (!work_list) RETURN(0); /* don't count conflicting glimpse locks */ if (lock->l_req_mode == LCK_PR && lock->l_policy_data.l_extent.start == 0 && lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF) check_contention = 0; *contended_locks += check_contention; compat = 0; if (lock->l_blocking_ast) ldlm_add_ast_work_item(lock, req, work_list); } if (ldlm_check_contention(req, *contended_locks) && compat == 0 && (*flags & LDLM_FL_DENY_ON_CONTENTION) && req->l_req_mode != LCK_GROUP && req_end - req_start <= req->l_resource->lr_namespace->ns_max_nolock_size) GOTO(destroylock, compat = -EUSERS); RETURN(compat);destroylock: list_del_init(&req->l_res_link); ldlm_lock_destroy_nolock(req);
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?