ldlm_request.c
来自「lustre 1.6.5 source code」· C语言 代码 · 共 1,663 行 · 第 1/5 页
C
1,663 行
/* NOTE(review): this chunk begins inside ldlm_cancel_lru_local() -- the
 * function header and the enclosing scan loop over the namespace LRU start
 * above the visible region.  ns->ns_unused_lock is held when control
 * reaches this point on each outer iteration -- confirm against the full
 * function. */

                /* Skim already-cancelling locks off the front of the LRU:
                 * drop each one from the LRU so it is not traversed again,
                 * and stop at the first lock that is still a candidate. */
                list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru){
                        /* No locks which got blocking requests. */
                        LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));

                        /* Somebody is already doing CANCEL. No need in this
                         * lock in lru, do not traverse it again. */
                        if (!(lock->l_flags & LDLM_FL_CANCELING))
                                break;

                        ldlm_lock_remove_from_lru_nolock(lock);
                }

                /* The scan ran off the end of the list: every remaining lock
                 * was already being cancelled, so there is no candidate. */
                if (&lock->l_lru == &ns->ns_unused_list)
                        break;

                /* Pass the lock through the policy filter and see if it
                 * should stay in lru.
                 *
                 * Even for shrinker policy we stop scanning if
                 * we find a lock that should stay in the cache.
                 * We should take into account lock age anyway
                 * as new lock even if it is small of weight is
                 * valuable resource.
                 *
                 * That is, for shrinker policy we drop only
                 * old locks, but additionally choose them by
                 * their weight. Big extent locks will stay in
                 * the cache. */
                if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
                        break;

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                spin_unlock(&ns->ns_unused_lock);

                lock_res_and_lock(lock);
                /* Check flags again under the lock. */
                if ((lock->l_flags & LDLM_FL_CANCELING) ||
                    (ldlm_lock_remove_from_lru(lock) == 0)) {
                        /* other thread is removing lock from lru or
                         * somebody is already doing CANCEL or
                         * there is a blocking request which will send
                         * cancel by itself or the lock is matched
                         * is already not unused. */
                        unlock_res_and_lock(lock);
                        LDLM_LOCK_PUT(lock);
                        spin_lock(&ns->ns_unused_lock);
                        continue;
                }
                LASSERT(!lock->l_readers && !lock->l_writers);

                /* If we have chosen to cancel this lock voluntarily, we
                 * better send cancel notification to server, so that it
                 * frees appropriate state. This might lead to a race
                 * where while we are doing cancel here, server is also
                 * silently cancelling this lock. */
                lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;

                /* Setting the CBPENDING flag is a little misleading,
                 * but prevents an important race; namely, once
                 * CBPENDING is set, the lock can accumulate no more
                 * readers/writers. Since readers and writers are
                 * already zero here, ldlm_lock_decref() won't see
                 * this flag and call l_blocking_ast */
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;

                /* We can't re-add to l_lru as it confuses the
                 * refcounting in ldlm_lock_remove_from_lru() if an AST
                 * arrives after we drop ns_lock below. We use l_bl_ast
                 * and can't use l_pending_chain as it is used both on
                 * server and client nevertheless bug 5666 says it is
                 * used only on server */
                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, cancels);

                unlock_res_and_lock(lock);
                spin_lock(&ns->ns_unused_lock);
                added++;
                unused--;
        }
        spin_unlock(&ns->ns_unused_lock);

        /* Dispatch (or hand back) the cancel candidates collected above. */
        RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
}

/* Returns number of locks which could be canceled next time when
 * ldlm_cancel_lru() is called. Used from locks pool shrinker.
 *
 * Read-only walk of the namespace LRU under ns_unused_lock; applies the
 * same policy filter as the real cancel path but takes no references and
 * changes no lock state. */
int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
                             int count, int max, int flags)
{
        ldlm_cancel_lru_policy_t pf;
        struct ldlm_lock *lock;
        int added = 0, unused;
        ENTRY;

        pf = ldlm_cancel_lru_policy(ns, flags);
        LASSERT(pf != NULL);

        spin_lock(&ns->ns_unused_lock);
        unused = ns->ns_nr_unused;

        list_for_each_entry(lock, &ns->ns_unused_list, l_lru) {
                /* For any flags, stop scanning if @max is reached. */
                if (max && added >= max)
                        break;

                /* Somebody is already doing CANCEL or there is a
                 * blocking request will send cancel. Let's not count
                 * this lock. */
                if ((lock->l_flags & LDLM_FL_CANCELING) ||
                    (lock->l_flags & LDLM_FL_BL_AST))
                        continue;

                /* Pass the lock through the policy filter and see if it
                 * should stay in lru. */
                if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
                        break;

                added++;
                unused--;
        }
        spin_unlock(&ns->ns_unused_lock);
        RETURN(added);
}

/* when called with LDLM_ASYNC the blocking callback will be handled
 * in a thread and this function will return after the thread has been
 * asked to call the callback. when called with LDLM_SYNC the blocking
 * callback will be performed in this function.
 */
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
                    int flags)
{
        CFS_LIST_HEAD(cancels);
        int count, rc;
        ENTRY;

#ifndef __KERNEL__
        sync = LDLM_SYNC; /* force to be sync in user space */
#endif
        /* Collect up to @nr cancel candidates from the namespace LRU. */
        count = ldlm_cancel_lru_local(ns, &cancels, nr, 0, 0, flags);
        if (sync == LDLM_ASYNC) {
                /* Hand the list to the blocking-callback thread; on success
                 * the thread owns the cancels and we are done. */
                rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count);
                if (rc == 0)
                        RETURN(count);
        }

        /* If an error occurred in ASYNC mode, or
         * this is SYNC mode, cancel the list. */
        ldlm_cli_cancel_list(&cancels, count, NULL, 0);
        RETURN(count);
}

/* Find and cancel locally unused locks found on resource, matched to the
 * given policy, mode. GET the found locks and add them into the @cancels
 * list. */
int ldlm_cancel_resource_local(struct ldlm_resource *res,
                               struct list_head *cancels,
                               ldlm_policy_data_t *policy,
                               ldlm_mode_t mode, int lock_flags,
                               int cancel_flags, void *opaque)
{
        struct ldlm_lock *lock;
        int count = 0;
        ENTRY;

        lock_res(res);
        list_for_each_entry(lock, &res->lr_granted, l_res_link) {
                /* When an opaque cookie is given, consider only locks whose
                 * l_ast_data matches it. */
                if (opaque != NULL && lock->l_ast_data != opaque) {
                        LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                   lock->l_ast_data, opaque);
                        //LBUG();
                        continue;
                }

                /* Locks still held by readers/writers are in use, not
                 * candidates for cancel. */
                if (lock->l_readers || lock->l_writers) {
                        if (cancel_flags & LDLM_FL_WARN) {
                                LDLM_ERROR(lock, "lock in use");
                                //LBUG();
                        }
                        continue;
                }

                /* If somebody is already doing CANCEL, or blocking ast came,
                 * skip this lock. */
                if (lock->l_flags & LDLM_FL_BL_AST ||
                    lock->l_flags & LDLM_FL_CANCELING)
                        continue;

                /* A granted mode compatible with @mode does not conflict,
                 * so leave the lock alone. */
                if (lockmode_compat(lock->l_granted_mode, mode))
                        continue;

                /* If policy is given and this is IBITS lock, add to list only
                 * those locks that match by policy.
 */
                if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
                    !(lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits))
                        continue;

                /* See CBPENDING comment in ldlm_cancel_lru */
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
                                 lock_flags;

                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, cancels);
                /* Reference taken for each lock put on @cancels; released
                 * when the list is consumed. */
                LDLM_LOCK_GET(lock);
                count++;
        }
        unlock_res(res);

        /* Handle only @count inserted locks. */
        RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
}

/* If @req is NULL, send CANCEL request to server with handles of locks
 * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
 * separately per lock.
 * If @req is not NULL, put handles of locks in @cancels into the request
 * buffer at the offset @off.
 * Destroy @cancels at the end. */
int ldlm_cli_cancel_list(struct list_head *cancels, int count,
                         struct ptlrpc_request *req, int off)
{
        struct ldlm_lock *lock;
        int res = 0;
        ENTRY;

        if (list_empty(cancels) || count == 0)
                RETURN(0);

        while (count) {
                LASSERT(!list_empty(cancels));
                lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
                LASSERT(lock->l_conn_export);

                if (exp_connect_cancelset(lock->l_conn_export)) {
                        /* Export supports batched cancels: pack (or send)
                         * all @count handles in one go. */
                        res = count;
                        if (req)
                                ldlm_cancel_pack(req, off, cancels, count);
                        else
                                res = ldlm_cli_cancel_req(lock->l_conn_export,
                                                          cancels, count);
                } else {
                        /* No EARLY_CANCEL support: one CANCEL RPC per lock. */
                        res = ldlm_cli_cancel_req(lock->l_conn_export,
                                                  cancels, 1);
                }

                if (res < 0) {
                        /* On error, treat the whole remaining batch as
                         * handled so the list is still fully consumed. */
                        CERROR("ldlm_cli_cancel_list: %d\n", res);
                        res = count;
                }

                count -= res;
                /* Drop @res locks (and their references) off the front of
                 * the list. */
                ldlm_lock_list_put(cancels, l_bl_ast, res);
        }
        RETURN(0);
}

/* Cancel every locally-unused lock on the resource named by @res_id;
 * a resource that does not exist locally is silently treated as success. */
static int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                           struct ldlm_res_id res_id,
                                           int flags, void *opaque)
{
        struct ldlm_resource *res;
        CFS_LIST_HEAD(cancels);
        int count;
        int rc;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
        if (res == NULL) {
                /* This is not a problem.
 */
                CDEBUG(D_INFO, "No resource "LPU64"\n", res_id.name[0]);
                RETURN(0);
        }

        count = ldlm_cancel_resource_local(res, &cancels, NULL, LCK_MINMODE,
                                           0, flags, opaque);
        rc = ldlm_cli_cancel_list(&cancels, count, NULL, 0);
        if (rc != ELDLM_OK)
                CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);

        ldlm_resource_putref(res);
        RETURN(0);
}

/* Returns 1 when the namespace currently holds no resources, 0 otherwise;
 * ns_resources is sampled under ns_hash_lock. */
static inline int have_no_nsresource(struct ldlm_namespace *ns)
{
        int no_resource = 0;

        spin_lock(&ns->ns_hash_lock);
        if (ns->ns_resources == 0)
                no_resource = 1;
        spin_unlock(&ns->ns_hash_lock);

        RETURN(no_resource);
}

/* Cancel all locks on a namespace (or a specific resource, if given)
 * that have 0 readers/writers.
 *
 * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
 * to notify the server. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                           struct ldlm_res_id *res_id, int flags, void *opaque)
{
        int i;
        ENTRY;

        if (ns == NULL)
                RETURN(ELDLM_OK);

        /* Single-resource case: delegate and return. */
        if (res_id)
                RETURN(ldlm_cli_cancel_unused_resource(ns, *res_id, flags,
                                                       opaque));

        /* Whole-namespace case: walk every hash chain.  ns_hash_lock is
         * dropped around each per-resource cancel; a reference taken via
         * ldlm_resource_getref() keeps the entry alive across the unlock. */
        spin_lock(&ns->ns_hash_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp;
                tmp = ns->ns_hash[i].next;
                while (tmp != &(ns->ns_hash[i])) {
                        struct ldlm_resource *res;
                        int rc;

                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);

                        spin_unlock(&ns->ns_hash_lock);
                        rc = ldlm_cli_cancel_unused_resource(ns, res->lr_name,
                                                             flags, opaque);
                        if (rc)
                                CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
                                       res->lr_name.name[0], rc);

                        spin_lock(&ns->ns_hash_lock);
                        /* NOTE(review): chunk is truncated here; the rest of
                         * the loop body (including the matching putref for the
                         * getref above) continues past the visible region --
                         * confirm against the full file. */
                        tmp = tmp->next;
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?