/*
 * ldlm_lock.c -- Lustre 1.6.5 distributed lock manager (LDLM) lock code.
 * (Excerpt: page 1 of 5 of a 1,767-line file, captured from a web code
 * viewer; the original page-navigation chrome has been converted to this
 * comment so the file remains valid C.)
 */
RETURN(lock);
}

/*
 * Move @lock onto the resource named by @new_resid (same lock type),
 * dropping the lock's reference on its old resource.  Client namespaces
 * only; the lock must not currently be linked onto any resource list.
 * Returns 0 on success (including when the name is already @new_resid).
 */
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              struct ldlm_res_id new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        struct ldlm_resource *newres;
        int type;
        ENTRY;

        LASSERT(ns_is_client(ns));

        lock_res_and_lock(lock);
        if (memcmp(&new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        LASSERT(new_resid.name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        type = oldres->lr_type;
        unlock_res_and_lock(lock);

        /* Look up (create if absent) the target resource; this takes a
         * reference that the lock will own once l_resource is switched. */
        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (newres == NULL) {
                LBUG();
                RETURN(-ENOMEM);
        }

        lock_res_and_lock(lock);
        LASSERT(memcmp(&new_resid, &lock->l_resource->lr_name,
                       sizeof(lock->l_resource->lr_name)) != 0);
        /* Take the new resource's lock before publishing the switch, so the
         * lock is never visible on @newres without its lr_lock held.
         * NOTE(review): both resource locks are held briefly here; presumably
         * safe because the lock is on no list -- confirm against the DLM
         * locking rules. */
        lock_res(newres);
        lock->l_resource = newres;
        unlock_res(oldres);
        unlock_res_and_lock(lock);

        /* ...and the flowers are still standing! */
        ldlm_resource_putref(oldres);

        RETURN(0);
}

/*
 * HANDLES
 */

/* Fill in @lockh with the opaque cookie that identifies @lock. */
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}

/* if flags: atomically get the lock and set the flags. 
 * Return NULL if flag already set */
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *handle, int flags)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock = NULL, *retval = NULL;
        ENTRY;

        LASSERT(handle);

        /* Translate the cookie back into a lock; this takes a reference
         * that is returned to the caller on success. */
        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        LASSERT(lock->l_resource != NULL);
        ns = lock->l_resource->lr_namespace;
        LASSERT(ns != NULL);

        lock_res_and_lock(lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        /* Caller asked for @flags to be set atomically with the lookup;
         * fail if any of them is already set. */
        if (flags && (lock->l_flags & flags)) {
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags)
                lock->l_flags |= flags;

        unlock_res_and_lock(lock);
        retval = lock;
        EXIT;
 out:
        return retval;
}

/* Handle -> lock lookup; @ns is not used by the lookup itself. */
struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
                                      struct lustre_handle *handle)
{
        struct ldlm_lock *retval = NULL;
        retval = __ldlm_handle2lock(handle, 0);
        return retval;
}

/* Pack @lock into the wire descriptor @desc to be sent to a peer. */
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        struct obd_export *exp = lock->l_export?:lock->l_conn_export;
        /* INODEBITS_INTEROP: If the other side does not support
         * inodebits, reply with a plain lock descriptor. 
         */
        if ((lock->l_resource->lr_type == LDLM_IBITS) &&
            (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) {
                /* Work on a stack copy of the resource so the lock's real
                 * resource type is left untouched. */
                struct ldlm_resource res = *lock->l_resource;

                /* Make sure all the right bits are set in this lock we
                 * are going to pass to client */
                LASSERTF(lock->l_policy_data.l_inodebits.bits ==
                         (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE),
                         "Inappropriate inode lock bits during "
                         "conversion " LPU64 "\n",
                         lock->l_policy_data.l_inodebits.bits);
                res.lr_type = LDLM_PLAIN;
                ldlm_res2desc(&res, &desc->l_resource);
                /* Convert "new" lock mode to something old client can
                 * understand */
                if ((lock->l_req_mode == LCK_CR) ||
                    (lock->l_req_mode == LCK_CW))
                        desc->l_req_mode = LCK_PR;
                else
                        desc->l_req_mode = lock->l_req_mode;
                if ((lock->l_granted_mode == LCK_CR) ||
                    (lock->l_granted_mode == LCK_CW)) {
                        desc->l_granted_mode = LCK_PR;
                } else {
                        /* We never grant PW/EX locks to clients */
                        LASSERT((lock->l_granted_mode != LCK_PW) &&
                                (lock->l_granted_mode != LCK_EX));
                        desc->l_granted_mode = lock->l_granted_mode;
                }
                /* We do not copy policy here, because there is no policy for
                 * plain locks */
        } else {
                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_req_mode = lock->l_req_mode;
                desc->l_granted_mode = lock->l_granted_mode;
                desc->l_policy_data = lock->l_policy_data;
        }
}

/*
 * Queue a blocking-AST work item for @lock, which conflicts with @new.
 * LDLM_FL_AST_SENT guarantees the AST is queued at most once; references
 * are taken on both locks for the work list.  Caller must hold the
 * resource lock (see ldlm_add_ast_work_item below).
 */
void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                           struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                lock->l_flags |= LDLM_FL_AST_SENT;
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. 
                 */
                if (new->l_flags & LDLM_AST_DISCARD_DATA)
                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock); /* reference held by the work list */
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

/* Queue a completion-AST work item for @lock; LDLM_FL_CP_REQD guards
 * against queueing it twice.  Takes a work-list reference on the lock. */
void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
                lock->l_flags |= LDLM_FL_CP_REQD;
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                LASSERT(list_empty(&lock->l_cp_ast));
                list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}

/* must be called with lr_lock held */
/* Dispatcher: a non-NULL @new means "blocking AST" (conflict with @new);
 * NULL means "completion AST". */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            struct list_head *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}

/* Add a reader/writer mode reference to the lock named by @lockh. */
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERT(lock != NULL);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* Bump the reader or writer count for @mode and pin the lock with an
 * extra reference; the lock is in use again so pull it off the LRU.
 * Caller holds the resource lock (see the locked wrapper below). */
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR))
                lock->l_readers++;
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP))
                lock->l_writers++;
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        lock_res_and_lock(lock);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res_and_lock(lock);
}

/* only called in ldlm_flock_destroy and for local locks. 
 * for LDLM_FLOCK type locks, l_blocking_ast is null, and
 * ldlm_lock_remove_from_lru() does nothing, it is safe
 * for ldlm_flock_destroy usage by dropping some code */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) {
                LASSERT(lock->l_writers > 0);
                lock->l_writers--;
        }
        LDLM_LOCK_PUT(lock); /* matches the ldlm_lock_get in addref */
}

/*
 * Drop a mode reference on @lock and, when that was the last reader or
 * writer, either hand the lock to the blocking-callback path (if a cancel
 * is pending) or park it on the client-side LRU.
 */
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        ns = lock->l_resource->lr_namespace;
        ldlm_lock_decref_internal_nolock(lock, mode);

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocked AST and this was the last reference,
                 * run the callback. */
                if (ns_is_server(ns) && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res_and_lock(lock);
                /* ATOMIC_CB locks run the callback inline; otherwise hand
                 * off to the blocking thread, falling back to inline if the
                 * handoff fails. */
                if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
                    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns_is_client(ns) &&
                   !lock->l_readers && !lock->l_writers &&
                   !(lock->l_flags & LDLM_FL_NO_LRU) &&
                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. 
                 */
                ldlm_lock_add_to_lru(lock);
                unlock_res_and_lock(lock);
                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
                 * are not supported by the server, otherwise, it is done on
                 * enqueue. */
                if (!exp_connect_cancelset(lock->l_conn_export) &&
                    !ns_connect_lru_resize(ns))
                        ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0);
        } else {
                unlock_res_and_lock(lock);
        }

        EXIT;
}

/* Drop a reader/writer mode reference on the lock named by @lockh. */
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        /* Setting CBPENDING makes the final-decref path above cancel the
         * lock once the last reader/writer reference goes away. */
        lock->l_flags |= LDLM_FL_CBPENDING;
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/*
 * search_granted_lock
 *
 * Description:
 *      Finds a position to insert the new lock.
 * Parameters:
 *      queue [input]:  the granted list where search acts on;
 *      req [input]:    the lock whose position to be located;
 *      lockp [output]: the position where the lock should be inserted before,
 *                      or NULL indicating @req should be appended to @queue.
 * Return Values:
 *      Bit-masks combination of following values indicating in which way the
 *      lock need to be inserted.
 *      - LDLM_JOIN_NONE:        noting about skip list needs to be fixed;
 *      - LDLM_MODE_JOIN_RIGHT:  @req needs join right becoming the head of a
 *        mode group;
 *      - LDLM_POLICY_JOIN_RIGHT: @req needs join right becoming the head of
 *        a policy group.
 * NOTE: called by
 *  - ldlm_grant_lock_with_skiplist
 */
static int search_granted_lock(struct list_head *queue, struct ldlm_lock *req,
/* (Code-viewer keyboard-shortcut chrome removed -- not part of the source.
 * The file continues with the body of search_granted_lock() on page 2.) */