ldlm_request.c

来自「lustre 1.6.5 source code」· C语言 代码 · 共 1,663 行 · 第 1/5 页

C
1,663
字号
{
        LASSERT(imp != NULL);
        return &imp->imp_obd->obd_namespace->ns_pool;
}

/* Propagate the server's SLV (server lock volume) and lock limit from an
 * RPC reply into the client-side pool of the request's import.  Returns 0
 * in all cases; an unsupported feature or zeroed values are not errors. */
int ldlm_cli_update_pool(struct ptlrpc_request *req)
{
        __u64 old_slv, new_slv;
        struct ldlm_pool *pl;
        __u32 new_limit;
        ENTRY;

        /* Nothing to do when the server does not support lru resize. */
        if (!imp_connect_lru_resize(req->rq_import))
                RETURN(0);

        /* In some cases RPC may contain slv and limit zeroed out. This is
         * the case when server does not support lru resize feature. This is
         * also possible in some recovery cases when server side reqs have no
         * ref to obd export and thus access to server side namespace is not
         * possible. */
        if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
            lustre_msg_get_limit(req->rq_repmsg) == 0) {
                DEBUG_REQ(D_HA, req, "zero SLV or Limit found "
                          "(SLV: "LPU64", Limit: %u)",
                          lustre_msg_get_slv(req->rq_repmsg),
                          lustre_msg_get_limit(req->rq_repmsg));
                RETURN(0);
        }

        new_limit = lustre_msg_get_limit(req->rq_repmsg);
        new_slv = lustre_msg_get_slv(req->rq_repmsg);
        pl = ldlm_imp2pl(req->rq_import);

        /* pl_lock protects both the SLV and the limit of the pool. */
        spin_lock(&pl->pl_lock);
        old_slv = ldlm_pool_get_slv(pl);
        ldlm_pool_set_slv(pl, new_slv);
        ldlm_pool_set_limit(pl, new_limit);

        /* Check if we need to wakeup pools thread for fast SLV change.
         * This is only done when threads period is noticeably long like
         * 10s or more. */
#if defined(__KERNEL__) && (LDLM_POOLS_THREAD_PERIOD >= 10)
        {
                __u64 fast_change = old_slv * LDLM_POOLS_FAST_SLV_CHANGE;
                do_div(fast_change, 100);

                /* Wake up pools thread only if SLV has changed more than
                 * 50% since last update. In this case we want to react asap.
                 * Otherwise it is no sense to wake up pools as they are
                 * re-calculated every LDLM_POOLS_THREAD_PERIOD anyways. */
                if (old_slv > new_slv && old_slv - new_slv > fast_change)
                        ldlm_pools_wakeup();
        }
#endif
        spin_unlock(&pl->pl_lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_cli_update_pool);

/* Client-side cancel of the lock referred to by @lockh.  The lock is
 * canceled locally first; when a CANCEL RPC is still required and the
 * server supports cancel sets, other unused LRU locks are batched into
 * the same RPC.  Returns 0, or a negative rc from the local cancel. */
int ldlm_cli_cancel(struct lustre_handle *lockh)
{
        struct ldlm_namespace *ns;
        int avail, flags, count = 1, rc = 0;
        struct ldlm_lock *lock;
        CFS_LIST_HEAD(cancels);
        ENTRY;

        /* concurrent cancels on the same handle can happen */
        lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
        if (lock == NULL) {
                LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
                RETURN(0);
        }

        rc = ldlm_cli_cancel_local(lock);
        if (rc < 0 || rc == LDLM_FL_LOCAL_ONLY) {
                /* No RPC needed (or local cancel failed); drop our ref. */
                LDLM_LOCK_PUT(lock);
                RETURN(rc < 0 ? rc : 0);
        }

        /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
         * rpc which goes to canceld portal, so we can cancel other lru locks
         * here and send them all as one LDLM_CANCEL rpc. */
        LASSERT(list_empty(&lock->l_bl_ast));
        list_add(&lock->l_bl_ast, &cancels);
        if (exp_connect_cancelset(lock->l_conn_export)) {
                avail = ldlm_cancel_handles_avail(lock->l_conn_export);
                LASSERT(avail > 0);

                ns = lock->l_resource->lr_namespace;
                /* LRU-resize namespaces use the SLV-based policy, others
                 * the age-based one. */
                flags = ns_connect_lru_resize(ns) ?
                        LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
                count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
                                               LDLM_FL_BL_AST, flags);
        }
        ldlm_cli_cancel_list(&cancels, count, NULL, 0);
        RETURN(0);
}

/* XXX until we will have compound requests and can cut cancels from generic rpc
 * we need send cancels with LDLM_FL_BL_AST flag as separate rpc */
static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
{
        CFS_LIST_HEAD(head);
        struct ldlm_lock *lock, *next;
        int left = 0, bl_ast = 0, rc;

        left = count;
        list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
                /* Process at most @count locks from the list. */
                if (left-- == 0)
                        break;

                if (flags & LDLM_FL_LOCAL_ONLY) {
                        rc = LDLM_FL_LOCAL_ONLY;
                        ldlm_lock_cancel(lock);
                } else {
                        rc = ldlm_cli_cancel_local(lock);
                }
                /* Locks that need a blocking AST are collected on @head and
                 * sent as a separate RPC, unless the caller asked for
                 * LDLM_FL_BL_AST locks too. */
                if (!(flags & LDLM_FL_BL_AST) && (rc == LDLM_FL_BL_AST)) {
                        LDLM_DEBUG(lock, "Cancel lock separately");
                        list_del_init(&lock->l_bl_ast);
                        list_add(&lock->l_bl_ast, &head);
                        bl_ast++;
                        continue;
                }
                if (rc == LDLM_FL_LOCAL_ONLY) {
                        /* CANCEL RPC should not be sent to server. */
                        list_del_init(&lock->l_bl_ast);
                        LDLM_LOCK_PUT(lock);
                        count--;
                }
        }
        if (bl_ast > 0) {
                count -= bl_ast;
                ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
        }

        /* Number of locks remaining on @cancels. */
        RETURN(count);
}

/* Return 1 to stop lru processing and keep current lock cached. Return zero
 * otherwise.
*/
/* Memory-pressure (shrinker) policy: cancel cheap locks, keep expensive
 * ones (extent locks covering many pages). */
static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
                                                   struct ldlm_lock *lock,
                                                   int unused, int added,
                                                   int count)
{
        int lock_cost;
        __u64 page_nr;

        /* Stop lru processing when we reached passed @count or checked all
         * locks in lru. */
        if (count && added >= count)
                return LDLM_POLICY_KEEP_LOCK;

        if (lock->l_resource->lr_type == LDLM_EXTENT) {
                struct ldlm_extent *l_extent;

                /* For all extent locks cost is 1 + number of pages in
                 * their extent. */
                l_extent = &lock->l_policy_data.l_extent;
                page_nr = (l_extent->end - l_extent->start);
                do_div(page_nr, CFS_PAGE_SIZE);

#ifdef __KERNEL__
                /* XXX: In fact this is evil hack, we can't access inode
                 * here. For doing it right we need somehow to have number
                 * of pages covered by lock. This should be fixed later when
                 * 10718 is landed. */
                if (lock->l_ast_data != NULL) {
                        struct inode *inode = lock->l_ast_data;
                        /* Clamp to the pages actually cached by the inode. */
                        if (page_nr > inode->i_mapping->nrpages)
                                page_nr = inode->i_mapping->nrpages;
                }
#endif
                lock_cost = 1 + page_nr;
        } else {
                /* For all locks which are not extent ones cost is 1 */
                lock_cost = 1;
        }

        /* Keep all expensive locks in lru for the memory pressure time
         * cancel policy. They anyways may be canceled by lru resize
         * policy if they have not small enough CLV. */
        return lock_cost > ns->ns_shrink_thumb ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/* Return 1 to stop lru processing and keep current lock cached. Return zero
 * otherwise.
*/
/* LRU-resize policy: compare the lock's "volume" (volume factor * idle
 * seconds * number of unused locks) against the server lock volume. */
static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
                                                 struct ldlm_lock *lock,
                                                 int unused, int added,
                                                 int count)
{
        cfs_time_t cur = cfs_time_current();
        struct ldlm_pool *pl = &ns->ns_pool;
        __u64 slv, lvf, lv;
        cfs_time_t la;

        /* Stop lru processing when we reached passed @count or checked all
         * locks in lru. */
        if (count && added >= count)
                return LDLM_POLICY_KEEP_LOCK;

        /* Snapshot SLV and the lock volume factor under pl_lock. */
        spin_lock(&pl->pl_lock);
        slv = ldlm_pool_get_slv(pl);
        lvf = atomic_read(&pl->pl_lock_volume_factor);
        spin_unlock(&pl->pl_lock);

        /* Seconds since the lock was last used. */
        la = cfs_duration_sec(cfs_time_sub(cur,
                              lock->l_last_used));

        /* Stop when slv is not yet come from server or
         * lv is smaller than it is. */
        lv = lvf * la * unused;
        return (slv == 1 || lv < slv) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/* Return 1 to stop lru processing and keep current lock cached. Return zero
 * otherwise. */
static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
                                                   struct ldlm_lock *lock,
                                                   int unused, int added,
                                                   int count)
{
        /* Stop lru processing when we reached passed @count or checked all
         * locks in lru. */
        return (added >= count) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/* Return 1 to stop lru processing and keep current lock cached. Return zero
 * otherwise.
*/static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,                                                 struct ldlm_lock *lock,                                                  int unused, int added,                                                 int count){        /* Stop lru processing if young lock is found and we reached passed          * @count. */        return ((added >= count) &&                 cfs_time_before(cfs_time_current(),                                cfs_time_add(lock->l_last_used,                                             ns->ns_max_age))) ?                 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;}/* Return 1 to stop lru processing and keep current lock cached. Return zero  * otherwise. */static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,                                                    struct ldlm_lock *lock,                                                     int unused, int added,                                                    int count){        /* Stop lru processing when we reached passed @count or checked all          * locks in lru. */        return (added >= count) ?                 
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;}typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,                                                       struct ldlm_lock *, int,                                                       int, int);static ldlm_cancel_lru_policy_tldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags){        if (ns_connect_lru_resize(ns)) {                if (flags & LDLM_CANCEL_SHRINK)                        return ldlm_cancel_shrink_policy;                else if (flags & LDLM_CANCEL_LRUR)                        return ldlm_cancel_lrur_policy;                else if (flags & LDLM_CANCEL_PASSED)                        return ldlm_cancel_passed_policy;        } else {                if (flags & LDLM_CANCEL_AGED)                        return ldlm_cancel_aged_policy;        }                return ldlm_cancel_default_policy;} /* - Free space in lru for @count new locks, *   redundant unused locks are canceled locally; * - also cancel locally unused aged locks; * - do not cancel more than @max locks; * - GET the found locks and add them into the @cancels list. * * A client lock can be added to the l_bl_ast list only when it is * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing CANCEL. * There are the following use cases: ldlm_cancel_resource_local(), * ldlm_cancel_lru_local() and ldlm_cli_cancel(), which check&set this * flag properly. As any attempt to cancel a lock rely on this flag, * l_bl_ast list is accessed later without any special locking. 
* * Calling policies for enabled lru resize: * ---------------------------------------- * flags & LDLM_CANCEL_LRUR - use lru resize policy (SLV from server) to *                            cancel not more than @count locks; * * flags & LDLM_CANCEL_PASSED - cancel @count number of old locks (located at *                              the beginning of lru list); * * flags & LDLM_CANCEL_SHRINK - cancel not more than @count locks according to *                              memory pressre policy function; * * flags & LDLM_CANCEL_AGED -   cancel locks according to "aged policy". */int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,                          int count, int max, int cancel_flags, int flags){        ldlm_cancel_lru_policy_t pf;        struct ldlm_lock *lock, *next;        int added = 0, unused;        ENTRY;        spin_lock(&ns->ns_unused_lock);        unused = ns->ns_nr_unused;        if (!ns_connect_lru_resize(ns))                count += unused - ns->ns_max_unused;        pf = ldlm_cancel_lru_policy(ns, flags);        LASSERT(pf != NULL);                while (!list_empty(&ns->ns_unused_list)) {                /* For any flags, stop scanning if @max is reached. */                if (max && added >= max)                        break;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?