ldlm_resource.c

来自「lustre 1.6.5 source code」· C语言 代码 · 共 1,062 行 · 第 1/3 页

C
1,062
字号
        at_init(&ns->ns_at_estimate, ldlm_enqueue_min, 0);

        /* Publish the new namespace on the per-side (client/server) list. */
        mutex_down(ldlm_namespace_lock(client));
        list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
        atomic_inc(ldlm_namespace_nr(client));
        mutex_up(ldlm_namespace_lock(client));
        RETURN(ns);

        /* Error unwinding: undo the allocations above, newest first. */
out_proc:
        ldlm_namespace_cleanup(ns, 0);
        OBD_FREE(ns->ns_name, namelen + 1);
out_hash:
        OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
out_ns:
        OBD_FREE_PTR(ns);
out_ref:
        ldlm_put_ref(0);
        RETURN(NULL);
}

extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);

/* Cancel or locally destroy every lock on list @q of resource @res.
 *
 * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just cleanup.
 * This is currently only used for recovery, and we make certain assumptions
 * as a result--notably, that we shouldn't cancel locks with refs. -phil
 *
 * Called with the ns_lock held. */
static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
                             int flags)
{
        struct list_head *tmp;
        int rc = 0, client = ns_is_client(res->lr_namespace);
        int local_only = (flags & LDLM_FL_LOCAL_ONLY);
        ENTRY;

        /* One lock is processed per iteration; the list is rescanned from
         * the head each time because the resource lock is dropped below. */
        do {
                struct ldlm_lock *lock = NULL;

                /* first, we look for non-cleaned-yet lock
                 * all cleaned locks are marked by CLEANED flag */
                lock_res(res);
                list_for_each(tmp, q) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (lock->l_flags & LDLM_FL_CLEANED) {
                                lock = NULL;
                                continue;
                        }
                        /* Take a reference so the lock survives once we drop
                         * the resource lock; mark it CLEANED so the next
                         * rescan skips it. */
                        LDLM_LOCK_GET(lock);
                        lock->l_flags |= LDLM_FL_CLEANED;
                        break;
                }

                /* No un-cleaned lock left on @q: we are done. */
                if (lock == NULL) {
                        unlock_res(res);
                        break;
                }

                /* Set CBPENDING so nothing in the cancellation path
                 * can match this lock */
                lock->l_flags |= LDLM_FL_CBPENDING;
                lock->l_flags |= LDLM_FL_FAILED;
                lock->l_flags |= flags;

                /* ... without sending a CANCEL message for local_only. */
                if (local_only)
                        lock->l_flags |= LDLM_FL_LOCAL_ONLY;

                if (local_only && (lock->l_readers || lock->l_writers)) {
                        /* This is a little bit gross, but much better than the
                         * alternative: pretend that we got a blocking AST from
                         * the server, so that when the lock is decref'd, it
                         * will go away ... */
                        unlock_res(res);
                        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                        LDLM_LOCK_PUT(lock);
                        continue;
                }

                if (client) {
                        /* Client side: cancel through the regular RPC path. */
                        struct lustre_handle lockh;

                        unlock_res(res);
                        ldlm_lock2handle(lock, &lockh);
                        rc = ldlm_cli_cancel(&lockh);
                        if (rc)
                                CERROR("ldlm_cli_cancel: %d\n", rc);
                } else {
                        /* Server side: the client holding this lock is
                         * presumably gone; destroy the lock locally. */
                        ldlm_resource_unlink_lock(lock);
                        unlock_res(res);
                        LDLM_DEBUG(lock, "Freeing a lock still held by a "
                                   "client node");
                        ldlm_lock_destroy(lock);
                }
                LDLM_LOCK_PUT(lock);
        } while (1);

        EXIT;
}

/* Cancel (or, with LDLM_FL_LOCAL_ONLY in @flags, locally clean up) every
 * lock in every resource of namespace @ns.  A NULL @ns is tolerated.
 * Always returns ELDLM_OK. */
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
{
        struct list_head *tmp;
        int i;

        if (ns == NULL) {
                CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
                return ELDLM_OK;
        }

        /* Walk every hash bucket of the namespace. */
        for (i = 0; i <
RES_HASH_SIZE; i++) {
                spin_lock(&ns->ns_hash_lock);
                tmp = ns->ns_hash[i].next;
                while (tmp != &(ns->ns_hash[i])) {
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);

                        /* Pin the resource so it survives while the hash
                         * lock is dropped for the (blocking) cleanup calls. */
                        ldlm_resource_getref(res);
                        spin_unlock(&ns->ns_hash_lock);

                        cleanup_resource(res, &res->lr_granted, flags);
                        cleanup_resource(res, &res->lr_converting, flags);
                        cleanup_resource(res, &res->lr_waiting, flags);

                        spin_lock(&ns->ns_hash_lock);
                        /* Advance before dropping our ref: putref may free
                         * @res and unlink it from the bucket. */
                        tmp  = tmp->next;

                        /* XXX: former stuff caused issues in case of race
                         * between ldlm_namespace_cleanup() and lockd() when
                         * client gets blocking ast when lock gets distracted by
                         * server. This is 1_4 branch solution, let's see how
                         * will it behave. */
                        if (!ldlm_resource_putref_locked(res))
                                CDEBUG(D_INFO,
                                       "Namespace %s resource refcount nonzero "
                                       "(%d) after lock cleanup; forcing cleanup.\n",
                                       ns->ns_name, atomic_read(&res->lr_refcount));
                }
                spin_unlock(&ns->ns_hash_lock);
        }
        return ELDLM_OK;
}

/* First half of namespace teardown: unhook @ns from the global list, shut
 * down its pool, cancel all locks, and wait for the resource refcount to
 * drain to zero.  Runs before ldlm_namespace_free_post() — see the
 * bug 12864 deadlock comment below for why the teardown is split. */
int ldlm_namespace_free_prior(struct ldlm_namespace *ns)
{
        ENTRY;
        if (!ns)
                RETURN(ELDLM_OK);

        mutex_down(ldlm_namespace_lock(ns->ns_client));
        /*
         * Some asserts and possibly other parts of code still using
         * list_empty(&ns->ns_list_chain). This is why it is important
         * to use list_del_init() here.
         */
        list_del_init(&ns->ns_list_chain);
        atomic_dec(ldlm_namespace_nr(ns->ns_client));
        ldlm_pool_fini(&ns->ns_pool);
        mutex_up(ldlm_namespace_lock(ns->ns_client));

        /* At shutdown time, don't call the cancellation callback */
        ldlm_namespace_cleanup(ns, 0);

        /* Resources may still be referenced elsewhere; wait (interruptibly)
         * for the last reference to be dropped before freeing memory. */
        if (ns->ns_refcount > 0) {
                struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
                int rc;
                CDEBUG(D_DLMTRACE,
                       "dlm namespace %s free waiting on refcount %d\n",
                       ns->ns_name, ns->ns_refcount);
                rc = l_wait_event(ns->ns_waitq,
                                  ns->ns_refcount == 0, &lwi);
                if (ns->ns_refcount)
                        LCONSOLE_ERROR_MSG(0x139, "Lock manager: wait for %s "
                                           "namespace cleanup aborted with %d "
                                           "resources in use. (%d)\nI'm going "
                                           "to try to clean up anyway, but I "
                                           "might need a reboot of this node.\n",
                                            ns->ns_name, (int) ns->ns_refcount,
                                             rc);
                CDEBUG(D_DLMTRACE,
                       "dlm namespace %s free done waiting\n", ns->ns_name);
        }
        RETURN(ELDLM_OK);
}

/* Second half of namespace teardown: remove the procfs entry and free the
 * hash table, the name and @ns itself.  Called without cli->cl_sem held
 * (see the bug 12864 comment below). */
int ldlm_namespace_free_post(struct ldlm_namespace *ns, int force)
{
        ENTRY;
        if (!ns)
                RETURN(ELDLM_OK);

#ifdef LPROCFS
        {
                struct proc_dir_entry *dir;
                dir = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
                if (dir == NULL) {
                        CERROR("dlm namespace %s has no procfs dir?\n",
                               ns->ns_name);
                } else {
                        lprocfs_remove(&dir);
                }
        }
#endif

        OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
        /*
         * @ns should be not on list in this time, otherwise this will cause
         * issues related to using freed @ns in pools thread.
         */
        LASSERT(list_empty(&ns->ns_list_chain));
        OBD_FREE_PTR(ns);
        ldlm_put_ref(force);
        RETURN(ELDLM_OK);
}

/* Cleanup the resource, and free namespace.
 * bug 12864:
 * Deadlock issue:
 * proc1: destroy import
 *        class_disconnect_export(grab cl_sem) ->
 *              -> ldlm_namespace_free ->
 *              -> lprocfs_remove(grab _lprocfs_lock).
 * proc2: read proc info
 *        lprocfs_fops_read(grab _lprocfs_lock) ->
 *              -> osc_rd_active, etc(grab cl_sem).
 *
 * So that I have to split the ldlm_namespace_free into two parts - the first
 * part ldlm_namespace_free_prior is used to cleanup the resource which is
 * being used; the 2nd part ldlm_namespace_free_post is used to unregister the
 * lprocfs entries, and then free memory. It will be called w/o cli->cl_sem
 * held.
*/int ldlm_namespace_free(struct ldlm_namespace *ns, int force){        ldlm_namespace_free_prior(ns);        ldlm_namespace_free_post(ns, force);        return ELDLM_OK;}void ldlm_namespace_get_nolock(struct ldlm_namespace *ns){        LASSERT(ns->ns_refcount >= 0);        ns->ns_refcount++;}void ldlm_namespace_get(struct ldlm_namespace *ns){        spin_lock(&ns->ns_hash_lock);        ldlm_namespace_get_nolock(ns);        spin_unlock(&ns->ns_hash_lock);}void ldlm_namespace_put_nolock(struct ldlm_namespace *ns, int wakeup){        LASSERT(ns->ns_refcount > 0);        ns->ns_refcount--;        if (ns->ns_refcount == 0 && wakeup)                wake_up(&ns->ns_waitq);}void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup){        spin_lock(&ns->ns_hash_lock);        ldlm_namespace_put_nolock(ns, wakeup);        spin_unlock(&ns->ns_hash_lock);}/* Should be called under ldlm_namespace_lock(client) taken */void ldlm_namespace_move(struct ldlm_namespace *ns, ldlm_side_t client){        LASSERT(!list_empty(&ns->ns_list_chain));        LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));        list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));}/* Should be called under ldlm_namespace_lock(client) taken */struct ldlm_namespace *ldlm_namespace_first(ldlm_side_t client){        LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));        LASSERT(!list_empty(ldlm_namespace_list(client)));        return container_of(ldlm_namespace_list(client)->next,                 struct ldlm_namespace, ns_list_chain);}static __u32 ldlm_hash_fn(struct ldlm_resource *parent, struct ldlm_res_id name){        __u32 hash = 0;        int i;        for (i = 0; i < RES_NAME_SIZE; i++)                hash += name.name[i];        hash += (__u32)((unsigned long)parent >> 4);        return (hash & RES_HASH_MASK);}static struct ldlm_resource *ldlm_resource_new(void){        struct ldlm_resource *res;        int idx;        OBD_SLAB_ALLOC(res, ldlm_resource_slab, CFS_ALLOC_IO, sizeof *res); 
       if (res == NULL)                return NULL;        memset(res, 0, sizeof(*res));        CFS_INIT_LIST_HEAD(&res->lr_children);        CFS_INIT_LIST_HEAD(&res->lr_childof);        CFS_INIT_LIST_HEAD(&res->lr_granted);        CFS_INIT_LIST_HEAD(&res->lr_converting);        CFS_INIT_LIST_HEAD(&res->lr_waiting);        /* initialize interval trees for each lock mode*/        for (idx = 0; idx < LCK_MODE_NUM; idx++) {                res->lr_itree[idx].lit_size = 0;                res->lr_itree[idx].lit_mode = 1 << idx;                res->lr_itree[idx].lit_root = NULL;        }        atomic_set(&res->lr_refcount, 1);        spin_lock_init(&res->lr_lock);        /* one who creates the resource must unlock         * the semaphore after lvb initialization */        init_MUTEX_LOCKED(&res->lr_lvb_sem);        return res;}/* must be called with hash lock held */static struct ldlm_resource *ldlm_resource_find(struct ldlm_namespace *ns, struct ldlm_res_id name, __u32 hash){        struct list_head *bucket, *tmp;        struct ldlm_resource *res;        LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);        bucket = ns->ns_hash + hash;        list_for_each(tmp, bucket) {                res = list_entry(tmp, struct ldlm_resource, lr_hash);                if (memcmp(&res->lr_name, &name, sizeof(res->lr_name)) == 0)                        return res;        }        return NULL;}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?