/* cache.c
 *
 * Recovered from a code-viewer dump labelled: "lustre 1.6.5 source code",
 * C, 399 lines total, page 1 of 2.  Only this page's content is present.
 */
        /* NOTE(review): this is the tail of cache_del_extent_removal_cb();
           its definition starts before this excerpt.  If no removal
           callbacks remain, the pin callback is cleared as well. */
        if (list_empty(&cache->lc_page_removal_callback_list))
                cache->lc_pin_extent_cb = NULL;

        return !found;
}
EXPORT_SYMBOL(cache_del_extent_removal_cb);

/* Detach @extent from its lock's extent list without taking any locks.
 *
 * Caller is expected to hold the appropriate l_extents_list_lock (this
 * is the "_nolock" variant).  Clears extent->oap_ldlm_lock and unlinks
 * the extent from oap_page_list if it is on one.
 *
 * Returns nonzero if the extent still had a lock reference on entry
 * (i.e. oap_ldlm_lock was non-NULL), zero otherwise. */
static int cache_remove_extent_nolock(struct lustre_cache *cache,
                                      struct osc_async_page *extent)
{
        /* Snapshot whether a lock was still attached before we clear it. */
        int have_lock = !!extent->oap_ldlm_lock;

        /* We used to check oap_ldlm_lock for non NULL here, but it might be
           NULL, in fact, due to parallel page eviction clearing it and waiting
           on a lock's page list lock */
        extent->oap_ldlm_lock = NULL;

        if (!list_empty(&extent->oap_page_list))
                list_del_init(&extent->oap_page_list);

        return have_lock;
}

/* Request the @extent to be removed from cache and locks it belongs to. */
void cache_remove_extent(struct lustre_cache *cache,
                         struct osc_async_page *extent)
{
        struct ldlm_lock *lock;

        /* Detach the lock pointer under oap_lock so we race safely with
           parallel eviction (see comment in cache_remove_extent_nolock). */
        spin_lock(&extent->oap_lock);
        lock = extent->oap_ldlm_lock;
        extent->oap_ldlm_lock = NULL;
        spin_unlock(&extent->oap_lock);

        /* No lock - means this extent is not in any list */
        if (!lock)
                return;

        /* Unlink from the lock's extent list under its list lock. */
        spin_lock(&lock->l_extents_list_lock);
        if (!list_empty(&extent->oap_page_list))
                list_del_init(&extent->oap_page_list);
        spin_unlock(&lock->l_extents_list_lock);
}

/* Iterate through list of extents in given lock identified by @lockh,
   calling @cb_func for every such extent. Also passed @data to every call.
   Stops iterating prematurely if @cb_func returns nonzero.
 */
int cache_iterate_extents(struct lustre_cache *cache,
                          struct lustre_handle *lockh,
                          cache_iterate_extents_cb_t cb_func, void *data)
{
        /* Resolve the handle to a lock; takes a reference that must be
           dropped with LDLM_LOCK_PUT below. */
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        struct osc_async_page *extent, *t;

        if (!lock)      /* Lock disappeared under us - nothing to iterate. */
                return 0;

        /* Parallel page removal from mem pressure can race with us */
        spin_lock(&lock->l_extents_list_lock);
        /* _safe variant: cb_func may detach the current extent. */
        list_for_each_entry_safe(extent, t, &lock->l_extents_list,
                                 oap_page_list) {
                if (cb_func(cache, lockh, extent, data))
                        break;
        }
        spin_unlock(&lock->l_extents_list_lock);
        LDLM_LOCK_PUT(lock);
        return 0;
}

/* Drain every extent from @lock's extent list, notifying the page-removal
 * machinery (cache_extent_removal_event) for each extent that cannot be
 * re-cached.  @data is unused.  Always returns 0. */
static int cache_remove_extents_from_lock(struct lustre_cache *cache,
                                          struct ldlm_lock *lock, void *data)
{
        struct osc_async_page *extent;
        void *ext_data;

        LASSERT(lock);

        spin_lock(&lock->l_extents_list_lock);
        /* Pop extents off the head until the list is empty; the lock's
           list spinlock is dropped and re-taken inside the loop, so the
           head must be re-read each iteration. */
        while (!list_empty(&lock->l_extents_list)) {
                extent = list_entry(lock->l_extents_list.next,
                                    struct osc_async_page, oap_page_list);

                spin_lock(&extent->oap_lock);
                /* If there is no lock referenced from this oap, it means
                   there is parallel page-removal process waiting to free that
                   page on l_extents_list_lock and it holds page lock.
                   We need this page to completely go away and for that to
                   happen we will just try to truncate it here too.
                   Serialisation on page lock will achieve that goal for us. */
                /* Try to add extent back to the cache first, but only if we
                 * cancel read lock, write locks cannot have other overlapping
                 * locks.
If adding is not possible (or we are canceling a PW lock),
                 * then remove the extent from the cache. */
                if (!cache_remove_extent_nolock(cache, extent) ||
                    (lock->l_granted_mode == LCK_PW) ||
                    cache_add_extent(cache, &lock->l_resource->lr_name, extent,
                                     NULL)) {
                        /* We need to remember this oap_page value now,
                           once we release spinlocks, extent struct
                           might be freed and we endup requesting
                           page with address 0x5a5a5a5a in
                           cache_extent_removal_event */
                        ext_data = extent->oap_page;
                        /* Pin the page so it survives until the removal
                           event below has been delivered. */
                        cache->lc_pin_extent_cb(extent->oap_page);
                        spin_unlock(&extent->oap_lock);
                        /* Must drop the list lock before calling out to the
                           removal event - it may sleep / take other locks. */
                        spin_unlock(&lock->l_extents_list_lock);
                        cache_extent_removal_event(cache, ext_data,
                                                   lock->l_flags &
                                                   LDLM_FL_DISCARD_DATA);
                        spin_lock(&lock->l_extents_list_lock);
                } else {
                        spin_unlock(&extent->oap_lock);
                }
        }
        spin_unlock(&lock->l_extents_list_lock);

        return 0;
}

/* Removes @lock from cache after necessary checks.
 */
int cache_remove_lock(struct lustre_cache *cache, struct lustre_handle *lockh)
{
        /* ldlm_handle2lock takes a reference, released via LDLM_LOCK_PUT. */
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);

        if (!lock)  /* The lock was removed by somebody just now, nothing to do */
                return 0;

        /* Drop all extents still attached to the lock first. */
        cache_remove_extents_from_lock(cache, lock, NULL /*data */ );

        /* Then unlink the lock itself from the cache's lock list. */
        spin_lock(&cache->lc_locks_list_lock);
        list_del_init(&lock->l_cache_locks_list);
        spin_unlock(&cache->lc_locks_list_lock);

        LDLM_LOCK_PUT(lock);

        return 0;
}

/* Supposed to iterate through all locks in the cache for given resource.
   Not implemented at the moment. */
int cache_iterate_locks(struct lustre_cache *cache, struct ldlm_res_id *res,
                        cache_iterate_locks_cb_t cb_fun, void *data)
{
        return -ENOTSUPP;
}

/* Create lustre cache and attach it to @obd.
 * Returns the new cache, or NULL on allocation failure. */
struct lustre_cache *cache_create(struct obd_device *obd)
{
        struct lustre_cache *cache;

        OBD_ALLOC(cache, sizeof(*cache));
        if (!cache)
                GOTO(out, NULL);  /* OOM: fall through and return NULL */

        spin_lock_init(&cache->lc_locks_list_lock);
        CFS_INIT_LIST_HEAD(&cache->lc_locks_list);
        CFS_INIT_LIST_HEAD(&cache->lc_page_removal_callback_list);
        cache->lc_obd = obd;

      out:
        return cache;
}

/* Destroy @cache and free its memory.  Tolerates a NULL @cache.
 * Always returns 0. */
int cache_destroy(struct lustre_cache *cache)
{
        if (cache) {
                spin_lock(&cache->lc_locks_list_lock);
                /* The lock list should be empty by now; anything left is a
                   leaked lock reference - complain and unlink it anyway. */
                if (!list_empty(&cache->lc_locks_list)) {
                        struct ldlm_lock *lock, *tmp;
                        CERROR("still have locks in the list on cleanup:\n");
                        list_for_each_entry_safe(lock, tmp,
                                                 &cache->lc_locks_list,
                                                 l_cache_locks_list) {
                                list_del_init(&lock->l_cache_locks_list);
                                /* XXX: Of course natural idea would be to print
                                   offending locks here, but
if we use
                                   e.g. LDLM_ERROR, we will likely crash here,
                                   as LDLM error tries to access e.g.
                                   nonexisting namespace. Normally this kind of
                                   case could only happen when somebody did not
                                   release lock reference and we have other ways
                                   to detect this. */
                                /* Make sure there are no pages left under the
                                   lock */
                                LASSERT(list_empty(&lock->l_extents_list));
                        }
                }
                spin_unlock(&cache->lc_locks_list_lock);

                /* All removal callbacks must be unregistered before destroy. */
                LASSERT(list_empty(&cache->lc_page_removal_callback_list));

                OBD_FREE(cache, sizeof(*cache));
        }

        return 0;
}

/* End of page 1 of 2 — the remainder of the original 399-line cache.c
 * was not captured in this dump. */