/*
 * xfs_mru_cache.c — XFS MRU (most-recently-used) cache, public API half.
 */
}

/*
 * Initialise the global MRU-cache infrastructure: the zone that cached
 * elements are allocated from and the single-threaded workqueue that runs
 * the periodic reaper.  Returns 0 on success or (positive) ENOMEM.
 */
int
xfs_mru_cache_init(void)
{
	xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t),
					   "xfs_mru_cache_elem");
	if (!xfs_mru_elem_zone)
		return ENOMEM;

	xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache");
	if (!xfs_mru_reap_wq) {
		/* Undo the zone creation above on partial failure. */
		kmem_zone_destroy(xfs_mru_elem_zone);
		return ENOMEM;
	}

	return 0;
}

/*
 * Tear down the global infrastructure created by xfs_mru_cache_init().
 */
void
xfs_mru_cache_uninit(void)
{
	destroy_workqueue(xfs_mru_reap_wq);
	kmem_zone_destroy(xfs_mru_elem_zone);
}

/*
 * To initialise a struct xfs_mru_cache pointer, call xfs_mru_cache_create()
 * with the address of the pointer, a lifetime value in milliseconds, a group
 * count and a free function to use when deleting elements. This function
 * returns 0 if the initialisation was successful.
 */
int
xfs_mru_cache_create(
	xfs_mru_cache_t		**mrup,
	unsigned int		lifetime_ms,
	unsigned int		grp_count,
	xfs_mru_cache_free_func_t free_func)
{
	xfs_mru_cache_t		*mru = NULL;
	int			err = 0, grp;
	unsigned int		grp_time;

	/* Clear the caller's pointer first so it is NULL on every error path. */
	if (mrup)
		*mrup = NULL;

	if (!mrup || !grp_count || !lifetime_ms || !free_func)
		return EINVAL;

	/*
	 * Each list covers lifetime/grp_count jiffies; a result of zero means
	 * the requested lifetime is too short to split into grp_count groups.
	 */
	if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count))
		return EINVAL;

	if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP)))
		return ENOMEM;

	/* An extra list is needed to avoid reaping up to a grp_time early. */
	mru->grp_count = grp_count + 1;
	mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);

	if (!mru->lists) {
		err = ENOMEM;
		goto exit;
	}

	for (grp = 0; grp < mru->grp_count; grp++)
		INIT_LIST_HEAD(mru->lists + grp);

	/*
	 * We use GFP_KERNEL radix tree preload and do inserts under a
	 * spinlock so GFP_ATOMIC is appropriate for the radix tree itself.
	 */
	INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
	INIT_LIST_HEAD(&mru->reap_list);
	spinlock_init(&mru->lock, "xfs_mru_cache");
	INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);

	mru->grp_time = grp_time;
	mru->free_func = free_func;

	*mrup = mru;

exit:
	/* Unified cleanup: on error, free whatever was allocated above. */
	if (err && mru && mru->lists)
		kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
	if (err && mru)
		kmem_free(mru, sizeof(*mru));

	return err;
}

/*
 * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
 * free functions as they're deleted. When this function returns, the caller is
 * guaranteed that all the free functions for all the elements have finished
 * executing and the reaper is not running.
 */
void
xfs_mru_cache_flush(
	xfs_mru_cache_t		*mru)
{
	if (!mru || !mru->lists)
		return;

	mutex_spinlock(&mru->lock);
	if (mru->queued) {
		/*
		 * Drop the lock while cancelling the reaper, since the work
		 * function takes the same lock.  NOTE(review): presumably the
		 * reaper cannot re-arm itself in this window — confirm against
		 * _xfs_mru_cache_reap.
		 */
		mutex_spinunlock(&mru->lock, 0);
		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
		mutex_spinlock(&mru->lock);
	}

	/*
	 * Pretend the full lifetime has elapsed so every element migrates to
	 * the reap list, then free them all under the lock.
	 */
	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
	_xfs_mru_cache_clear_reap_list(mru);

	mutex_spinunlock(&mru->lock, 0);
}

/*
 * Flush all entries (running their free functions) and release the data
 * structures allocated by xfs_mru_cache_create().
 */
void
xfs_mru_cache_destroy(
	xfs_mru_cache_t		*mru)
{
	if (!mru || !mru->lists)
		return;

	xfs_mru_cache_flush(mru);

	kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
	kmem_free(mru, sizeof(*mru));
}

/*
 * To insert an element, call xfs_mru_cache_insert() with the data store, the
 * element's key and the client data pointer. This function returns 0 on
 * success or ENOMEM if memory for the data element couldn't be allocated.
*/intxfs_mru_cache_insert( xfs_mru_cache_t *mru, unsigned long key, void *value){ xfs_mru_cache_elem_t *elem; ASSERT(mru && mru->lists); if (!mru || !mru->lists) return EINVAL; elem = kmem_zone_zalloc(xfs_mru_elem_zone, KM_SLEEP); if (!elem) return ENOMEM; if (radix_tree_preload(GFP_KERNEL)) { kmem_zone_free(xfs_mru_elem_zone, elem); return ENOMEM; } INIT_LIST_HEAD(&elem->list_node); elem->key = key; elem->value = value; mutex_spinlock(&mru->lock); radix_tree_insert(&mru->store, key, elem); radix_tree_preload_end(); _xfs_mru_cache_list_insert(mru, elem); mutex_spinunlock(&mru->lock, 0); return 0;}/* * To remove an element without calling the free function, call * xfs_mru_cache_remove() with the data store and the element's key. On success * the client data pointer for the removed element is returned, otherwise this * function will return a NULL pointer. */void *xfs_mru_cache_remove( xfs_mru_cache_t *mru, unsigned long key){ xfs_mru_cache_elem_t *elem; void *value = NULL; ASSERT(mru && mru->lists); if (!mru || !mru->lists) return NULL; mutex_spinlock(&mru->lock); elem = radix_tree_delete(&mru->store, key); if (elem) { value = elem->value; list_del(&elem->list_node); } mutex_spinunlock(&mru->lock, 0); if (elem) kmem_zone_free(xfs_mru_elem_zone, elem); return value;}/* * To remove and element and call the free function, call xfs_mru_cache_delete() * with the data store and the element's key. */voidxfs_mru_cache_delete( xfs_mru_cache_t *mru, unsigned long key){ void *value = xfs_mru_cache_remove(mru, key); if (value) mru->free_func(key, value);}/* * To look up an element using its key, call xfs_mru_cache_lookup() with the * data store and the element's key. If found, the element will be moved to the * head of the MRU list to indicate that it's been touched. * * The internal data structures are protected by a spinlock that is STILL HELD * when this function returns. Call xfs_mru_cache_done() to release it. 
Note * that it is not safe to call any function that might sleep in the interim. * * The implementation could have used reference counting to avoid this * restriction, but since most clients simply want to get, set or test a member * of the returned data structure, the extra per-element memory isn't warranted. * * If the element isn't found, this function returns NULL and the spinlock is * released. xfs_mru_cache_done() should NOT be called when this occurs. */void *xfs_mru_cache_lookup( xfs_mru_cache_t *mru, unsigned long key){ xfs_mru_cache_elem_t *elem; ASSERT(mru && mru->lists); if (!mru || !mru->lists) return NULL; mutex_spinlock(&mru->lock); elem = radix_tree_lookup(&mru->store, key); if (elem) { list_del(&elem->list_node); _xfs_mru_cache_list_insert(mru, elem); } else mutex_spinunlock(&mru->lock, 0); return elem ? elem->value : NULL;}/* * To look up an element using its key, but leave its location in the internal * lists alone, call xfs_mru_cache_peek(). If the element isn't found, this * function returns NULL. * * See the comments above the declaration of the xfs_mru_cache_lookup() function * for important locking information pertaining to this call. */void *xfs_mru_cache_peek( xfs_mru_cache_t *mru, unsigned long key){ xfs_mru_cache_elem_t *elem; ASSERT(mru && mru->lists); if (!mru || !mru->lists) return NULL; mutex_spinlock(&mru->lock); elem = radix_tree_lookup(&mru->store, key); if (!elem) mutex_spinunlock(&mru->lock, 0); return elem ? elem->value : NULL;}/* * To release the internal data structure spinlock after having performed an * xfs_mru_cache_lookup() or an xfs_mru_cache_peek(), call xfs_mru_cache_done() * with the data store pointer. */voidxfs_mru_cache_done( xfs_mru_cache_t *mru){ mutex_spinunlock(&mru->lock, 0);}
/*
 * (Removed: code-viewer keyboard-shortcut help captured from the hosting web
 * page — copy, search, full-screen, theme and font-size bindings. Not part of
 * the source file.)
 */