/*
 * upcall_cache.c
 * From the Lustre 1.6.5 source code (C, 519 lines total; this is page 1/2).
 */
list_for_each_entry_safe(entry, next, head, ue_hash) { /* check invalid & expired items */ if (check_unlink_entry(entry)) continue; if (entry->ue_key == key) { found = 1; break; } } if (!found) { /* didn't find it */ if (!new) { spin_unlock(&hash->uc_lock); new = alloc_entry(key); if (!new) { CERROR("fail to alloc entry\n"); RETURN(ERR_PTR(-ENOMEM)); } goto find_again; } else { list_add(&new->ue_hash, head); entry = new; } } else { if (new) { free_entry(new); new = NULL; } list_move(&entry->ue_hash, head); } get_entry(entry); /* acquire for new one */ if (UC_CACHE_IS_NEW(entry)) { UC_CACHE_SET_ACQUIRING(entry); UC_CACHE_CLEAR_NEW(entry); entry->ue_acquire_expire = jiffies + hash->uc_acquire_expire; spin_unlock(&hash->uc_lock); rc = refresh_entry(hash, entry); spin_lock(&hash->uc_lock); if (rc < 0) { UC_CACHE_CLEAR_ACQUIRING(entry); UC_CACHE_SET_INVALID(entry); } /* fall through */ } /* someone (and only one) is doing upcall upon * this item, just wait it complete */ if (UC_CACHE_IS_ACQUIRING(entry)) { init_waitqueue_entry(&wait, current); add_wait_queue(&entry->ue_waitq, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&hash->uc_lock); schedule_timeout(hash->uc_acquire_expire); spin_lock(&hash->uc_lock); remove_wait_queue(&entry->ue_waitq, &wait); if (UC_CACHE_IS_ACQUIRING(entry)) { static unsigned long next; /* we're interrupted or upcall failed in the middle */ if (time_after(jiffies, next)) { CERROR("acquire timeout exceeded for key "LPU64 "\n", entry->ue_key); next = jiffies + 1800; } put_entry(entry); GOTO(out, entry = ERR_PTR(-EIDRM)); } /* fall through */ } /* invalid means error, don't need to try again */ if (UC_CACHE_IS_INVALID(entry)) { put_entry(entry); GOTO(out, entry = ERR_PTR(-EIDRM)); } /* check expired * We can't refresh the existing one because some * memory might be shared by multiple processes. */ if (check_unlink_entry(entry)) { /* if expired, try again. 
but if this entry is * created by me but too quickly turn to expired * without any error, should at least give a * chance to use it once. */ if (entry != new) { put_entry(entry); spin_unlock(&hash->uc_lock); new = NULL; goto find_again; } } /* Now we know it's good */out: spin_unlock(&hash->uc_lock); RETURN(entry);}EXPORT_SYMBOL(upcall_cache_get_entry);void upcall_cache_put_entry(struct upcall_cache *hash, struct upcall_cache_entry *entry){ ENTRY; if (!entry) { EXIT; return; } LASSERT(atomic_read(&entry->ue_refcount) > 0); spin_lock(&hash->uc_lock); put_entry(entry); spin_unlock(&hash->uc_lock); EXIT;}EXPORT_SYMBOL(upcall_cache_put_entry);int upcall_cache_downcall(struct upcall_cache *hash, __u32 err, __u64 key, __u32 primary, __u32 ngroups, __u32 *groups){ struct upcall_cache_entry *entry = NULL; struct list_head *head; int found = 0, rc = 0; ENTRY; LASSERT(hash); head = &hash->uc_hashtable[UC_CACHE_HASH_INDEX(key)]; spin_lock(&hash->uc_lock); list_for_each_entry(entry, head, ue_hash) { if (entry->ue_key == key) { found = 1; get_entry(entry); break; } } if (!found) { CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n", hash->uc_name, entry->ue_key); /* haven't found, it's possible */ spin_unlock(&hash->uc_lock); RETURN(-EINVAL); } if (err) { CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n", hash->uc_name, entry->ue_key, err); GOTO(out, rc = -EINVAL); } if (!UC_CACHE_IS_ACQUIRING(entry)) { CDEBUG(D_RPCTRACE,"%s: found uptodate entry %p (key "LPU64")\n", hash->uc_name, entry, entry->ue_key); GOTO(out, rc = 0); } if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) { CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n", hash->uc_name, entry, entry->ue_key); GOTO(out, rc = -EINVAL); } spin_unlock(&hash->uc_lock); rc = entry_set_group_info(entry, primary, ngroups, groups); spin_lock(&hash->uc_lock); if (rc) GOTO(out, rc); entry->ue_expire = jiffies + hash->uc_entry_expire; UC_CACHE_SET_VALID(entry); CDEBUG(D_OTHER, "%s: created upcall 
cache entry %p for key "LPU64"\n", hash->uc_name, entry, entry->ue_key);out: if (rc) { UC_CACHE_SET_INVALID(entry); list_del_init(&entry->ue_hash); } UC_CACHE_CLEAR_ACQUIRING(entry); spin_unlock(&hash->uc_lock); wake_up_all(&entry->ue_waitq); put_entry(entry); RETURN(rc);}EXPORT_SYMBOL(upcall_cache_downcall);static void cache_flush(struct upcall_cache *hash, int force){ struct upcall_cache_entry *entry, *next; int i; ENTRY; spin_lock(&hash->uc_lock); for (i = 0; i < UC_CACHE_HASH_SIZE; i++) { list_for_each_entry_safe(entry, next, &hash->uc_hashtable[i], ue_hash) { if (!force && atomic_read(&entry->ue_refcount)) { UC_CACHE_SET_EXPIRED(entry); continue; } LASSERT(!atomic_read(&entry->ue_refcount)); free_entry(entry); } } spin_unlock(&hash->uc_lock); EXIT;}void upcall_cache_flush_idle(struct upcall_cache *cache){ cache_flush(cache, 0);}EXPORT_SYMBOL(upcall_cache_flush_idle);void upcall_cache_flush_all(struct upcall_cache *cache){ cache_flush(cache, 1);}EXPORT_SYMBOL(upcall_cache_flush_all);struct upcall_cache *upcall_cache_init(const char *name){ struct upcall_cache *hash; int i; ENTRY; OBD_ALLOC(hash, sizeof(*hash)); if (!hash) RETURN(ERR_PTR(-ENOMEM)); spin_lock_init(&hash->uc_lock); for (i = 0; i < UC_CACHE_HASH_SIZE; i++) INIT_LIST_HEAD(&hash->uc_hashtable[i]); strncpy(hash->uc_name, name, sizeof(hash->uc_name) - 1); /* set default value, proc tunable */ strcpy(hash->uc_upcall, "NONE"); hash->uc_entry_expire = 10 * 60 * HZ; hash->uc_acquire_expire = 15 * HZ; RETURN(hash);}EXPORT_SYMBOL(upcall_cache_init);void upcall_cache_cleanup(struct upcall_cache *hash){ if (!hash) return; upcall_cache_flush_all(hash); OBD_FREE(hash, sizeof(*hash));}EXPORT_SYMBOL(upcall_cache_cleanup);
/*
 * (Code-viewer chrome removed: keyboard shortcut help — copy Ctrl+C,
 * search Ctrl+F, fullscreen F11, font size Ctrl+= / Ctrl+-.)
 */