/*
 * ldlm_pool.c -- Lustre 1.6.5 DLM lock-pool implementation.
 *
 * NOTE(review): this capture is page 1 of 3 of the original 1,146-line
 * file; the text below begins part-way through ldlm_pool_recalc(), whose
 * opening lines are on a page not shown here.
 */
                                    count);
                return count;
        }
        return 0;
}
EXPORT_SYMBOL(ldlm_pool_recalc);

/*
 * Ask pool @pl to cancel (shrink) up to @nr locks by calling its
 * ->po_shrink() method, if one is registered.
 *
 * Returns the value reported by ->po_shrink(), or 0 when the pool has no
 * shrink operation.  Statistics are only accounted when the caller asked
 * for actual cancellation (@nr > 0); presumably @nr <= 0 is a pure query
 * of the shrink callback -- confirm against the po_shrink implementations.
 */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                     unsigned int gfp_mask)
{
        int cancel = 0;

        if (pl->pl_ops->po_shrink != NULL) {
                cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
                if (nr > 0) {
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_REQTD_STAT,
                                            nr);
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_FREED_STAT,
                                            cancel);
                        CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
                               "shrunk %d\n", pl->pl_name, nr, cancel);
                }
        }
        return cancel;
}
EXPORT_SYMBOL(ldlm_pool_shrink);

/* The purpose of this function is to re-setup limit and maximal allowed
 * slv according to the passed limit.
 */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
        ENTRY;
        /* Delegate to the pool-type-specific setup method when present;
         * pools without one accept any limit silently. */
        if (pl->pl_ops->po_setup != NULL)
                RETURN(pl->pl_ops->po_setup(pl, limit));
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_pool_setup);

#ifdef __KERNEL__
/*
 * /proc read handler: dump a human-readable snapshot of pool @data
 * (a struct ldlm_pool) into @page.  Returns the number of bytes written.
 */
static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
                                 int count, int *eof, void *data)
{
        int granted, grant_rate, cancel_rate, grant_step;
        int nr = 0, grant_speed, grant_plan;
        struct ldlm_pool *pl = data;
        __u32 limit;
        __u64 slv;

        /* Snapshot all values under pl_lock so the printed state is
         * mutually consistent; format outside the lock. */
        spin_lock(&pl->pl_lock);
        slv = ldlm_pool_get_slv(pl);
        limit = ldlm_pool_get_limit(pl);
        grant_plan = pl->pl_grant_plan;
        grant_step = pl->pl_grant_step;
        granted = atomic_read(&pl->pl_granted);
        grant_rate = atomic_read(&pl->pl_grant_rate);
        grant_speed = atomic_read(&pl->pl_grant_speed);
        cancel_rate = atomic_read(&pl->pl_cancel_rate);
        spin_unlock(&pl->pl_lock);

        nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
                       pl->pl_name);
        nr += snprintf(page + nr, count - nr, "  SLV: "LPU64"\n", slv);
        /* LVF is read outside the locked snapshot above -- it is an
         * atomic, so this is only a minor consistency caveat. */
        nr += snprintf(page + nr, count - nr, "  LVF: %d\n",
                       atomic_read(&pl->pl_lock_volume_factor));
        nr += snprintf(page + nr, count - nr, "  GSP: %d%%\n",
                       grant_step);
        nr += snprintf(page + nr, count - nr, "  GP:  %d\n",
                       grant_plan);
        nr += snprintf(page + nr, count - nr, "  GR:  %d\n",
                       grant_rate);
        nr += snprintf(page + nr, count - nr, "  CR:  %d\n",
                       cancel_rate);
        nr += snprintf(page + nr, count - nr, "  GS:  %d\n",
                       grant_speed);
        nr += snprintf(page + nr, count - nr, "  G:   %d\n",
                       granted);
        nr += snprintf(page + nr, count - nr, "  L:   %d\n",
                       limit);
        return nr;
}

LDLM_POOL_PROC_READER(grant_plan,
int);
LDLM_POOL_PROC_READER(grant_step, int);
LDLM_POOL_PROC_WRITER(grant_step, int);

/*
 * Register the per-pool /proc tree ("pool/" under the namespace's proc
 * directory) and allocate the pool statistics counters.
 *
 * Returns 0 on success or a negative errno; on failure nothing is left
 * registered (cleanup happens via out_free_name).
 */
static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
        struct ldlm_namespace *ns = ldlm_pl2ns(pl);
        struct proc_dir_entry *parent_ns_proc;
        struct lprocfs_vars pool_vars[2];
        char *var_name = NULL;
        int rc = 0;
        ENTRY;

        OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
        if (!var_name)
                RETURN(-ENOMEM);

        parent_ns_proc = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
        if (parent_ns_proc == NULL) {
                CERROR("%s: proc entry is not initialized\n",
                       ns->ns_name);
                GOTO(out_free_name, rc = -EINVAL);
        }
        pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
                                           NULL, NULL);
        if (IS_ERR(pl->pl_proc_dir)) {
                CERROR("LProcFS failed in ldlm-pool-init\n");
                rc = PTR_ERR(pl->pl_proc_dir);
                GOTO(out_free_name, rc);
        }

        var_name[MAX_STRING_SIZE] = '\0';
        /* pool_vars[0] is reused for each proc entry; pool_vars[1] stays
         * zeroed and acts as the terminator for lprocfs_add_vars(). */
        memset(pool_vars, 0, sizeof(pool_vars));
        pool_vars[0].name = var_name;

        snprintf(var_name, MAX_STRING_SIZE, "server_lock_volume");
        pool_vars[0].data = &pl->pl_server_lock_volume;
        pool_vars[0].read_fptr = lprocfs_rd_u64;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        snprintf(var_name, MAX_STRING_SIZE, "limit");
        pool_vars[0].data = &pl->pl_limit;
        pool_vars[0].read_fptr = lprocfs_rd_atomic;
        pool_vars[0].write_fptr = lprocfs_wr_atomic;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        /* NOTE(review): write_fptr set for "limit" above is never cleared,
         * so the entries registered below inherit lprocfs_wr_atomic --
         * verify that making them writable is intended. */
        snprintf(var_name, MAX_STRING_SIZE, "granted");
        pool_vars[0].data = &pl->pl_granted;
        pool_vars[0].read_fptr = lprocfs_rd_atomic;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        snprintf(var_name, MAX_STRING_SIZE, "grant_speed");
        pool_vars[0].data = &pl->pl_grant_speed;
        pool_vars[0].read_fptr = lprocfs_rd_atomic;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        snprintf(var_name, MAX_STRING_SIZE, "cancel_rate");
        pool_vars[0].data = &pl->pl_cancel_rate;
        pool_vars[0].read_fptr = lprocfs_rd_atomic;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        snprintf(var_name, MAX_STRING_SIZE, "grant_rate");
        pool_vars[0].data = &pl->pl_grant_rate;
        pool_vars[0].read_fptr = lprocfs_rd_atomic;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        snprintf(var_name, MAX_STRING_SIZE, "grant_plan");
        pool_vars[0].data = pl;
        pool_vars[0].read_fptr = lprocfs_rd_grant_plan;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        snprintf(var_name, MAX_STRING_SIZE, "grant_step");
        pool_vars[0].data = pl;
        pool_vars[0].read_fptr = lprocfs_rd_grant_step;
        /* grant_step is writable on the server side only. */
        if (ns_is_server(ns))
                pool_vars[0].write_fptr = lprocfs_wr_grant_step;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        snprintf(var_name, MAX_STRING_SIZE, "lock_volume_factor");
        pool_vars[0].data = &pl->pl_lock_volume_factor;
        pool_vars[0].read_fptr = lprocfs_rd_atomic;
        pool_vars[0].write_fptr = lprocfs_wr_atomic;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        snprintf(var_name, MAX_STRING_SIZE, "state");
        pool_vars[0].data = pl;
        pool_vars[0].read_fptr = lprocfs_rd_pool_state;
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);

        /* Allocate and describe the per-pool statistics counters. */
        pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
                                           LDLM_POOL_FIRST_STAT, 0);
        if (!pl->pl_stats)
                GOTO(out_free_name, rc = -ENOMEM);

        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "granted", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_plan", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "slv", "slv");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_request", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_timing", "sec");
        lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);

        EXIT;
out_free_name:
        /* var_name is freed on both the success and failure paths --
         * presumably lprocfs_add_vars() copies the name string; confirm
         * against the lprocfs implementation. */
        OBD_FREE(var_name, MAX_STRING_SIZE + 1);
        return rc;
}

/* Tear down what ldlm_pool_proc_init() registered; safe to call when
 * init failed part-way (both pointers are NULL-checked). */
static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
{
        if
 (pl->pl_stats != NULL) {
                lprocfs_free_stats(&pl->pl_stats);
                pl->pl_stats = NULL;
        }
        if (pl->pl_proc_dir != NULL) {
                lprocfs_remove(&pl->pl_proc_dir);
                pl->pl_proc_dir = NULL;
        }
}
#else /* !__KERNEL__*/
#define ldlm_pool_proc_init(pl) (0)
#define ldlm_pool_proc_fini(pl) while (0) {}
#endif

/*
 * Initialize pool @pl belonging to namespace @ns.  @idx is used only to
 * build the pool name; @client selects server-side vs client-side pool
 * operations and initial SLV/limit values.
 *
 * Returns 0 on success or the negative errno from proc registration.
 */
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client)
{
        int rc;
        ENTRY;

        spin_lock_init(&pl->pl_lock);
        atomic_set(&pl->pl_granted, 0);
        pl->pl_recalc_time = cfs_time_current_sec();
        atomic_set(&pl->pl_lock_volume_factor, 1);
        atomic_set(&pl->pl_grant_rate, 0);
        atomic_set(&pl->pl_cancel_rate, 0);
        atomic_set(&pl->pl_grant_speed, 0);
        pl->pl_grant_step = LDLM_POOL_GSP;
        pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

        snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
                 ns->ns_name, idx);

        if (client == LDLM_NAMESPACE_SERVER) {
                pl->pl_ops = &ldlm_srv_pool_ops;
                ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
                ldlm_pool_set_slv(pl, ldlm_pool_slv_max(LDLM_POOL_HOST_L));
        } else {
                /* Client pools start with minimal SLV/limit of 1 --
                 * presumably the real values arrive from the server later;
                 * confirm against the client pool recalc path. */
                ldlm_pool_set_slv(pl, 1);
                ldlm_pool_set_limit(pl, 1);
                pl->pl_ops = &ldlm_cli_pool_ops;
        }

        rc = ldlm_pool_proc_init(pl);
        if (rc)
                RETURN(rc);

        CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
        RETURN(rc);
}
EXPORT_SYMBOL(ldlm_pool_init);

/* Release pool resources (proc entries and stats); the pool memory
 * itself is owned by the caller. */
void ldlm_pool_fini(struct ldlm_pool *pl)
{
        ENTRY;
        ldlm_pool_proc_fini(pl);
        pl->pl_ops = NULL;
        EXIT;
}
EXPORT_SYMBOL(ldlm_pool_fini);

/* Account a newly granted lock in the pool counters. */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /* FLOCK locks are special in a sense that they are almost never
         * cancelled, instead special kind of lock is used to drop them.
         * also there is no LRU for flock locks, so no point in tracking
         * them anyway */
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;
        ENTRY;

        atomic_inc(&pl->pl_granted);
        atomic_inc(&pl->pl_grant_rate);
        atomic_inc(&pl->pl_grant_speed);

        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);

        /* Do not do pool recalc for client side as all locks which
         * potentially may be canceled has already been packed into
         * enqueue/cancel rpc. Also we do not want to run out of stack
         * with too long call paths. */
        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
        EXIT;
}
EXPORT_SYMBOL(ldlm_pool_add);

/* Mirror of ldlm_pool_add(): drop the accounting for a cancelled lock.
 * FLOCK locks were never accounted, so they are skipped here too. */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;
        ENTRY;

        LASSERT(atomic_read(&pl->pl_granted) > 0);
        atomic_dec(&pl->pl_granted);
        atomic_inc(&pl->pl_cancel_rate);
        atomic_dec(&pl->pl_grant_speed);

        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);

        /* Server-only recalc, same reasoning as in ldlm_pool_add(). */
        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
        EXIT;
}
EXPORT_SYMBOL(ldlm_pool_del);

/* ->pl_lock should be taken. */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
        return pl->pl_server_lock_volume;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);

/* ->pl_lock should be taken. */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
        pl->pl_server_lock_volume = slv;
}
EXPORT_SYMBOL(ldlm_pool_set_slv);

/* Limit is an atomic, so no lock is required for the accessors below. */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_limit);
}
EXPORT_SYMBOL(ldlm_pool_get_limit);

void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
        atomic_set(&pl->pl_limit, limit);
}
EXPORT_SYMBOL(ldlm_pool_set_limit);

/* Server side is only enabled for kernel space for now.
 */
#ifdef __KERNEL__
/* Number of locks currently granted in pool @pl. */
static int ldlm_pool_granted(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_granted);
}

/* State shared by the pools control thread and the lock shrinkers. */
static struct ptlrpc_thread *ldlm_pools_thread;
static struct shrinker *ldlm_pools_srv_shrinker;
static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;

/*
 * Signal the pools control thread (set SVC_EVENT and wake its waitq).
 * No-op when the thread has not been started.
 */
void ldlm_pools_wakeup(void)
{
        ENTRY;
        if (ldlm_pools_thread == NULL)
                return;
        ldlm_pools_thread->t_flags |= SVC_EVENT;
        cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
        EXIT;
}
EXPORT_SYMBOL(ldlm_pools_wakeup);

/* Cancel @nr locks from all namespaces (if possible). Returns number of
 * cached locks after shrink is finished. All namespaces are asked to
 * cancel approximately equal amount of locks. */
/*
 * NOTE(review): end of page 1 of 3 of the captured source.  The function
 * described by the comment above (the global shrink helper) and the rest
 * of the file continue on the following pages.
 */