/* lproc_ptlrpc.c -- Lustre 1.6.5 ptlrpc /proc statistics support.
 * (Page 1 of 2 of a 635-line file, recovered from a web code viewer;
 * viewer chrome removed.) */
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- * vim:expandtab:shiftwidth=8:tabstop=8: * *  Copyright (C) 2002 Cluster File Systems, Inc. * *   This file is part of the Lustre file system, http://www.lustre.org *   Lustre is a trademark of Cluster File Systems, Inc. * *   You may have signed or agreed to another license before downloading *   this software.  If so, you are bound by the terms and conditions *   of that agreement, and the following does not apply to you.  See the *   LICENSE file included with this distribution for more information. * *   If you did not agree to a different license, then this copy of Lustre *   is open source software; you can redistribute it and/or modify it *   under the terms of version 2 of the GNU General Public License as *   published by the Free Software Foundation. * *   In either case, Lustre is distributed in the hope that it will be *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the *   license text for more details. 
* */#define DEBUG_SUBSYSTEM S_CLASS#ifndef __KERNEL__# include <liblustre.h>#endif#include <obd_support.h>#include <obd.h>#include <lprocfs_status.h>#include <lustre/lustre_idl.h>#include <lustre_net.h>#include <obd_class.h>#include "ptlrpc_internal.h"struct ll_rpc_opcode {     __u32       opcode;     const char *opname;} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {        { OST_REPLY,        "ost_reply" },        { OST_GETATTR,      "ost_getattr" },        { OST_SETATTR,      "ost_setattr" },        { OST_READ,         "ost_read" },        { OST_WRITE,        "ost_write" },        { OST_CREATE ,      "ost_create" },        { OST_DESTROY,      "ost_destroy" },        { OST_GET_INFO,     "ost_get_info" },        { OST_CONNECT,      "ost_connect" },        { OST_DISCONNECT,   "ost_disconnect" },        { OST_PUNCH,        "ost_punch" },        { OST_OPEN,         "ost_open" },        { OST_CLOSE,        "ost_close" },        { OST_STATFS,       "ost_statfs" },        { 14,                NULL },    /* formerly OST_SAN_READ */        { 15,                NULL },    /* formerly OST_SAN_WRITE */        { OST_SYNC,         "ost_sync" },        { OST_SET_INFO,     "ost_set_info" },        { OST_QUOTACHECK,   "ost_quotacheck" },        { OST_QUOTACTL,     "ost_quotactl" },        { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },        { MDS_GETATTR,      "mds_getattr" },        { MDS_GETATTR_NAME, "mds_getattr_lock" },        { MDS_CLOSE,        "mds_close" },        { MDS_REINT,        "mds_reint" },        { MDS_READPAGE,     "mds_readpage" },        { MDS_CONNECT,      "mds_connect" },        { MDS_DISCONNECT,   "mds_disconnect" },        { MDS_GETSTATUS,    "mds_getstatus" },        { MDS_STATFS,       "mds_statfs" },        { MDS_PIN,          "mds_pin" },        { MDS_UNPIN,        "mds_unpin" },        { MDS_SYNC,         "mds_sync" },        { MDS_DONE_WRITING, "mds_done_writing" },        { MDS_SET_INFO,     "mds_set_info" },        { MDS_QUOTACHECK,   
"mds_quotacheck" },        { MDS_QUOTACTL,     "mds_quotactl" },        { MDS_GETXATTR,     "mds_getxattr" },        { MDS_SETXATTR,     "mds_setxattr" },        { LDLM_ENQUEUE,     "ldlm_enqueue" },        { LDLM_CONVERT,     "ldlm_convert" },        { LDLM_CANCEL,      "ldlm_cancel" },        { LDLM_BL_CALLBACK, "ldlm_bl_callback" },        { LDLM_CP_CALLBACK, "ldlm_cp_callback" },        { LDLM_GL_CALLBACK, "ldlm_gl_callback" },        { MGS_CONNECT,      "mgs_connect" },        { MGS_DISCONNECT,   "mgs_disconnect" },        { MGS_EXCEPTION,    "mgs_exception" },        { MGS_TARGET_REG,   "mgs_target_reg" },        { MGS_TARGET_DEL,   "mgs_target_del" },        { MGS_SET_INFO,     "mgs_set_info" },        { OBD_PING,         "obd_ping" },        { OBD_LOG_CANCEL,   "llog_origin_handle_cancel" },        { OBD_QC_CALLBACK,  "obd_quota_callback" },        { LLOG_ORIGIN_HANDLE_CREATE,     "llog_origin_handle_create" },        { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block"},        { LLOG_ORIGIN_HANDLE_READ_HEADER,"llog_origin_handle_read_header" },        { LLOG_ORIGIN_HANDLE_WRITE_REC,  "llog_origin_handle_write_rec" },        { LLOG_ORIGIN_HANDLE_CLOSE,      "llog_origin_handle_close" },        { LLOG_ORIGIN_CONNECT,           "llog_origin_connect" },        { LLOG_CATINFO,                  "llog_catinfo" },        { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" },        { LLOG_ORIGIN_HANDLE_DESTROY,    "llog_origin_handle_destroy" },};struct ll_eopcode {     __u32       opcode;     const char *opname;} ll_eopcode_table[EXTRA_LAST_OPC] = {        { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },        { LDLM_PLAIN_ENQUEUE,   "ldlm_plain_enqueue" },        { LDLM_EXTENT_ENQUEUE,  "ldlm_extent_enqueue" },        { LDLM_FLOCK_ENQUEUE,   "ldlm_flock_enqueue" },        { LDLM_IBITS_ENQUEUE,   "ldlm_ibits_enqueue" },        { MDS_REINT_CREATE,     "mds_reint_create" },        { MDS_REINT_LINK,       "mds_reint_link" },        { 
MDS_REINT_OPEN,       "mds_reint_open" },        { MDS_REINT_SETATTR,    "mds_reint_setattr" },        { MDS_REINT_RENAME,     "mds_reint_rename" },        { MDS_REINT_UNLINK,     "mds_reint_unlink" }};const char *ll_opcode2str(__u32 opcode){        /* When one of the assertions below fail, chances are that:         *     1) A new opcode was added in lustre_idl.h, but was         *        is missing from the table above.         * or  2) The opcode space was renumbered or rearranged,         *        and the opcode_offset() function in         *        ptlrpc_internal.h needs to be modified.         */        __u32 offset = opcode_offset(opcode);        LASSERT(offset < LUSTRE_MAX_OPCODES);        LASSERT(ll_rpc_opcode_table[offset].opcode == opcode);        return ll_rpc_opcode_table[offset].opname;}const char* ll_eopcode2str(__u32 opcode){        LASSERT(ll_eopcode_table[opcode].opcode == opcode);        return ll_eopcode_table[opcode].opname;}#ifdef LPROCFSvoid ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,                             char *name, struct proc_dir_entry **procroot_ret,                             struct lprocfs_stats **stats_ret){        struct proc_dir_entry *svc_procroot;        struct lprocfs_stats *svc_stats;        int i, rc;        unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |                                          LPROCFS_CNTR_STDDEV;        LASSERT(*procroot_ret == NULL);        LASSERT(*stats_ret == NULL);        svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES, 0);        if (svc_stats == NULL)                return;        if (dir) {                svc_procroot = lprocfs_register(dir, root, NULL, NULL);                if (IS_ERR(svc_procroot)) {                        lprocfs_free_stats(&svc_stats);                        return;                }        } else {                svc_procroot = root;        }        lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,                    
         svc_counter_config, "req_waittime", "usec");        lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,                             svc_counter_config, "req_qdepth", "reqs");        lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,                             svc_counter_config, "req_active", "reqs");        lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT,                             svc_counter_config, "req_timeout", "sec");        lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,                             svc_counter_config, "reqbuf_avail", "bufs");        for (i = 0; i < EXTRA_LAST_OPC; i++) {                lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,                                     svc_counter_config,                                     ll_eopcode2str(i), "reqs");        }        for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {                __u32 opcode = ll_rpc_opcode_table[i].opcode;                lprocfs_counter_init(svc_stats,                                     EXTRA_MAX_OPCODES + i, svc_counter_config,                                     ll_opcode2str(opcode), "usec");        }        rc = lprocfs_register_stats(svc_procroot, name, svc_stats);        if (rc < 0) {                if (dir)                        lprocfs_remove(&svc_procroot);                lprocfs_free_stats(&svc_stats);        } else {                if (dir)                        *procroot_ret = svc_procroot;                *stats_ret = svc_stats;        }}static intptlrpc_lprocfs_read_req_history_len(char *page, char **start, off_t off,                                    int count, int *eof, void *data){        struct ptlrpc_service *svc = data;        *eof = 1;        return snprintf(page, count, "%d\n", svc->srv_n_history_rqbds);}static intptlrpc_lprocfs_read_req_history_max(char *page, char **start, off_t off,                                    int count, int *eof, void *data){        struct ptlrpc_service *svc = data;        *eof = 1;        
return snprintf(page, count, "%d\n", svc->srv_max_history_rqbds);}static intptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer,                                     unsigned long count, void *data){        struct ptlrpc_service *svc = data;        int                    bufpages;        int                    val;        int                    rc = lprocfs_write_helper(buffer, count, &val);        if (rc < 0)                return rc;        if (val < 0)                return -ERANGE;        /* This sanity check is more of an insanity check; we can still         * hose a kernel by allowing the request history to grow too         * far. */        bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;        if (val > num_physpages/(2*bufpages))                return -ERANGE;        spin_lock(&svc->srv_lock);        svc->srv_max_history_rqbds = val;        spin_unlock(&svc->srv_lock);        return count;}struct ptlrpc_srh_iterator {        __u64                  srhi_seq;        struct ptlrpc_request *srhi_req;};intptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc,                                    struct ptlrpc_srh_iterator *srhi,                                    __u64 seq){        struct list_head      *e;        struct ptlrpc_request *req;        if (srhi->srhi_req != NULL &&            srhi->srhi_seq > svc->srv_request_max_cull_seq &&            srhi->srhi_seq <= seq) {                /* If srhi_req was set previously, hasn't been culled and                 * we're searching for a seq on or after it (i.e. more                 * recent), search from it onwards.                 * Since the service history is LRU (i.e. 
culled reqs will                 * be near the head), we shouldn't have to do long                 * re-scans */                LASSERT (srhi->srhi_seq == srhi->srhi_req->rq_history_seq);                LASSERT (!list_empty(&svc->srv_request_history));                e = &srhi->srhi_req->rq_history_list;        } else {                /* search from start */                e = svc->srv_request_history.next;        }        while (e != &svc->srv_request_history) {                req = list_entry(e, struct ptlrpc_request, rq_history_list);                if (req->rq_history_seq >= seq) {                        srhi->srhi_seq = req->rq_history_seq;                        srhi->srhi_req = req;                        return 0;                }                e = e->next;        }        return -ENOENT;}static void *ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos){        struct ptlrpc_service       *svc = s->private;        struct ptlrpc_srh_iterator  *srhi;        int                          rc;        OBD_ALLOC(srhi, sizeof(*srhi));        if (srhi == NULL)                return NULL;        srhi->srhi_seq = 0;        srhi->srhi_req = NULL;        spin_lock(&svc->srv_lock);        rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos);        spin_unlock(&svc->srv_lock);

/* (Web-viewer shortcut-key help removed.  Page 1 of 2 ends mid-function;
 * the remainder of ptlrpc_lprocfs_svc_req_history_start() and the rest of
 * the file are on page 2.) */