⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 lustre_net.h

📁 lustre 1.6.5 source code
💻 H
📖 第 1 页 / 共 3 页
字号:
        int rq_early_count;           /* how many early replies (for stats) */

        /* client+server request */
        lnet_handle_md_t     rq_req_md_h;
        struct ptlrpc_cb_id  rq_req_cbid;

        /* server-side... */
        struct timeval       rq_arrival_time;       /* request arrival time */
        struct ptlrpc_reply_state *rq_reply_state;  /* separated reply state */
        struct ptlrpc_request_buffer_desc *rq_rqbd; /* incoming request buffer */
#ifdef CRAY_XT3
        __u32                rq_uid;            /* peer uid, used in MDS only */
#endif

        /* client-only incoming reply */
        lnet_handle_md_t     rq_reply_md_h;
        cfs_waitq_t          rq_reply_waitq;
        struct ptlrpc_cb_id  rq_reply_cbid;

        lnet_nid_t           rq_self;
        lnet_process_id_t    rq_peer;
        struct obd_export   *rq_export;
        struct obd_import   *rq_import;

        /* replay/commit hooks; rq_cb_data is opaque context for them */
        void (*rq_replay_cb)(struct ptlrpc_request *);
        void (*rq_commit_cb)(struct ptlrpc_request *);
        void  *rq_cb_data;

        struct ptlrpc_bulk_desc *rq_bulk;       /* client side bulk */

        /* client outgoing req */
        time_t rq_sent;                         /* when request sent, seconds,
                                                 * or time when request should
                                                 * be sent */
        volatile time_t rq_deadline;     /* when request must finish.  volatile
                                          * so that servers' early reply
                                          * updates to the deadline aren't
                                          * kept in per-cpu cache */
        int    rq_timeout;               /* service time estimate (secs) */

        /* Multi-rpc bits */
        struct list_head rq_set_chain;
        struct ptlrpc_request_set *rq_set;
        void *rq_interpret_reply;               /* Async completion handler */
        union ptlrpc_async_args rq_async_args;  /* Async completion context */
        struct ptlrpc_request_pool *rq_pool;    /* Pool if request from
                                                 * preallocated list */
};      /* end of struct ptlrpc_request (declaration begins above this excerpt) */

/*
 * Mark message buffer 'index' of the outgoing request as byte-swapped.
 * NOTE(review): rq_req_swab_mask appears to hold one bit per message buffer;
 * the LASSERTs enforce a valid index and that a buffer is marked only once.
 */
static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
{
        LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
        LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
        req->rq_req_swab_mask |= 1 << index;
}

/* Same as above, but for a buffer of the incoming reply (rq_rep_swab_mask). */
static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
{
        LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
        LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
        req->rq_rep_swab_mask |= 1 << index;
}

/* Non-zero iff request buffer 'index' has already been byte-swapped. */
static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
{
        LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
        return req->rq_req_swab_mask & (1 << index);
}

/* Non-zero iff reply buffer 'index' has already been byte-swapped. */
static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
{
        LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
        return req->rq_rep_swab_mask & (1 << index);
}

/* Human-readable name of the request's current RPC phase (for logging). */
static inline const char *ptlrpc_rqphase2str(struct ptlrpc_request *req)
{
        switch (req->rq_phase) {
        case RQ_PHASE_NEW:
                return "New";
        case RQ_PHASE_RPC:
                return "Rpc";
        case RQ_PHASE_BULK:
                return "Bulk";
        case RQ_PHASE_INTERPRET:
                return "Interpret";
        case RQ_PHASE_COMPLETE:
                return "Complete";
        default:
                return "?Phase?";
        }
}

/* Spare the preprocessor, spoil the bugs.
*/
/* Expand to 'str' when boolean 'field' is set, else to "" (for DEBUG_REQ). */
#define FLAG(field, str) (field ? str : "")

/*
 * Argument list matching REQ_FLAGS_FMT below: the request phase followed by
 * nine one-letter state flags.  Keep the two in sync.
 */
#define DEBUG_REQ_FLAGS(req)                                                  \
        ptlrpc_rqphase2str(req),                                              \
        FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"),                  \
        FLAG(req->rq_err, "E"),                                               \
        FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
        FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"),                \
        FLAG(req->rq_no_resend, "N"),                                         \
        FLAG(req->rq_waiting, "W")

/* Ten %s conversions: one for the phase name, one per flag above. */
#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s"

/* Low-level request logger; 'fmt' and trailing arguments are printf-style. */
void _debug_req(struct ptlrpc_request *req, __u32 mask,
                struct libcfs_debug_msg_data *data, const char *fmt, ...)
        __attribute__ ((format (printf, 4, 5)));

/*
 * Emit a debug message for 'req' iff 'level' passes the current debug and
 * subsystem masks (D_CANTMASK levels are always emitted).
 */
#define debug_req(cdls, level, req, file, func, line, fmt, a...)              \
do {                                                                          \
        CHECK_STACK();                                                        \
                                                                              \
        if (((level) & D_CANTMASK) != 0 ||                                    \
            ((libcfs_debug & (level)) != 0 &&                                 \
             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) {              \
                static struct libcfs_debug_msg_data _req_dbg_data =           \
                DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \
                _debug_req((req), (level), &_req_dbg_data, fmt, ##a);         \
        }                                                                     \
} while(0)

/* for most callers (level is a constant) this is resolved at compile time */
/* Errors and warnings go through a per-site rate limiter (cdls); other
 * levels are logged unconditionally (subject to debug_req's mask checks). */
#define DEBUG_REQ(level, req, fmt, args...)                                   \
do {                                                                          \
        if ((level) & (D_ERROR | D_WARNING)) {                                \
            static cfs_debug_limit_state_t cdls;                              \
            debug_req(&cdls, level, req, __FILE__, __func__, __LINE__,        \
                      "@@@ "fmt" ", ## args);                                 \
        } else                                                                \
            debug_req(NULL, level, req, __FILE__, __func__, __LINE__,         \
                      "@@@ "fmt" ", ## args);                                 \
} while (0)

/* One page of a bulk transfer. */
struct ptlrpc_bulk_page {
        struct list_head bp_link;
        int bp_buflen;
        int bp_pageoffset;                      /* offset within a page */
        struct page *bp_page;
};

/* Bulk transfer role/direction, stored in ptlrpc_bulk_desc::bd_type. */
#define BULK_GET_SOURCE   0
#define BULK_PUT_SINK     1
#define BULK_GET_SINK     2
#define BULK_PUT_SOURCE   3

/* Descriptor for one bulk (GET/PUT) data transfer. */
struct ptlrpc_bulk_desc {
        unsigned long bd_success:1;              /* completed successfully */
        unsigned long bd_network_rw:1;           /* accessible to the network */
        unsigned long bd_type:2;                 /* {put,get}{source,sink} */
        unsigned long bd_registered:1;           /* client side */
        spinlock_t   bd_lock;                   /* serialise with callback */
        int bd_import_generation;
        struct obd_export *bd_export;
        struct obd_import *bd_import;
        __u32 bd_portal;
        struct ptlrpc_request *bd_req;          /* associated request */

        cfs_waitq_t            bd_waitq;        /* server side only WQ */
        int                    bd_iov_count;    /* # entries in bd_iov */
        int                    bd_max_iov;      /* allocated size of bd_iov */
        int                    bd_nob;          /* # bytes covered */
        int                    bd_nob_transferred; /* # bytes GOT/PUT */

        __u64                  bd_last_xid;

        struct ptlrpc_cb_id    bd_cbid;
/* network callback info */
        lnet_handle_md_t       bd_md_h;         /* associated MD */
        lnet_nid_t             bd_sender;       /* stash event::sender */

        /* variable-length tail; bd_max_iov entries allocated (pre-C99
         * zero-length-array idiom for a flexible array member) */
#if defined(__KERNEL__)
        lnet_kiov_t             bd_iov[0];
#else
        lnet_md_iovec_t         bd_iov[0];
#endif
};

/* One service thread. */
struct ptlrpc_thread {
        struct list_head t_link; /* active threads for service, from svc->srv_threads */
        void *t_data;            /* thread-private data (preallocated memory) */
        __u32 t_flags;
        unsigned int t_id; /* service thread index, from ptlrpc_start_threads */
        cfs_waitq_t t_ctl_waitq;
};

/* One receive buffer posted for incoming requests. */
struct ptlrpc_request_buffer_desc {
        struct list_head       rqbd_list;
        struct list_head       rqbd_reqs;
        struct ptlrpc_service *rqbd_service;
        lnet_handle_md_t       rqbd_md_h;
        int                    rqbd_refcount;
        char                  *rqbd_buffer;
        struct ptlrpc_cb_id    rqbd_cbid;
        struct ptlrpc_request  rqbd_req;
};

/* Request handler supplied by each service. */
typedef int (*svc_handler_t)(struct ptlrpc_request *req);
/* Service-specific pretty-printer for request history entries. */
typedef void (*svcreq_printfn_t)(void *, struct ptlrpc_request *);

/* State of one ptlrpc service: buffers, queues, threads and statistics. */
struct ptlrpc_service {
        struct list_head srv_list;              /* chain thru all services */
        int              srv_max_req_size;      /* biggest request to receive */
        int              srv_max_reply_size;    /* biggest reply to send */
        int              srv_buf_size;          /* size of individual buffers */
        int              srv_nbuf_per_group;    /* # buffers to allocate in 1 group */
        int              srv_nbufs;             /* total # req buffer descs allocated */
        int              srv_threads_min;       /* threads to start at SOW */
        int              srv_threads_max;       /* thread upper limit */
        int              srv_threads_started;   /* index of last started thread */
        int              srv_threads_running;   /* # running threads */
        int              srv_n_difficult_replies; /* # 'difficult' replies */
        int              srv_n_active_reqs;     /* # reqs being served */
        cfs_duration_t   srv_rqbd_timeout;      /* timeout before re-posting reqs, in tick */
        int              srv_watchdog_factor;   /* soft watchdog timeout mutiplier */
        unsigned         srv_cpu_affinity:1;    /* bind threads to CPUs */
        unsigned         srv_at_check:1;        /* check early replies */
        __u32            srv_req_portal;
        __u32            srv_rep_portal;

        /* AT stuff */
        struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
        spinlock_t        srv_at_lock;
        struct list_head  srv_at_list;          /* reqs waiting for replies */
        cfs_timer_t       srv_at_timer;         /* early reply timer */

        int               srv_n_queued_reqs;    /* # reqs in either of the queues below */
        struct list_head  srv_req_in_queue;     /* incoming reqs */
        struct list_head  srv_request_queue;    /* reqs waiting for service */

        struct list_head  srv_request_history;  /* request history */
        __u64             srv_request_seq;      /* next request sequence # */
        __u64             srv_request_max_cull_seq; /* highest seq culled from history */
        svcreq_printfn_t  srv_request_history_print_fn; /* service-specific print fn */

        struct list_head  srv_idle_rqbds;       /* request buffers to be reposted */
        struct list_head  srv_active_rqbds;     /* req buffers receiving */
        struct list_head  srv_history_rqbds;    /* request buffer history */
        int               srv_nrqbd_receiving;  /* # posted request buffers */
        int               srv_n_history_rqbds;  /* # request buffers in history */
        int               srv_max_history_rqbds;/* max # request buffers in history */

        atomic_t          srv_outstanding_replies;
        struct list_head  srv_active_replies;   /* all the active replies */
        struct list_head  srv_reply_queue;      /* replies waiting for service */

        cfs_waitq_t       srv_waitq; /* all threads sleep on this. This
                                      * wait-queue is signalled when new
                                      * incoming request arrives and when
                                      * difficult reply has to be handled. */

        struct list_head   srv_threads;         /* service thread list */
        svc_handler_t      srv_handler;

        char *srv_name;  /* only statically allocated strings here; we don't clean them */
        char *srv_thread_name;  /* only statically allocated strings here; we don't clean them */

        spinlock_t               srv_lock;
        cfs_proc_dir_entry_t    *srv_procroot;
        struct lprocfs_stats    *srv_stats;

        /* List of free reply_states */
        struct list_head         srv_free_rs_list;
        /* waitq to run, when adding stuff to srv_free_rs_list */
        cfs_waitq_t              srv_free_rs_waitq;

        /*
         * if non-NULL called during thread creation (ptlrpc_start_thread())
         * to initialize service specific per-thread state.
         */
        int (*srv_init)(struct ptlrpc_thread *thread);
        /*
         * if non-NULL called during thread shutdown (ptlrpc_main()) to
         * destruct state created by ->srv_init().
         */
        void (*srv_done)(struct ptlrpc_thread *thread);
        //struct ptlrpc_srv_ni srv_interfaces[0];
};

/* ptlrpc/events.c */
extern lnet_handle_eq_t ptlrpc_eq_h;
extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
                               lnet_process_id_t *peer, lnet_nid_t *self);

/* LNet event callbacks, one per message direction and side. */
extern void request_out_callback (lnet_event_t *ev);
extern void reply_in_callback(lnet_event_t *ev);
extern void client_bulk_callback (lnet_event_t *ev);
extern void request_in_callback(lnet_event_t *ev);
extern void reply_out_callback(lnet_event_t *ev);
extern void server_bulk_callback (lnet_event_t *ev);

/* ptlrpc/connection.c */
void ptlrpc_dump_connections(void);
void ptlrpc_readdress_connection(struct ptlrpc_connection *, struct obd_uuid *);
struct ptlrpc_connection *ptlrpc_get_connection(lnet_process_id_t peer,
                                                lnet_nid_t self, struct obd_uuid *uuid);
int ptlrpc_put_connection(struct ptlrpc_connection *c);
struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
int ptlrpc_init_connection(void);
void ptlrpc_cleanup_connection(void);
extern lnet_pid_t ptl_get_pid(void);

/* ptlrpc/niobuf.c */
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
int ptlrpc_register_bulk(struct ptlrpc_request *req);
void ptlrpc_unregister_bulk (struct ptlrpc_request *req);
static inline int ptlrpc_bulk_active (struct ptlrpc_bulk_desc *desc)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -