
lustre_net.h

lustre 1.6.5 source code
Page 1 of 3
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#ifndef _LUSTRE_NET_H
#define _LUSTRE_NET_H

#if defined(__linux__)
#include <linux/lustre_net.h>
#elif defined(__APPLE__)
#include <darwin/lustre_net.h>
#elif defined(__WINNT__)
#include <winnt/lustre_net.h>
#else
#error Unsupported operating system.
#endif

#include <libcfs/kp30.h>
// #include <obd.h>
#include <lnet/lnet.h>
#include <lustre/lustre_idl.h>
#include <lustre_ha.h>
#include <lustre_import.h>
#include <lprocfs_status.h>

/* MD flags we _always_ use */
#define PTLRPC_MD_OPTIONS  0

/* Define maxima for bulk I/O
 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
 * these limits are system wide and not interface-local. */
#define PTLRPC_MAX_BRW_BITS     LNET_MTU_BITS
#define PTLRPC_MAX_BRW_SIZE     (1<<LNET_MTU_BITS)
#define PTLRPC_MAX_BRW_PAGES    (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)

/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
#ifdef __KERNEL__
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
#  error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
#  error "PTLRPC_MAX_BRW_SIZE too big"
# endif
# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
#  error "PTLRPC_MAX_BRW_PAGES too big"
# endif
#endif /* __KERNEL__ */
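/* Editor's note, not part of the original header: a worked example of the
 * arithmetic above, assuming LNET_MTU_BITS == 20 (1 MiB LNET MTU) and
 * CFS_PAGE_SHIFT == 12 (4 KiB pages, typical for x86 builds):
 *
 *   PTLRPC_MAX_BRW_SIZE  = 1 << 20       = 1048576 bytes (1 MiB)
 *   PTLRPC_MAX_BRW_PAGES = 1048576 >> 12 = 256 pages
 *
 * 256 is a power of two and 256 * 4096 == 1048576, so under these
 * assumptions all four compile-time checks above pass. */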
/* Size over which to OBD_VMALLOC() rather than OBD_ALLOC() service request
 * buffers */
#define SVC_BUF_VMALLOC_THRESHOLD (2 * CFS_PAGE_SIZE)

/* The following constants determine how memory is used to buffer incoming
 * service requests.
 *
 * ?_NBUFS              # buffers to allocate when growing the pool
 * ?_BUFSIZE            # bytes in a single request buffer
 * ?_MAXREQSIZE         # maximum request service will receive
 *
 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
 * of ?_NBUFS is added to the pool.
 *
 * Messages larger than ?_MAXREQSIZE are dropped.  Request buffers are
 * considered full when less than ?_MAXREQSIZE is left in them.
 */

#define LDLM_THREADS_AUTO_MIN                                                 \
        min((int)(num_online_cpus() * num_online_cpus() * 2), 8)
#define LDLM_THREADS_AUTO_MAX (LDLM_THREADS_AUTO_MIN * 16)
#define LDLM_BL_THREADS  LDLM_THREADS_AUTO_MIN
#define LDLM_NBUFS      (64 * num_online_cpus())
#define LDLM_BUFSIZE    (8 * 1024)
#define LDLM_MAXREQSIZE (5 * 1024)
#define LDLM_MAXREPSIZE (1024)

/* Absolute limits */
#define MDS_THREADS_MIN 2
#define MDS_THREADS_MAX 512
#define MDS_THREADS_MIN_READPAGE 2
#define MDS_NBUFS       (64 * num_online_cpus())
#define MDS_BUFSIZE     (8 * 1024)
/* Assume file name length = FNAME_MAX = 256 (true for ext3).
 *        path name length = PATH_MAX  = 4096
 *        LOV MD size max  = EA_MAX    = 4000
 * symlink:  FNAME_MAX + PATH_MAX  <- largest
 * link:     FNAME_MAX + PATH_MAX  (mds_rec_link < mds_rec_create)
 * rename:   FNAME_MAX + FNAME_MAX
 * open:     FNAME_MAX + EA_MAX
 *
 * MDS_MAXREQSIZE ~= 4736 bytes =
 * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
 *
 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
 * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
 *      = 9210 bytes = lustre_msg + mds_body + 160 * (easize + cookiesize)
 *
 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
 * except in the open case where there are a large number of OSTs in a LOV.
 */
#define MDS_MAXREQSIZE  (5 * 1024)
#define MDS_MAXREPSIZE  max(9 * 1024, 280 + LOV_MAX_STRIPE_COUNT * 56)

#define MGS_THREADS_AUTO_MIN 2
#define MGS_THREADS_AUTO_MAX 32
#define MGS_NBUFS       (64 * num_online_cpus())
#define MGS_BUFSIZE     (8 * 1024)
#define MGS_MAXREQSIZE  (8 * 1024)
#define MGS_MAXREPSIZE  (9 * 1024)

/* Absolute limits */
#define OSS_THREADS_MIN 2
#define OSS_THREADS_MAX 512
#define OST_NBUFS       (64 * num_online_cpus())
#define OST_BUFSIZE     (8 * 1024)
/* OST_MAXREQSIZE ~= 4768 bytes =
 * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
 *
 * - single object with 16 pages is 512 bytes
 * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover */
#define OST_MAXREQSIZE  (5 * 1024)
#define OST_MAXREPSIZE  (9 * 1024)

struct ptlrpc_connection {
        struct list_head        c_link;
        struct hlist_node       c_hash;
        lnet_nid_t              c_self;
        lnet_process_id_t       c_peer;
        struct obd_uuid         c_remote_uuid;
        atomic_t                c_refcount;
};

struct ptlrpc_client {
        __u32                     cli_request_portal;
        __u32                     cli_reply_portal;
        char                     *cli_name;
};

/* state flags of requests */
/* XXX only ones left are those used by the bulk descs as well! */
#define PTL_RPC_FL_INTR      (1 << 0)  /* reply wait was interrupted by user */
#define PTL_RPC_FL_TIMEOUT   (1 << 7)  /* request timed out waiting for reply */

#define REQ_MAX_ACK_LOCKS 8

union ptlrpc_async_args {
        /* Scratchpad for passing args to completion interpreter. Users
         * cast to the struct of their choosing, and LASSERT that this is
         * big enough.  For _tons_ of context, OBD_ALLOC a struct and store
         * a pointer to it here.  The pointer_arg ensures this struct is at
         * least big enough for that. */
        void      *pointer_arg[9];
        __u64      space[4];
};
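/* Editor's illustrative sketch, not part of the original header: the usage
 * pattern the comment above describes.  It assumes the rq_async_args and
 * rq_interpret_reply members that struct ptlrpc_request carries further on
 * in this header; struct my_ctx and my_issue()/my_interpret() are
 * hypothetical names. */
#if 0
struct my_ctx {
        struct obd_export *mc_exp;
        int                mc_flags;
};

static int my_interpret(struct ptlrpc_request *req, void *data, int rc)
{
        /* recover the scratchpad stashed in the request at issue time */
        struct my_ctx *ctx = (struct my_ctx *)&req->rq_async_args;

        /* ctx->mc_exp and ctx->mc_flags survived unchanged */
        return rc;
}

static void my_issue(struct ptlrpc_request *req, struct obd_export *exp)
{
        struct my_ctx *ctx;

        /* the union must be big enough to hold our scratchpad */
        CLASSERT(sizeof(*ctx) <= sizeof(union ptlrpc_async_args));
        ctx = (struct my_ctx *)&req->rq_async_args;
        ctx->mc_exp   = exp;
        ctx->mc_flags = 0;
        req->rq_interpret_reply = my_interpret;
}
#endif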
struct ptlrpc_request_set;
typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);

struct ptlrpc_request_set {
        int               set_remaining; /* # uncompleted requests */
        cfs_waitq_t       set_waitq;
        cfs_waitq_t      *set_wakeup_ptr;
        struct list_head  set_requests;
        struct list_head  set_cblist; /* list of completion callbacks */
        set_interpreter_func    set_interpret; /* completion callback */
        void              *set_arg; /* completion context */
        /* locked so that any old caller can communicate requests to
         * the set holder who can then fold them into the lock-free set */
        spinlock_t        set_new_req_lock;
        struct list_head  set_new_requests;
};

struct ptlrpc_set_cbdata {
        struct list_head        psc_item;
        set_interpreter_func    psc_interpret;
        void                   *psc_data;
};
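/* Editor's illustrative sketch, not part of the original header: driving a
 * batch of requests through a request set.  ptlrpc_prep_set(),
 * ptlrpc_set_add_req(), ptlrpc_set_wait() and ptlrpc_set_destroy() are
 * declared later in this header; error handling is elided. */
#if 0
static int send_batch(struct ptlrpc_request **reqs, int nreqs)
{
        struct ptlrpc_request_set *set;
        int i, rc;

        set = ptlrpc_prep_set();
        if (set == NULL)
                return -ENOMEM;

        for (i = 0; i < nreqs; i++)
                ptlrpc_set_add_req(set, reqs[i]);

        /* returns once set_remaining has fallen to zero */
        rc = ptlrpc_set_wait(set);
        ptlrpc_set_destroy(set);
        return rc;
}
#endif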
struct ptlrpc_bulk_desc;

/*
 * ptlrpc callback & work item stuff
 */
struct ptlrpc_cb_id {
        void   (*cbid_fn)(lnet_event_t *ev);    /* specific callback fn */
        void    *cbid_arg;                      /* additional arg */
};

#define RS_MAX_LOCKS 4
#define RS_DEBUG     1

struct ptlrpc_reply_state {
        struct ptlrpc_cb_id    rs_cb_id;
        struct list_head       rs_list;
        struct list_head       rs_exp_list;
        struct list_head       rs_obd_list;
#if RS_DEBUG
        struct list_head       rs_debug_list;
#endif
        /* updates to following flag serialised by srv_request_lock */
        unsigned long          rs_difficult:1;     /* ACK/commit stuff */
        unsigned long          rs_scheduled:1;     /* being handled? */
        unsigned long          rs_scheduled_ever:1;/* any schedule attempts? */
        unsigned long          rs_handled:1;  /* been handled yet? */
        unsigned long          rs_on_net:1;   /* reply_out_callback pending? */
        unsigned long          rs_prealloc:1; /* rs from prealloc list */

        int                    rs_size;
        __u64                  rs_transno;
        __u64                  rs_xid;
        struct obd_export     *rs_export;
        struct ptlrpc_service *rs_service;
        lnet_handle_md_t       rs_md_h;
        atomic_t               rs_refcount;

        /* locks awaiting client reply ACK */
        int                    rs_nlocks;
        struct lustre_handle   rs_locks[RS_MAX_LOCKS];
        ldlm_mode_t            rs_modes[RS_MAX_LOCKS];
        /* last member: variable sized reply message */
        struct lustre_msg     *rs_msg;
};

struct ptlrpc_thread;

enum rq_phase {
        RQ_PHASE_NEW         = 0xebc0de00,
        RQ_PHASE_RPC         = 0xebc0de01,
        RQ_PHASE_BULK        = 0xebc0de02,
        RQ_PHASE_INTERPRET   = 0xebc0de03,
        RQ_PHASE_COMPLETE    = 0xebc0de04,
};

struct ptlrpc_request_pool {
        spinlock_t prp_lock;
        struct list_head prp_req_list;    /* list of ptlrpc_request structs */
        int prp_rq_size;
        void (*prp_populate)(struct ptlrpc_request_pool *, int);
};

struct ptlrpc_request {
        int rq_type; /* one of PTL_RPC_MSG_* */
        struct list_head rq_list;
        struct list_head rq_timed_list;         /* server-side early replies */
        struct list_head rq_history_list;       /* server-side history */
        __u64            rq_history_seq;        /* history sequence # */
        int rq_status;
        spinlock_t rq_lock;
        /* client-side flags are serialized by rq_lock */
        unsigned long rq_intr:1, rq_replied:1, rq_err:1,
                rq_timedout:1, rq_resend:1, rq_restart:1,
                /*
                 * when ->rq_replay is set, request is kept by the client even
                 * after server commits corresponding transaction. This is
                 * used for operations that require sequence of multiple
                 * requests to be replayed. The only example currently is file
                 * open/close. When last request in such a sequence is
                 * committed, ->rq_replay is cleared on all requests in the
                 * sequence.
                 */
                rq_replay:1,
                rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
                rq_no_delay:1, rq_net_err:1, rq_early:1, rq_must_unlink:1,
                /* server-side flags */
                rq_packed_final:1,  /* packed final reply */
                rq_sent_final:1;    /* stop sending early replies */

        enum rq_phase rq_phase; /* one of RQ_PHASE_* */
        atomic_t rq_refcount;   /* client-side refcount for SENT race,
                                   server-side refcount for multiple replies */

        struct ptlrpc_thread *rq_svc_thread; /* initial thread servicing req */

        int rq_request_portal;  /* XXX FIXME bug 249 */
        int rq_reply_portal;    /* XXX FIXME bug 249 */

        int rq_nob_received; /* client-side # reply bytes actually received */

        int rq_reqlen;
        struct lustre_msg *rq_reqmsg;

        int rq_replen;
        struct lustre_msg *rq_repbuf; /* client only, buf may be bigger than msg */
        struct lustre_msg *rq_repmsg;
        __u64 rq_transno;
        __u64 rq_xid;
        struct list_head rq_replay_list;

        __u32 rq_req_swab_mask;
        __u32 rq_rep_swab_mask;

        int rq_import_generation;
        enum lustre_imp_state rq_send_state;
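/* Editor's illustrative sketch, not part of the original header (which
 * continues on the next page, mid-way through struct ptlrpc_request).
 * The distinctive 0xebc0deXX values in enum rq_phase above make a
 * request's phase easy to spot in a debugger or crash dump; a minimal
 * printable-name helper might look like: */
#if 0
static const char *rq_phase_name(enum rq_phase phase)
{
        switch (phase) {
        case RQ_PHASE_NEW:       return "New";
        case RQ_PHASE_RPC:       return "Rpc";
        case RQ_PHASE_BULK:      return "Bulk";
        case RQ_PHASE_INTERPRET: return "Interpret";
        case RQ_PHASE_COMPLETE:  return "Complete";
        }
        return "?phase?";
}
#endif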
