selftest.h
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
 *   Author: Isaac Huang <isaac@clusterfs.com>
 *
 */
#ifndef __SELFTEST_SELFTEST_H__
#define __SELFTEST_SELFTEST_H__

#define LNET_ONLY

#ifndef __KERNEL__
#include <liblustre.h> /* userland spinlock_t and atomic_t */
#endif

#include <libcfs/kp30.h>
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
#include <lnet/lib-types.h>
#include <lnet/lnetst.h>

#include "rpc.h"
#include "timer.h"

#ifndef MADE_WITHOUT_COMPROMISE
#define MADE_WITHOUT_COMPROMISE
#endif

#define SWI_STATE_NEWBORN               0
#define SWI_STATE_REPLY_SUBMITTED       1
#define SWI_STATE_REPLY_SENT            2
#define SWI_STATE_REQUEST_SUBMITTED     3
#define SWI_STATE_REQUEST_SENT          4
#define SWI_STATE_REPLY_RECEIVED        5
#define SWI_STATE_BULK_STARTED          6
#define SWI_STATE_DONE                  10

/* forward refs */
struct swi_workitem;
struct srpc_service;
struct sfw_test_unit;
struct sfw_test_instance;

/*
 * A workitem is deferred work with these semantics:
 * - a workitem always runs in thread context.
 * - a workitem can be concurrent with other workitems but is strictly
 *   serialized with respect to itself.
 * - no CPU affinity, a workitem does not necessarily run on the same CPU
 *   that schedules it. However, this might change in the future.
 * - if a workitem is scheduled again before it has a chance to run, it
 *   runs only once.
 * - if a workitem is scheduled while it runs, it runs again after it
 *   completes; this ensures that events occurring while other events are
 *   being processed receive due attention. This behavior also allows a
 *   workitem to reschedule itself.
 *
 * Usage notes:
 * - a workitem can sleep but it should be aware of how that sleep might
 *   affect others.
 * - a workitem runs inside a kernel thread so there's no user space to access.
 * - do not use a workitem if the scheduling latency can't be tolerated.
 *
 * When wi_action returns non-zero, it means the workitem has either been
 * freed or reused and the workitem scheduler won't touch it any more.
 */
typedef int (*swi_action_t) (struct swi_workitem *);
typedef struct swi_workitem {
        struct list_head wi_list;       /* chain on runq */
        int              wi_state;
        swi_action_t     wi_action;
        void            *wi_data;
        unsigned int     wi_running:1;
        unsigned int     wi_scheduled:1;
} swi_workitem_t;

static inline void
swi_init_workitem (swi_workitem_t *wi, void *data, swi_action_t action)
{
        CFS_INIT_LIST_HEAD(&wi->wi_list);

        wi->wi_running   = 0;
        wi->wi_scheduled = 0;
        wi->wi_data      = data;
        wi->wi_action    = action;
        wi->wi_state     = SWI_STATE_NEWBORN;
}

#define SWI_RESCHED     128     /* # workitem scheduler loops before reschedule */
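
/*
 * Illustrative sketch only (not part of the original header): defining and
 * scheduling a workitem. It assumes swi_schedule_workitem(), declared
 * elsewhere in the self-test code; my_state, my_action and my_start are
 * hypothetical names.
 */
#if 0
struct my_state {
        swi_workitem_t  ms_wi;
        int             ms_events;      /* caller data handed to the action */
};

static int
my_action (struct swi_workitem *wi)
{
        struct my_state *ms = (struct my_state *)wi->wi_data;

        /* runs in thread context, strictly serialized against itself */
        ms->ms_events++;
        return 0;       /* non-zero would mean: freed/reused, don't touch */
}

static void
my_start (struct my_state *ms)
{
        swi_init_workitem(&ms->ms_wi, ms, my_action);
        swi_schedule_workitem(&ms->ms_wi);      /* queue it for the scheduler */
}
#endif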
/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
 * services, e.g. create/modify session.
 */
#define SRPC_SERVICE_DEBUG              0
#define SRPC_SERVICE_MAKE_SESSION       1
#define SRPC_SERVICE_REMOVE_SESSION     2
#define SRPC_SERVICE_BATCH              3
#define SRPC_SERVICE_TEST               4
#define SRPC_SERVICE_QUERY_STAT         5
#define SRPC_SERVICE_JOIN               6
#define SRPC_FRAMEWORK_SERVICE_MAX_ID   10
/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
#define SRPC_SERVICE_BRW                11
#define SRPC_SERVICE_PING               12
#define SRPC_SERVICE_MAX_ID             12

#define SRPC_REQUEST_PORTAL             50
/* a lazy portal for framework RPC requests */
#define SRPC_FRAMEWORK_REQUEST_PORTAL   51
/* all reply/bulk RDMAs go to this portal */
#define SRPC_RDMA_PORTAL                52

static inline srpc_msg_type_t
srpc_service2request (int service)
{
        switch (service) {
        default:
                LBUG ();
        case SRPC_SERVICE_DEBUG:
                return SRPC_MSG_DEBUG_REQST;

        case SRPC_SERVICE_MAKE_SESSION:
                return SRPC_MSG_MKSN_REQST;

        case SRPC_SERVICE_REMOVE_SESSION:
                return SRPC_MSG_RMSN_REQST;

        case SRPC_SERVICE_BATCH:
                return SRPC_MSG_BATCH_REQST;

        case SRPC_SERVICE_TEST:
                return SRPC_MSG_TEST_REQST;

        case SRPC_SERVICE_QUERY_STAT:
                return SRPC_MSG_STAT_REQST;

        case SRPC_SERVICE_BRW:
                return SRPC_MSG_BRW_REQST;

        case SRPC_SERVICE_PING:
                return SRPC_MSG_PING_REQST;

        case SRPC_SERVICE_JOIN:
                return SRPC_MSG_JOIN_REQST;
        }
}

static inline srpc_msg_type_t
srpc_service2reply (int service)
{
        return srpc_service2request(service) + 1;
}
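
/*
 * Note (illustrative, not part of the original header): srpc_service2reply()
 * relies on each *_REPLY value in srpc_msg_type_t immediately following its
 * *_REQST counterpart. A compile-time check of that assumption could look
 * like this; SRPC_MSG_PING_REPLY is assumed to exist alongside
 * SRPC_MSG_PING_REQST in the message-type enum.
 */
#if 0
typedef char srpc_reply_follows_reqst_check[
        (SRPC_MSG_PING_REPLY == SRPC_MSG_PING_REQST + 1) ? 1 : -1];
#endif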
typedef enum {
        SRPC_BULK_REQ_RCVD   = 0, /* passive bulk request (PUT sink/GET source) received */
        SRPC_BULK_PUT_SENT   = 1, /* active bulk PUT sent (source) */
        SRPC_BULK_GET_RPLD   = 2, /* active bulk GET replied (sink) */
        SRPC_REPLY_RCVD      = 3, /* incoming reply received */
        SRPC_REPLY_SENT      = 4, /* outgoing reply sent */
        SRPC_REQUEST_RCVD    = 5, /* incoming request received */
        SRPC_REQUEST_SENT    = 6, /* outgoing request sent */
} srpc_event_type_t;

/* RPC event */
typedef struct {
        srpc_event_type_t ev_type;      /* what's up */
        lnet_event_kind_t ev_lnet;      /* LNet event type */
        int               ev_fired;     /* LNet event fired? */
        int               ev_status;    /* LNet event status */
        void             *ev_data;      /* owning server/client RPC */
} srpc_event_t;

typedef struct {
        int              bk_len;        /* len of bulk data */
        lnet_handle_md_t bk_mdh;
        int              bk_sink;       /* sink/source */
        int              bk_niov;       /* # iov in bk_iovs */
#ifdef __KERNEL__
        lnet_kiov_t      bk_iovs[0];
#else
        cfs_page_t     **bk_pages;
        lnet_md_iovec_t  bk_iovs[0];
#endif
} srpc_bulk_t; /* bulk descriptor */

typedef struct srpc_peer {
        struct list_head stp_list;      /* chain on peer hash */
        struct list_head stp_rpcq;      /* q of non-control RPCs */
        struct list_head stp_ctl_rpcq;  /* q of control RPCs */
        spinlock_t       stp_lock;      /* serialize */
        lnet_nid_t       stp_nid;
        int              stp_credits;   /* available credits */
} srpc_peer_t;

/* message buffer descriptor */
typedef struct {
        struct list_head  buf_list;     /* chain on srpc_service::*_msgq */
        srpc_msg_t        buf_msg;
        lnet_handle_md_t  buf_mdh;
        lnet_nid_t        buf_self;
        lnet_process_id_t buf_peer;
} srpc_buffer_t;

/* server-side state of an RPC */
typedef struct srpc_server_rpc {
        struct list_head     srpc_list; /* chain on srpc_service::*_rpcq */
        struct srpc_service *srpc_service;
        swi_workitem_t       srpc_wi;
        srpc_event_t         srpc_ev;   /* bulk/reply event */
        lnet_nid_t           srpc_self;
        lnet_process_id_t    srpc_peer;
        srpc_msg_t           srpc_replymsg;
        lnet_handle_md_t     srpc_replymdh;
        srpc_buffer_t       *srpc_reqstbuf;
        srpc_bulk_t         *srpc_bulk;

        int                  srpc_status;
        void               (*srpc_done)(struct srpc_server_rpc *);
} srpc_server_rpc_t;

/* client-side state of an RPC */
typedef struct srpc_client_rpc {
        struct list_head  crpc_list;    /* chain on user's lists */
        struct list_head  crpc_privl;   /* chain on srpc_peer_t::*rpcq */
        spinlock_t        crpc_lock;    /* serialize */
        int               crpc_service;
        atomic_t          crpc_refcount;
        int               crpc_timeout; /* # seconds to wait for reply */
        stt_timer_t       crpc_timer;
        swi_workitem_t    crpc_wi;
        lnet_process_id_t crpc_dest;
        srpc_peer_t      *crpc_peer;

        void            (*crpc_done)(struct srpc_client_rpc *);
        void            (*crpc_fini)(struct srpc_client_rpc *);
        int               crpc_status;      /* completion status */
        void             *crpc_priv;        /* caller data */

        /* state flags */
        unsigned int      crpc_aborted:1;   /* being given up */
        unsigned int      crpc_closed:1;    /* completed */

        /* RPC events */
        srpc_event_t      crpc_bulkev;      /* bulk event */
        srpc_event_t      crpc_reqstev;     /* request event */
        srpc_event_t      crpc_replyev;     /* reply event */

        /* bulk, request (reqst), and reply exchanged on wire */
        srpc_msg_t        crpc_reqstmsg;
        srpc_msg_t        crpc_replymsg;
        lnet_handle_md_t  crpc_reqstmdh;
        lnet_handle_md_t  crpc_replymdh;
        srpc_bulk_t       crpc_bulk;
} srpc_client_rpc_t;

#define srpc_client_rpc_size(rpc)                                       \
offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])

#define srpc_client_rpc_addref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        atomic_inc(&(rpc)->crpc_refcount);                              \
} while (0)

#define srpc_client_rpc_decref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        if (atomic_dec_and_test(&(rpc)->crpc_refcount))                 \
                srpc_destroy_client_rpc(rpc);                           \
} while (0)

#define srpc_event_pending(rpc)   ((rpc)->crpc_bulkev.ev_fired == 0 ||  \
                                   (rpc)->crpc_reqstev.ev_fired == 0 || \
                                   (rpc)->crpc_replyev.ev_fired == 0)

typedef struct srpc_service {
        int                sv_id;       /* service id */
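
/*
 * Usage sketch (illustrative, not part of the original header): the
 * refcounting discipline the macros above imply. Each code path that may
 * outlive its caller takes its own reference; the final decref calls
 * srpc_destroy_client_rpc(). my_rpc_user() is a hypothetical caller.
 */
#if 0
static void
my_rpc_user (srpc_client_rpc_t *rpc)
{
        srpc_client_rpc_addref(rpc);    /* hold a ref for this code path */

        if (!srpc_event_pending(rpc))   /* request, reply and bulk all fired */
                CDEBUG(D_NET, "RPC[%p] settled, status %d\n",
                       rpc, rpc->crpc_status);

        srpc_client_rpc_decref(rpc);    /* the last ref frees the RPC */
}
#endif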