📄 viblnd.h
字号:
unsigned long       ibc_last_send;       /* time (jiffies) of last send */
        struct list_head    ibc_early_rxs;       /* rxs completed before ESTABLISHED */
        struct list_head    ibc_tx_queue_nocred; /* sends that don't need a cred */
        struct list_head    ibc_tx_queue_rsrvd;  /* sends that need a reserved cred */
        struct list_head    ibc_tx_queue;        /* send queue */
        struct list_head    ibc_active_txs;      /* active tx awaiting completion */
        spinlock_t          ibc_lock;            /* serialise */
        kib_rx_t           *ibc_rxs;             /* the rx descs */
        kib_pages_t        *ibc_rx_pages;        /* premapped rx msg pages */
        vv_qp_h_t           ibc_qp;              /* queue pair */
        cm_cep_handle_t     ibc_cep;             /* connection endpoint */
        kib_connvars_t     *ibc_connvars;        /* in-progress connection state */
} kib_conn_t;

/* Connection state machine values (stored by kibnal_set_conn_state()) */
#define IBNAL_CONN_INIT_NOTHING       0         /* incomplete init */
#define IBNAL_CONN_INIT_QP            1         /* QP allocated */
#define IBNAL_CONN_INIT               2         /* completed init */
#define IBNAL_CONN_ACTIVE_ARP         3         /* active arping */
#define IBNAL_CONN_ACTIVE_CONNECT     4         /* active sending req */
#define IBNAL_CONN_ACTIVE_CHECK_REPLY 5         /* active checking reply */
#define IBNAL_CONN_ACTIVE_RTU         6         /* active sending rtu */
#define IBNAL_CONN_PASSIVE_WAIT       7         /* passive waiting for rtu */
#define IBNAL_CONN_ESTABLISHED        8         /* connection established */
#define IBNAL_CONN_DISCONNECT1        9         /* disconnect phase 1 */
#define IBNAL_CONN_DISCONNECT2        10        /* disconnect phase 2 */
#define IBNAL_CONN_DISCONNECTED       11        /* disconnect complete */

/* Per-NID peer: connections, queued traffic and (re)connection state for one
 * remote NID */
typedef struct kib_peer
{
        struct list_head    ibp_list;           /* stash on global peer list */
        struct list_head    ibp_connd_list;     /* schedule on kib_connd_peers */
        lnet_nid_t          ibp_nid;            /* who's on the other end(s) */
        __u32               ibp_ip;             /* IP to query for peer conn params */
        int                 ibp_port;           /* port to query for peer conn params */
        __u64               ibp_incarnation;    /* peer's incarnation */
        atomic_t            ibp_refcount;       /* # users */
        int                 ibp_persistence;    /* "known" peer refs */
        struct list_head    ibp_conns;          /* all active connections */
        struct list_head    ibp_tx_queue;       /* msgs waiting for a conn */
        int                 ibp_connecting;     /* current active connection attempts */
        int                 ibp_accepting;      /* current passive connection attempts */
        int                 ibp_arp_count;      /* # arp attempts */
        unsigned long       ibp_reconnect_time; /* when reconnect may be attempted */
        unsigned long       ibp_reconnect_interval; /* exponential backoff */
        int                 ibp_error;          /* errno on closing this peer */
        cfs_time_t          ibp_last_alive;     /* when (in jiffies) I was last alive */
} kib_peer_t;

/* Global module state and tunables (defined in the .c files) */
extern kib_data_t      kibnal_data;
extern kib_tunables_t  kibnal_tunables;

int kibnal_startup (lnet_ni_t *ni);
void kibnal_shutdown (lnet_ni_t *ni);
int kibnal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
int kibnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
extern int kibnal_eager_recv (lnet_ni_t *ni, void *private,
                              lnet_msg_t *lntmsg, void **new_private);
int kibnal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
                int delayed, unsigned int niov,
                struct iovec *iov, lnet_kiov_t *kiov,
                unsigned int offset, unsigned int mlen, unsigned int rlen);
extern void kibnal_init_msg(kib_msg_t *msg, int type, int body_nob);
extern void kibnal_pack_msg(kib_msg_t *msg, __u32 version, int credits,
                            lnet_nid_t dstnid, __u64 dststamp, __u64 seq);
extern int kibnal_unpack_msg(kib_msg_t *msg, __u32 expected_version, int nob);
extern int kibnal_create_peer(kib_peer_t **peerp, lnet_nid_t nid);
extern void kibnal_destroy_peer(kib_peer_t *peer);
extern int kibnal_add_persistent_peer (lnet_nid_t nid, __u32 ip);
extern int kibnal_del_peer(lnet_nid_t nid);
extern kib_peer_t *kibnal_find_peer_locked(lnet_nid_t nid);
extern void kibnal_unlink_peer_locked(kib_peer_t *peer);
extern void kibnal_peer_alive(kib_peer_t *peer);
extern int kibnal_close_stale_conns_locked(kib_peer_t *peer,
                                           __u64 incarnation);
extern kib_conn_t *kibnal_create_conn(cm_cep_handle_t cep);
extern void kibnal_listen_callback(cm_cep_handle_t cep,
                                   cm_conn_data_t *info, void *arg);
extern int kibnal_alloc_pages(kib_pages_t **pp, int npages, int access);
extern void kibnal_free_pages(kib_pages_t *p);
extern void kibnal_check_sends(kib_conn_t *conn);
extern void
kibnal_close_conn_locked(kib_conn_t *conn, int error);
extern void kibnal_destroy_conn(kib_conn_t *conn);
extern int kibnal_thread_start(int (*fn)(void *arg), void *arg);
extern int kibnal_scheduler(void *arg);
extern int kibnal_connd(void *arg);
extern void kibnal_init_tx_msg(kib_tx_t *tx, int type, int body_nob);
extern void kibnal_close_conn(kib_conn_t *conn, int why);
extern int kibnal_set_qp_state(kib_conn_t *conn, vv_qp_state_t new_state);
extern void kibnal_async_callback(vv_event_record_t ev);
extern void kibnal_cq_callback(unsigned long context);
extern void kibnal_passive_connreq(kib_pcreq_t *pcr, int reject);
extern void kibnal_txlist_done (struct list_head *txlist, int status);
extern void kibnal_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
extern int kibnal_init_rdma(kib_tx_t *tx, int type, int nob,
                            kib_rdma_desc_t *dstrd, __u64 dstcookie);
extern int kibnal_tunables_init(void);
extern void kibnal_tunables_fini(void);

/* Take an additional reference on a connection; the caller must already hold
 * one (asserted) */
#define kibnal_conn_addref(conn)                                              \
do {                                                                          \
        CDEBUG(D_NET, "conn[%p] (%d)++\n",                                    \
               (conn), atomic_read(&(conn)->ibc_refcount));                   \
        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);                      \
        atomic_inc(&(conn)->ibc_refcount);                                    \
} while (0)

/* Drop a connection reference.  The final reference does not destroy the
 * conn in-line: it queues it on kib_connd_zombies and wakes the connd
 * thread, which performs the actual destruction */
#define kibnal_conn_decref(conn)                                              \
do {                                                                          \
        unsigned long   flags;                                                \
                                                                              \
        CDEBUG(D_NET, "conn[%p] (%d)--\n",                                    \
               (conn), atomic_read(&(conn)->ibc_refcount));                   \
        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);                      \
        if (atomic_dec_and_test(&(conn)->ibc_refcount)) {                     \
                spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);        \
                list_add_tail(&(conn)->ibc_list,                              \
                              &kibnal_data.kib_connd_zombies);                \
                wake_up(&kibnal_data.kib_connd_waitq);                        \
                spin_unlock_irqrestore(&kibnal_data.kib_connd_lock, flags);   \
        }                                                                     \
} while (0)

/* Take an additional reference on a peer; the caller must already hold one
 * (asserted) */
#define kibnal_peer_addref(peer)                                              \
do {                                                                          \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                              \
               (peer), libcfs_nid2str((peer)->ibp_nid),                       \
               atomic_read (&(peer)->ibp_refcount));                          \
        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);                      \
        atomic_inc(&(peer)->ibp_refcount);                                    \
} while (0)

/* Drop a peer reference; the final reference destroys the peer immediately */
#define kibnal_peer_decref(peer)                                              \
do {                                                                          \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                              \
               (peer), libcfs_nid2str((peer)->ibp_nid),                       \
               atomic_read (&(peer)->ibp_refcount));                          \
        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);                      \
        if (atomic_dec_and_test(&(peer)->ibp_refcount))                       \
                kibnal_destroy_peer(peer);                                    \
} while (0)

/* Map a NID to its chain in the global peer hash table */
static inline struct list_head *
kibnal_nid2peerlist (lnet_nid_t nid)
{
        unsigned int hash = ((unsigned int)nid) % kibnal_data.kib_peer_hash_size;

        return (&kibnal_data.kib_peers [hash]);
}

static inline int
kibnal_peer_active (kib_peer_t *peer)
{
        /* Am I in the peer hash table? */
        return (!list_empty(&peer->ibp_list));
}

/* Enqueue a tx on the send queue matching its message type and the
 * connection's protocol version, binding it to the conn on first queuing.
 * NOTE(review): the "_locked" suffix suggests the caller holds ibc_lock —
 * confirm against callers */
static inline void
kibnal_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
{
        struct list_head  *q;

        LASSERT (tx->tx_nwrq > 0);              /* work items set up */
        LASSERT (!tx->tx_queued);               /* not queued for sending already */

        tx->tx_queued = 1;
        tx->tx_deadline = jiffies + (*kibnal_tunables.kib_timeout * HZ);

        if (tx->tx_conn == NULL) {
                /* First time queued: bind tx to this conn (takes a ref) */
                kibnal_conn_addref(conn);
                tx->tx_conn = conn;
                LASSERT (tx->tx_msg->ibm_type != IBNAL_MSG_PUT_DONE);
        } else {
                /* Only a PUT_DONE may be re-queued on its existing conn */
                LASSERT (tx->tx_conn == conn);
                LASSERT (tx->tx_msg->ibm_type == IBNAL_MSG_PUT_DONE);
        }

        if (conn->ibc_version == IBNAL_MSG_VERSION_RDMAREPLYNOTRSRVD) {
                /* All messages have simple credit control */
                q = &conn->ibc_tx_queue;
        } else {
                LASSERT (conn->ibc_version == IBNAL_MSG_VERSION);

                switch (tx->tx_msg->ibm_type) {
                case IBNAL_MSG_PUT_REQ:
                case IBNAL_MSG_GET_REQ:
                        /* RDMA request: reserve a buffer for the RDMA reply
                         * before sending */
                        q = &conn->ibc_tx_queue_rsrvd;
                        break;

                case IBNAL_MSG_PUT_NAK:
                case IBNAL_MSG_PUT_ACK:
                case IBNAL_MSG_PUT_DONE:
                case IBNAL_MSG_GET_DONE:
                        /* RDMA reply/completion: no credits; peer has reserved
                         * a reply buffer */
                        q = &conn->ibc_tx_queue_nocred;
                        break;

                case IBNAL_MSG_NOOP:
                case IBNAL_MSG_IMMEDIATE:
                        /* Otherwise: consume a credit before sending */
                        q = &conn->ibc_tx_queue;
                        break;

                default:
                        LBUG();
                        q = NULL;
                }
        }

        list_add_tail(&tx->tx_list, q);
}

/* True when keepalives are enabled (tunable > 0) and the last send on this
 * conn is older than the keepalive interval */
static inline int
kibnal_send_keepalive(kib_conn_t *conn)
{
        return (*kibnal_tunables.kib_keepalive > 0) &&
                time_after(jiffies, conn->ibc_last_send +
*kibnal_tunables.kib_keepalive*HZ);
}

#ifndef IBNAL_VOIDSTAR_SGADDR
# define IBNAL_VOIDSTAR_SGADDR 0
#endif

#if IBNAL_VOIDSTAR_SGADDR
# if defined(CONFIG_HIGHMEM)
#  if defined(CONFIG_X86) && defined(CONFIG_HIGHMEM4G)
    /* truncation to void* doesn't matter if 0 <= physmem < 4G
     * so allow x86 with 32 bit phys addrs */
#  elif defined(CONFIG_IA64)
    /* OK anyway on 64-bit arch */
#  else
#   error "Can't support HIGHMEM when vv_scatgat_t::v_address is void *"
#  endif
# endif

/* Addresses must squeeze through a void * scatter/gather field: convert via
 * unsigned long in both directions */
# define KIBNAL_ADDR2SG(a)       ((void *)((unsigned long)(a)))
# define KIBNAL_SG2ADDR(a)       ((__u64)((unsigned long)(a)))

/* Translate a local address into the form advertised to the peer, via
 * vv_va2advertise_addr() on the HCA */
static inline __u64 kibnal_addr2net (__u64 addr)
{
        void        *netaddr;
        vv_return_t  vvrc = vv_va2advertise_addr(kibnal_data.kib_hca,
                                                 KIBNAL_ADDR2SG(addr),
                                                 &netaddr);
        LASSERT (vvrc == vv_return_ok);
        return KIBNAL_SG2ADDR(netaddr);
}
#else
/* Scatter/gather addresses are plain 64-bit values: no conversion needed */
# define KIBNAL_ADDR2SG(a)       a
# define KIBNAL_SG2ADDR(a)       a

/* Translate a local address into the form advertised to the peer, via
 * vv_va2advertise_addr() on the HCA */
static inline __u64 kibnal_addr2net (__u64 addr)
{
        __u64        netaddr;
        vv_return_t  vvrc = vv_va2advertise_addr(kibnal_data.kib_hca,
                                                 addr,
                                                 &netaddr);
        LASSERT (vvrc == vv_return_ok);
        return netaddr;
}
#endif

/* CAVEAT EMPTOR: We rely on tx/rx descriptor alignment to allow us to use the
 * lowest 2 bits of the work request id to stash the work item type (the op
 * field is not valid when the wc completes in error).
*/#define IBNAL_WID_TX 0#define IBNAL_WID_RX 1#define IBNAL_WID_RDMA 2#define IBNAL_WID_MASK 3ULstatic inline vv_wr_id_tkibnal_ptr2wreqid (void *ptr, int type){ unsigned long lptr = (unsigned long)ptr; LASSERT ((lptr & IBNAL_WID_MASK) == 0); LASSERT ((type & ~IBNAL_WID_MASK) == 0); return (vv_wr_id_t)(lptr | type);}static inline void *kibnal_wreqid2ptr (vv_wr_id_t wreqid){ return (void *)(((unsigned long)wreqid) & ~IBNAL_WID_MASK);}static inline intkibnal_wreqid2type (vv_wr_id_t wreqid){ return (wreqid & IBNAL_WID_MASK);}static inline voidkibnal_set_conn_state (kib_conn_t *conn, int state){ conn->ibc_state = state; mb();}#if IBNAL_USE_FMRstatic inline intkibnal_rd_size (kib_rdma_desc_t *rd) { return rd->rd_nob;}#elsestatic inline __u64kibnal_rf_addr (kib_rdma_frag_t *rf){ return (((__u64)rf->rf_addr_hi)<<32) | ((__u64)rf->rf_addr_lo);}static inline voidkibnal_rf_set (kib_rdma_frag_t *rf, __u64 addr, int nob){ rf->rf_addr_lo = addr & 0xffffffff; rf->rf_addr_hi = (addr >> 32) & 0xffffffff; rf->rf_nob = nob;}static inline intkibnal_rd_size (kib_rdma_desc_t *rd){ int i; int size; for (i = size = 0; i < rd->rd_nfrag; i++) size += rd->rd_frags[i].rf_nob; return size;}#endif
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -