
📄 cma.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 *
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum cma_state		state;
	spinlock_t		lock;
	struct mutex		qp_mutex;
	struct completion	comp;
	atomic_t		refcount;
	wait_queue_head_t	wait_remove;
	atomic_t		dev_remove;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__u32 pad[3];
		__u32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__u16 port;
	__u16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}
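/*
 * Example (illustrative; not part of the original file): the helpers
 * above are how this file drives the cma_state machine. A transition
 * is claimed with a locked compare-and-exchange, so exactly one caller
 * wins any race. A minimal sketch of the pattern, assuming an id_priv
 * that should move from CMA_ADDR_BOUND to CMA_LISTEN:
 *
 *	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
 *		return -EINVAL;		(state changed under us; bail out)
 *
 * The spinlock inside cma_comp_exch() makes the test and the store
 * atomic with respect to concurrent event callbacks.
 */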
static int cma_set_qkey(struct ib_device *device, u8 port_num,
			enum rdma_port_space ps,
			struct rdma_dev_addr *dev_addr, u32 *qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (ps) {
	case RDMA_PS_UDP:
		*qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
		*qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_addr->dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			ret = cma_set_qkey(cma_dev->device,
					   id_priv->id.port_num,
					   id_priv->id.ps, dev_addr,
					   &id_priv->qkey);
			if (!ret)
				cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_remove(struct rdma_id_private *id_priv,
			      enum cma_state state)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == state) {
		atomic_inc(&id_priv->dev_remove);
		ret = 0;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static void cma_enable_remove(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->dev_remove))
		wake_up(&id_priv->wait_remove);
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	init_waitqueue_head(&id_priv->wait_remove);
	atomic_set(&id_priv->dev_remove, 0);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
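/*
 * Example (illustrative; not part of the original file): the comment
 * near the top of this file describes how device-removal notification
 * is serialized against other callbacks. cma_disable_remove() and
 * cma_enable_remove() above implement that: event handlers bracket
 * their work with the pair, roughly like this sketch:
 *
 *	if (cma_disable_remove(id_priv, CMA_CONNECT))
 *		return 0;	(id is being removed; drop the event)
 *	... deliver the event to the user ...
 *	cma_enable_remove(id_priv);
 *
 * While dev_remove is elevated, the removal path waits on wait_remove
 * before reporting RDMA_CM_EVENT_DEVICE_REMOVAL to the user.
 */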
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
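/*
 * Example (illustrative; not part of the original file): a minimal
 * consumer sketch for rdma_create_qp() above, assuming the caller has
 * already resolved the id to a device and allocated pd and cq on that
 * same device (hypothetical names):
 *
 *	struct ib_qp_init_attr init_attr;
 *	int ret;
 *
 *	memset(&init_attr, 0, sizeof init_attr);
 *	init_attr.qp_type	   = IB_QPT_RC;
 *	init_attr.send_cq	   = cq;
 *	init_attr.recv_cq	   = cq;
 *	init_attr.cap.max_send_wr  = 16;
 *	init_attr.cap.max_recv_wr  = 16;
 *	init_attr.cap.max_send_sge = 1;
 *	init_attr.cap.max_recv_sge = 1;
 *
 *	ret = rdma_create_qp(id, pd, &init_attr);
 *
 * rdma_create_qp() returns -EINVAL if pd belongs to a different device
 * than the id; on success it leaves connected QPs in INIT and moves
 * unconnected (UD) QPs all the way to RTS, as cma_init_ud_qp() shows.
 */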
