
cma.c

Linux kernel source code
Language: C
Page 1 of 5
	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);
	ret = rdma_translate_ip(&id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto err;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_remove(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_inc(&conn_id->dev_remove);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret)
		goto out;

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	cma_enable_remove(conn_id);
	rdma_destroy_id(&conn_id->id);

out:
	cma_enable_remove(listen_id);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__u32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = ~0;
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = ~0;
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_remove(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	cma_enable_remove(id_priv);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;

	listen_id = cm_id->context;
	if (cma_disable_remove(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (!new_cm_id) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	atomic_inc(&conn_id->dev_remove);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_enable_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}

out:
	if (dev)
		dev_put(dev);
	cma_enable_remove(listen_id);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size(&id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_in addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.sin_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.sa_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
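Reader's note, not part of cma.c: the exported rdma_listen() above is driven entirely by the consumer's event-handler callback, and the "Destroy the CM ID by returning a non-zero value" comment in cma_req_handler() is the contract that callback must honor. Below is a minimal sketch of a passive-side kernel consumer, written against the three-argument rdma_create_id() used in this version of the file; the module, demo_cm_handler, port 7471, and backlog of 8 are illustrative assumptions, not anything defined in cma.c.

/* Minimal usage sketch (illustrative, not part of cma.c). */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/in.h>
#include <rdma/rdma_cm.h>

static struct rdma_cm_id *listen_id;

/* Returning non-zero asks the CM to destroy the id, per cma_req_handler(). */
static int demo_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		/* A real consumer would create a QP and call rdma_accept();
		 * this sketch just declines the connection. */
		return rdma_reject(id, NULL, 0);
	default:
		return 0;
	}
}

static int __init demo_init(void)
{
	struct sockaddr_in addr;
	int ret;

	listen_id = rdma_create_id(demo_cm_handler, NULL, RDMA_PS_TCP);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	memset(&addr, 0, sizeof addr);
	addr.sin_family = AF_INET;
	addr.sin_port = htons(7471);	/* illustrative port */

	/* Binding to the wildcard address makes rdma_listen() take the
	 * cma_listen_on_all() path seen above. */
	ret = rdma_bind_addr(listen_id, (struct sockaddr *) &addr);
	if (!ret)
		ret = rdma_listen(listen_id, 8);
	if (ret)
		rdma_destroy_id(listen_id);
	return ret;
}

static void __exit demo_exit(void)
{
	rdma_destroy_id(listen_id);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");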
