⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 cma.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
/*
 * NOTE(review): tail of a function whose opening lines fall before this
 * chunk (presumably cma_work_handler, judging by the kfree(work) and the
 * INIT_WORK(..., cma_work_handler) calls below — confirm against the full
 * file). Left byte-identical.
 */
out:	cma_enable_remove(id_priv);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

/*
 * Start an asynchronous IB path-record query for this id.
 *
 * Allocates a cma_work describing the CMA_ROUTE_QUERY -> CMA_ROUTE_RESOLVED
 * transition, allocates a single path_rec slot, and hands both to
 * cma_query_ib_route().  On failure both allocations are unwound (path_rec
 * is also reset to NULL so no stale pointer remains on the route).
 * Returns 0 on success or a negative errno.
 */
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	/* Room for exactly one path record; the query callback fills it in. */
	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;	/* don't leave a dangling pointer on the route */
err1:
	kfree(work);
	return ret;
}

/*
 * Install caller-supplied IB path records on the id, skipping the normal
 * route query.  Only legal in the CMA_ADDR_RESOLVED state; the id moves
 * directly to CMA_ROUTE_RESOLVED, and is rolled back to CMA_ADDR_RESOLVED
 * if the copy cannot be allocated.  Returns 0 or a negative errno.
 */
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	/* undo the state transition taken above */
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

/*
 * iWARP route resolution: no fabric query is needed, so just queue a work
 * item that reports RDMA_CM_EVENT_ROUTE_RESOLVED and moves the id from
 * CMA_ROUTE_QUERY to CMA_ROUTE_RESOLVED.  timeout_ms is accepted for
 * signature symmetry with cma_resolve_ib_route() but unused here.
 */
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

/*
 * Public entry point: resolve a route for an id whose address is already
 * resolved.  Dispatches on the device's transport type.  Takes a reference
 * on the id for the duration of the async operation; on immediate failure
 * the state transition and the reference are both rolled back.
 */
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	/* hold the id alive while the query / queued work is in flight */
	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

/*
 * Bind an id to a local device/port for loopback use.  Scans every port of
 * every registered device for one in IB_PORT_ACTIVE state; if none is
 * active, falls back to port 1 of the first device.  Copies that port's
 * GID (index 0) and P_Key (index 0) into the id's dev_addr and attaches
 * the id to the chosen device.  Called and serialized under the global
 * 'lock' mutex taken here.  Returns 0 or a negative errno.
 */
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	/* no active port anywhere: fall back to the first device, port 1 */
	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}

/*
 * Completion callback for rdma_resolve_ip().  Moves the id from
 * CMA_ADDR_QUERY to CMA_ADDR_RESOLVED (or back to CMA_ADDR_BOUND on
 * error), acquires a device for the id if it has none, and delivers either
 * RDMA_CM_EVENT_ADDR_RESOLVED or RDMA_CM_EVENT_ADDR_ERROR to the user's
 * event handler.  A non-zero return from the handler destroys the id.
 */
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		/* resolution failed: fall back to the bound state */
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		/* user asked for destruction: drop our holds, then destroy */
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	cma_enable_remove(id_priv);
	cma_deref_id(id_priv);
}

/*
 * Resolve a loopback (wildcard-destination) address.  Binds the id to a
 * local device/port if needed, mirrors the source GID into the destination
 * GID, copies the destination IPv4 address over a zero source address, and
 * queues a work item reporting RDMA_CM_EVENT_ADDR_RESOLVED.
 */
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	/* loopback: destination GID is our own source GID */
	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

/*
 * Bind helper for rdma_resolve_addr(): bind to the given source address if
 * one was supplied, otherwise bind to the wildcard address of the
 * destination's address family.
 */
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}

/*
 * Public entry point: resolve dst_addr to a device address.  Auto-binds an
 * idle id first, then moves CMA_ADDR_BOUND -> CMA_ADDR_QUERY, takes a
 * reference for the async operation, and either resolves locally (wildcard
 * destination -> loopback) or kicks off rdma_resolve_ip() with
 * addr_handler() as the completion callback.  Immediate failure rolls back
 * both the state and the reference.
 */
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

/*
 * Record a port binding: stamp the port into the id's source sockaddr and
 * link the id onto the bind list's owner chain.
 */
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

/*
 * Allocate a specific port number 'snum' in the port-space idr.  Fails
 * with -EADDRNOTAVAIL if the idr hands back a different (i.e. higher) id,
 * meaning snum itself was taken.  Returns 0 or a negative errno.
 */
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	/* retry idr insertion whenever preloading more memory succeeds */
	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

/*
 * Allocate any free ephemeral port.  Starts searching at the rolling
 * 'next_port' cursor, wraps around to the low end of the local port range
 * once, and gives up with -EADDRNOTAVAIL if the whole range is exhausted.
 */
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret, low, high;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:	/* FIXME: add proper port randomization per like inet_csk_get_port */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	inet_get_local_port_range(&low, &high);
	if (port > high) {
		if (next_port != low) {
			/* wrap around once and rescan from the bottom */
			idr_remove(ps, port);
			next_port = low;
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == high)
		next_port = low;
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

/*
 * Bind to a caller-specified port.  Privileged ports require
 * CAP_NET_BIND_SERVICE.  If the port is unused, allocate it; otherwise
 * join the existing bind list only when neither side involves a wildcard
 * address and the specific source addresses don't collide.
 */
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

/*
 * Pick the idr for the id's port space (SDP/TCP/UDP/IPoIB) and allocate or
 * join a port under the global 'lock' mutex.  -EPROTONOSUPPORT for any
 * other port space.
 */
static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

/*
 * Public entry point: bind an id to a local IPv4 address (only AF_INET is
 * supported here).  Moves CMA_IDLE -> CMA_ADDR_BOUND; for a non-wildcard
 * address, translates it to a device address and attaches a device before
 * allocating the port.  All failures unwind the device attachment (if any)
 * and the state transition.
 */
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);

/*
 * Fill in the private-data header carried in the connection request.
 * SDP uses its own hello header (version checked, IPv4 addresses/port
 * copied in); every other port space gets the generic cma_hdr.  Only IPv4
 * is formatted here.  Returns 0 or -EINVAL on an SDP version mismatch.
 */
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct sockaddr_in *src4, *dst4;
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	src4 = (struct sockaddr_in *) &route->addr.src_addr;
	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

	switch (ps) {
	case RDMA_PS_SDP:
		sdp_hdr = hdr;
		if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
			return -EINVAL;
		sdp_set_ip_ver(sdp_hdr, 4);
		sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		sdp_hdr->port = src4->sin_port;
		break;
	default:
		cma_hdr = hdr;
		cma_hdr->cma_version = CMA_VERSION;
		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
		break;
	}
	return 0;
}

/*
 * NOTE(review): head of cma_sidr_rep_handler; the body continues past the
 * end of this chunk.  Left byte-identical.
 */
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -