
sa_query.c

Category: kernel
Language: C
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, int timeout_ms)
{
	struct ib_sa_port *port = query->port;
	unsigned long flags;
	int ret;
	struct ib_sge      gather_list;
	struct ib_send_wr *bad_wr, wr = {
		.opcode      = IB_WR_SEND,
		.sg_list     = &gather_list,
		.num_sge     = 1,
		.send_flags  = IB_SEND_SIGNALED,
		.wr	     = {
			 .ud = {
				 .mad_hdr     = &query->mad->mad_hdr,
				 .remote_qpn  = 1,
				 .remote_qkey = IB_QP1_QKEY,
				 .timeout_ms  = timeout_ms
			 }
		 }
	};

retry:
	if (!idr_pre_get(&query_idr, GFP_ATOMIC))
		return -ENOMEM;
	spin_lock_irqsave(&idr_lock, flags);
	ret = idr_get_new(&query_idr, query, &query->id);
	spin_unlock_irqrestore(&idr_lock, flags);
	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		return ret;

	wr.wr_id = query->id;

	spin_lock_irqsave(&port->ah_lock, flags);
	kref_get(&port->sm_ah->ref);
	query->sm_ah = port->sm_ah;
	wr.wr.ud.ah  = port->sm_ah->ah;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	gather_list.addr   = dma_map_single(port->agent->device->dma_device,
					    query->mad,
					    sizeof (struct ib_sa_mad),
					    DMA_TO_DEVICE);
	gather_list.length = sizeof (struct ib_sa_mad);
	gather_list.lkey   = port->mr->lkey;
	pci_unmap_addr_set(query, mapping, gather_list.addr);

	ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
	if (ret) {
		dma_unmap_single(port->agent->device->dma_device,
				 pci_unmap_addr(query, mapping),
				 sizeof (struct ib_sa_mad),
				 DMA_TO_DEVICE);
		kref_put(&query->sm_ah->ref, free_sm_ah);
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, query->id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	return ret;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(sa_query->mad);
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, int gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
	struct ib_mad_agent *agent  = port->agent;
	int ret;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;
	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
	if (!query->sa_query.mad) {
		kfree(query);
		return -ENOMEM;
	}

	query->callback = callback;
	query->context  = context;

	init_mad(query->sa_query.mad, agent);

	query->sa_query.callback              = ib_sa_path_rec_callback;
	query->sa_query.release               = ib_sa_path_rec_release;
	query->sa_query.port                  = port;
	query->sa_query.mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
	query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
		rec, query->sa_query.mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms);
	if (ret) {
		*sa_query = NULL;
		kfree(query->sa_query.mad);
		kfree(query);
	}

	return ret ? ret : query->sa_query.id;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(sa_query->mad);
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, int gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
	struct ib_mad_agent *agent  = port->agent;
	int ret;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;
	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
	if (!query->sa_query.mad) {
		kfree(query);
		return -ENOMEM;
	}

	query->callback = callback;
	query->context  = context;

	init_mad(query->sa_query.mad, agent);

	query->sa_query.callback              = ib_sa_mcmember_rec_callback;
	query->sa_query.release               = ib_sa_mcmember_rec_release;
	query->sa_query.port                  = port;
	query->sa_query.mad->mad_hdr.method   = method;
	query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, query->sa_query.mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms);
	if (ret) {
		*sa_query = NULL;
		kfree(query->sa_query.mad);
		kfree(query);
	}

	return ret ? ret : query->sa_query.id;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query;
	unsigned long flags;

	spin_lock_irqsave(&idr_lock, flags);
	query = idr_find(&query_idr, mad_send_wc->wr_id);
	spin_unlock_irqrestore(&idr_lock, flags);

	if (!query)
		return;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
		/* No callback -- already got recv */
		break;
	case IB_WC_RESP_TIMEOUT_ERR:
		query->callback(query, -ETIMEDOUT, NULL);
		break;
	case IB_WC_WR_FLUSH_ERR:
		query->callback(query, -EINTR, NULL);
		break;
	default:
		query->callback(query, -EIO, NULL);
		break;
	}

	dma_unmap_single(agent->device->dma_device,
			 pci_unmap_addr(query, mapping),
			 sizeof (struct ib_sa_mad),
			 DMA_TO_DEVICE);

	kref_put(&query->sm_ah->ref, free_sm_ah);

	query->release(query);

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, mad_send_wc->wr_id);
	spin_unlock_irqrestore(&idr_lock, flags);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	unsigned long flags;

	spin_lock_irqsave(&idr_lock, flags);
	query = idr_find(&query_idr, mad_recv_wc->wc->wr_id);
	spin_unlock_irqrestore(&idr_lock, flags);

	if (query) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;

	if (device->node_type == IB_NODE_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	sa_dev = kmalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		sa_dev->port[i].mr       = NULL;
		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;
		spin_lock_init(&sa_dev->port[i].ah_lock);

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		sa_dev->port[i].mr = ib_get_dma_mr(sa_dev->port[i].agent->qp->pd,
						   IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(sa_dev->port[i].mr)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			goto err;
		}

		INIT_WORK(&sa_dev->port[i].update_task,
			  update_sm_ah, &sa_dev->port[i]);
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */
	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);

	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i)
		update_sm_ah(&sa_dev->port[i]);

	return;

err:
	while (--i >= 0) {
		ib_dereg_mr(sa_dev->port[i].mr);
		ib_unregister_mad_agent(sa_dev->port[i].agent);
	}

	kfree(sa_dev);

	return;
}

static void ib_sa_remove_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		ib_unregister_mad_agent(sa_dev->port[i].agent);
		kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
	}

	kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
	int ret;

	spin_lock_init(&idr_lock);
	spin_lock_init(&tid_lock);

	get_random_bytes(&tid, sizeof tid);

	ret = ib_register_client(&sa_client);
	if (ret)
		printk(KERN_ERR "Couldn't register ib_sa client\n");

	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	ib_unregister_client(&sa_client);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);
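
The kernel-doc above describes the ib_sa_path_rec_get() contract, but the file itself contains no caller. The sketch below shows how a consumer module might issue (and, if needed, cancel) a path record query against this version of the API. It is illustrative only: the handler, the GID arguments, the context and the timeout are made-up placeholders, and the IB_SA_PATH_REC_* component-mask bits and ib_sa_cancel_query() are assumed to be declared in the ib_sa header that matches this kernel.

/*
 * Illustrative caller -- not part of sa_query.c.  Assumes the matching
 * ib_sa header is included for struct ib_sa_path_rec, the
 * IB_SA_PATH_REC_* component-mask bits and ib_sa_cancel_query().
 */
static struct ib_sa_query *my_sa_query;		/* hypothetical caller state */

static void my_path_handler(int status, struct ib_sa_path_rec *resp,
			    void *context)
{
	if (status) {
		printk(KERN_WARNING "path record query failed: %d\n", status);
		return;
	}
	/*
	 * resp points at a record on ib_sa_path_rec_callback()'s stack,
	 * so it is only valid for the duration of this callback; copy
	 * out whatever the caller needs before returning.
	 */
}

static int my_resolve_path(struct ib_device *device, u8 port_num,
			   union ib_gid *sgid, union ib_gid *dgid)
{
	struct ib_sa_path_rec rec = {
		.sgid      = *sgid,
		.dgid      = *dgid,
		.numb_path = 1,
	};
	int id;

	id = ib_sa_path_rec_get(device, port_num, &rec,
				IB_SA_PATH_REC_DGID |
				IB_SA_PATH_REC_SGID |
				IB_SA_PATH_REC_NUMB_PATH,
				1000, GFP_KERNEL,
				my_path_handler, NULL, &my_sa_query);
	if (id < 0)
		return id;	/* negative return value is an error code */

	/*
	 * A non-negative return value is the query ID; it can be passed
	 * to ib_sa_cancel_query(id, my_sa_query) to cancel the query,
	 * in which case the handler runs with status == -EINTR.
	 */
	return 0;
}

ib_sa_mcmember_rec_query() is driven the same way, except that the caller also supplies the MAD method (for example IB_MGMT_METHOD_SET for a join) and passes an ib_sa_mcmember_rec with its component mask in place of the path record.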
