
📄 mad.c

📁 Kernel
💻 C
📖 Page 1 of 5
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);

	port_priv = mad_agent_priv->qp_info->port_priv;

	cancel_delayed_work(&mad_agent_priv->timed_work);
	flush_workqueue(port_priv->wq);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	/* XXX: Cleanup pending RMPP receives for this agent */

	atomic_dec(&mad_agent_priv->refcount);
	wait_event(mad_agent_priv->wait,
		   !atomic_read(&mad_agent_priv->refcount));

	if (mad_agent_priv->reg_req)
		kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	atomic_dec(&mad_snoop_priv->refcount);
	wait_event(mad_snoop_priv->wait,
		   !atomic_read(&mad_snoop_priv->refcount));

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_send_wr *send_wr,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_wr, mad_send_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp_num = IB_QP0;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_smp *smp,
				  struct ib_send_wr *send_wr)
{
	int ret, alloc_flags, solicited;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_wc mad_wc;

	if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}
	/* Check to post send on QP or process locally */
	ret = smi_check_local_dr_smp(smp, device, port_num);
	if (!ret || !device->process_mad)
		goto out;

	if (in_atomic() || irqs_disabled())
		alloc_flags = GFP_ATOMIC;
	else
		alloc_flags = GFP_KERNEL;
	local = kmalloc(sizeof *local, alloc_flags);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, alloc_flags);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		/*
		 * See if response is solicited and
		 * there is a recv handler
		 */
		if (solicited_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		solicited = solicited_mad(&mad_priv->mad.mad);
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad,
							solicited);
		}
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
			kfree(local);
			ret = 0;
			goto out;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->send_wr = *send_wr;
	local->send_wr.sg_list = local->sg_list;
	memcpy(local->sg_list, send_wr->sg_list,
	       sizeof *send_wr->sg_list * send_wr->num_sge);
	local->send_wr.next = NULL;
	local->tid = send_wr->wr.ud.mad_hdr->tid;
	local->wr_id = send_wr->wr_id;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int ib_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* Replace user's WR ID with our own to find WR upon completion */
	qp_info = mad_agent_priv->qp_info;
	mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
		list_add_tail(&mad_send_wr->mad_list.list,
			      &qp_info->send_queue.list);
		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
		ret = ib_post_send(mad_agent_priv->agent.qp,
				   &mad_send_wr->send_wr, &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			dequeue_mad(&mad_send_wr->mad_list);
		}
	} else {
		list_add_tail(&mad_send_wr->mad_list.list,
			      &qp_info->overflow_list);
		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
		ret = 0;
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_agent *mad_agent,
		     struct ib_send_wr *send_wr,
		     struct ib_send_wr **bad_send_wr)
{
	int ret = -EINVAL;
	struct ib_mad_agent_private *mad_agent_priv;

	/* Validate supplied parameters */
	if (!bad_send_wr)
		goto error1;

	if (!mad_agent || !send_wr)
		goto error2;

	if (!mad_agent->send_handler)
		goto error2;

	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private,
				      agent);

	/* Walk list of send WRs and post each on send list */
	while (send_wr) {
		unsigned long			flags;
		struct ib_send_wr		*next_send_wr;
		struct ib_mad_send_wr_private	*mad_send_wr;
		struct ib_smp			*smp;

		/* Validate more parameters */
		if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
			goto error2;

		if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
			goto error2;

		if (!send_wr->wr.ud.mad_hdr) {
			printk(KERN_ERR PFX "MAD header must be supplied "
			       "in WR %p\n", send_wr);
			goto error2;
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_wr = (struct ib_send_wr *)send_wr->next;

		smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
		if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
						     send_wr);
			if (ret < 0)		/* error */
				goto error2;
			else if (ret == 1)	/* locally consumed */
				goto next;
		}

		/* Allocate MAD send WR tracking structure */
		mad_send_wr = kmalloc(sizeof *mad_send_wr,
				      (in_atomic() || irqs_disabled()) ?
				      GFP_ATOMIC : GFP_KERNEL);
		if (!mad_send_wr) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_send_wr_private\n");
			ret = -ENOMEM;
			goto error2;
		}

		mad_send_wr->send_wr = *send_wr;
		mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
		memcpy(mad_send_wr->sg_list, send_wr->sg_list,
		       sizeof *send_wr->sg_list * send_wr->num_sge);
		mad_send_wr->send_wr.next = NULL;
		mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
		mad_send_wr->agent = mad_agent;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.ud.timeout_ms);
		mad_send_wr->retry = 0;
		/* One reference for each work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		ret = ib_send_mad(mad_agent_priv, mad_send_wr);
		if (ret) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error2;
		}
next:
		send_wr = next_send_wr;
	}
	return 0;

error2:
	*bad_send_wr = send_wr;
error1:
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *entry;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;

	mad_priv_hdr = container_of(mad_recv_wc,
				    struct ib_mad_private_header,
				    recv_wc);
	priv = container_of(mad_priv_hdr, struct ib_mad_private, header);
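The listing is cut off inside ib_free_recv_mad() and continues on the next page. For orientation, below is a minimal caller sketch that is not part of mad.c: the function name example_post_one_mad is hypothetical, and the assumption that the agent was obtained from ib_register_mad_agent() is illustrative. It relies only on behavior visible in this listing, namely that ib_post_send_mad() walks the WR chain via send_wr->next, requires a non-NULL bad_send_wr, expects wr.ud.mad_hdr to point at the MAD to send, and on failure sets *bad_send_wr to the rejected work request.

/*
 * Hypothetical caller sketch (not part of mad.c). Assumes "agent" came from
 * ib_register_mad_agent() and "wr" is a fully built UD send WR whose
 * wr.ud.mad_hdr points at the MAD payload, as checked by ib_post_send_mad().
 */
static int example_post_one_mad(struct ib_mad_agent *agent,
				struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	wr->next = NULL;			/* post a single request */
	ret = ib_post_send_mad(agent, wr, &bad_wr);
	if (ret)
		/* on failure, bad_wr points at the WR that was rejected */
		printk(KERN_ERR "example: ib_post_send_mad failed: %d\n", ret);
	return ret;
}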
