
📄 mad.c

📁 Kernel
💻 C
📖 Page 1 of 5
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

/*
 * Return start of fully reassembled MAD, or NULL, if MAD isn't assembled yet
 */
static struct ib_mad_private *
reassemble_recv(struct ib_mad_agent_private *mad_agent_priv,
		struct ib_mad_private *recv)
{
	/* Until we have RMPP, all receives are reassembled!... */
	INIT_LIST_HEAD(&recv->header.recv_wc.recv_buf.list);
	return recv;
}

static struct ib_mad_send_wr_private*
find_send_req(struct ib_mad_agent_private *mad_agent_priv,
	      u64 tid)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (mad_send_wr->tid == tid)
			return mad_send_wr;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
			/* Verify request has not been canceled */
			return (mad_send_wr->status == IB_WC_SUCCESS) ?
				mad_send_wr : NULL;
		}
	}
	return NULL;
}
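
/*
 * ib_mad_complete_recv() delivers a fully reassembled receive to the client.
 * For solicited MADs the matching request is looked up by transaction ID
 * (find_send_req above) and then completed with a synthesized send work
 * completion; unsolicited MADs go straight to the client's recv_handler.
 */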
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_private *recv,
				 int solicited)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	/* Fully reassemble receive before processing */
	recv = reassemble_recv(mad_agent_priv, recv);
	if (!recv) {
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
		return;
	}

	/* Complete corresponding request */
	if (solicited) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = find_send_req(mad_agent_priv,
					    recv->mad.mad.mad_hdr.tid);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(&recv->header.recv_wc);
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
			return;
		}
		/* Timeout = 0 means that we won't wait for a response */
		mad_send_wr->timeout = 0;
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		recv->header.recv_wc.wc->wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent,
						&recv->header.recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent,
						&recv->header.recv_wc);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
	}
}
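
/*
 * Completion handler for the receive queue: unmaps the buffer, builds the
 * MAD receive work completion, validates the MAD, runs the SMI checks for
 * directed-route SMPs, offers the MAD to the device driver via process_mad,
 * dispatches it to the matching MAD agent, and reposts a receive buffer.
 */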
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int solicited;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response)
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	dma_unmap_single(port_priv->device->dma_device,
			 pci_unmap_addr(&recv->header, mapping),
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),
			 DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.recv_wc.wc = wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
			goto out;
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
			goto local;
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
			goto out;
		if (!smi_check_local_dr_smp(&recv->mad.smp,
					    port_priv->device,
					    port_priv->port_num))
			goto out;
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		if (!response) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			/*
			 * Is it better to assume that
			 * it wouldn't be processed ?
			 */
			goto out;
		}

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				/* Send response */
				if (!agent_send(response, &recv->grh, wc,
						port_priv->device,
						port_priv->port_num))
					response = NULL;
				goto out;
			}
		}
	}

	/* Determine corresponding MAD agent for incoming receive MAD */
	solicited = solicited_mad(&recv->mad.mad);
	mad_agent = find_mad_agent(port_priv, &recv->mad.mad, solicited);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, recv, solicited);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_agent_private *mad_agent_priv,
			      struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
		temp_mad_send_wr = list_entry(list_item,
					      struct ib_mad_send_wr_private,
					      agent_list);
		if (time_after(mad_send_wr->timeout,
			       temp_mad_send_wr->timeout))
			break;
	}
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}

/*
 * Process a send work completion
 */
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
				    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private	*mad_agent_priv;
	unsigned long			flags;

	mad_agent_priv = container_of(mad_send_wr->agent,
				      struct ib_mad_agent_private, agent);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}
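
	/*
	 * A request may hold two references: one dropped by the send
	 * completion and, when a response is expected (timeout != 0), one
	 * dropped by the response or its timeout.  The error path above
	 * drops the response reference early, since no response will arrive.
	 * The request is completed back to the client only when the count
	 * reaches zero.
	 */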
	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_agent_priv, mad_send_wr);
		}
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
					   mad_send_wc);

	/* Release reference on agent taken when sending */
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);

	kfree(mad_send_wr);
}

static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head		*mad_list;
	struct ib_mad_qp_info		*qp_info;
	struct ib_mad_queue		*send_queue;
	struct ib_send_wr		*bad_send_wr;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_del(&mad_list->list);
		list_add_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	/* Restore client wr_id in WC and complete send */
	wc->wr_id = mad_send_wr->wr_id;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_wr,
			   (struct ib_mad_send_wc *)wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
