mad.c

From the Linux 2.6.17.4 kernel source tree · C code · 2,279 lines total · page 1 of 5

C
2,279
字号
/*
 * NOTE(review): this extract opens mid-way through
 * ib_mad_recv_done_handler(); the DMA-unmap call whose trailing arguments
 * appear below begins on the previous page of this extract.
 */
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),
			 DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	/* Let any registered snoop agents observe the inbound MAD */
	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	/*
	 * Directed-route SMPs get SMI processing first; any failed check
	 * drops the MAD, and a negative forward check means it is destined
	 * for local consumption rather than forwarding.
	 */
	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
			goto out;
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
			goto local;
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
			goto out;
		if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))
			goto out;
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		if (!response) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			/*
			 * Is it better to assume that
			 * it wouldn't be processed ?
			 */
			goto out;
		}

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				/* Device generated a reply: send it and stop */
				agent_send_response(&response->mad.mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_priv->port_num,
						    qp_info->qp->qp_num);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

/*
 * Re-arm the agent's delayed work item for the request at the head of the
 * wait list (the list is kept sorted by wait_for_response(), so the head
 * has the earliest timeout).  With an empty wait list the work item is
 * simply cancelled; otherwise it is rescheduled only when the head times
 * out earlier than the deadline currently cached in ->timeout.
 */
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			/* Deadline already passed: fire on the next tick */
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}

/*
 * Move a sent request onto the agent's wait list to await its response.
 * ->timeout is converted from a relative jiffies value to an absolute
 * deadline, and the entry is inserted so the list stays sorted by
 * ascending deadline (zero timeout inserts at the head).  If the new
 * entry becomes the head, the delayed work item is re-armed for it.
 * NOTE(review): callers appear to hold mad_agent_priv->lock — confirm.
 */
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		/* Scan backwards: most entries expire later than new ones */
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	}
	else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}

/*
 * Restart the response timer for a request: store the new timeout
 * (converted to jiffies) and re-insert the request into the sorted
 * wait list via wait_for_response().
 */
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}

/*
 * Process a send work completion.
 *
 * RMPP-capable agents get first crack at the completion; if it is
 * consumed there, nothing further happens.  A failed completion latches
 * its status into the request and drops the reference that was held for
 * the response timeout.  Only when the request's refcount falls to zero
 * is it unlinked and the client's send_handler (or the RMPP internal
 * handler) invoked; a refcount of exactly 1 with a pending timeout
 * instead parks the request on the wait list to await its response.
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private	*mad_agent_priv;
	unsigned long			flags;
	int				ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		/* Failed send: record status, drop the timeout reference */
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report a latched error status in preference to the WC status */
	if (mad_send_wr->status != IB_WC_SUCCESS )
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
 * Completion handler for a send WC: unmap the request's DMA buffers,
 * unlink it from the send queue, and — if the queue had overflowed —
 * promote the oldest overflow entry onto the hardware send queue.
 * If reposting that promoted send fails, its WC status is forced to
 * IB_WC_LOC_QP_OP_ERR and it is completed via the retry loop.
 */
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head		*mad_list;
	struct ib_mad_qp_info		*qp_info;
	struct ib_mad_queue		*send_queue;
	struct ib_send_wr		*bad_send_wr;
	struct ib_mad_send_wc		mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, header_mapping),
			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, payload_mapping),
			 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_del(&mad_list->list);
		list_add_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			/* Complete the promoted send as a local QP error */
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

/*
 * Flag every send currently on the QP's send queue for a repost attempt,
 * so flush completions seen after a QP error can retry them.
 */
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

/*
 * Handle a completion with error status.  Receive errors are ignored
 * here (QP teardown cleans them up).  A flushed send marked for retry is
 * reposted once; any other send error transitions the QP from SQE back
 * to RTS, marks the remaining queued sends for retry, and completes the
 * offending send as failed.
 */
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					&bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}

/*
 * IB MAD completion callback
 *
 * Drains the port's CQ, dispatching each successful WC to the send or
 * receive done handler by opcode and everything else to the error
 * handler.  data is the struct ib_mad_port_private for the port.
 */
static void ib_mad_completion_handler(void *data)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = (struct ib_mad_port_private *)data;
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				/* Only SEND/RECV are posted on this QP */
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}

/*
 * Cancel every outstanding send for an agent (used at deregistration).
 * Active sends are latched to IB_WC_WR_FLUSH_ERR so they complete as
 * cancelled when their WCs arrive; waiting and local requests are
 * spliced off and reported to the client directly, dropping one agent
 * reference per cancelled request.
 */
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	/* Empty local completion list as well */
	list_splice_init(&mad_agent_priv->local_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

/*
 * Look up the internal send WR that wraps send_buf: first among requests
 * waiting for a response, then among active sends (data MADs only).
 * Returns NULL if the buffer is not outstanding for this agent.
 */
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

/*
 * Change the timeout of an outstanding send; a timeout of 0 cancels it
 * (latching IB_WC_WR_FLUSH_ERR and dropping the timeout reference).
 * Returns -EINVAL when the buffer is not found or already failed.
 * NOTE(review): the function body continues on the next page of this
 * extract — the text below is cut off mid-statement.
 */
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_s

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?