
mad.c

linux-2.6.15.6 · C · Page 1 of 5
	struct ib_mad_list_head		*mad_list;
	struct ib_mad_qp_info		*qp_info;
	struct ib_mad_queue		*send_queue;
	struct ib_send_wr		*bad_send_wr;
	struct ib_mad_send_wc		mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, mapping),
			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_del(&mad_list->list);
		list_add_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					&bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
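/*
 * Note on the error paths above: a flushed send (IB_WC_WR_FLUSH_ERR)
 * whose retry flag was set by mark_sends_for_retry() is silently
 * reposted once; any other send completion error means the QP has
 * dropped into the SQE state, so mad_error_handler() moves it back to
 * RTS via ib_modify_qp(), marks the sends still on the queue for
 * retry, and completes only the offending request.
 */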
  "ib_modify_qp to RTS : %d\n", ret);			else				mark_sends_for_retry(qp_info);		}		ib_mad_send_done_handler(port_priv, wc);	}}/* * IB MAD completion callback */static void ib_mad_completion_handler(void *data){	struct ib_mad_port_private *port_priv;	struct ib_wc wc;	port_priv = (struct ib_mad_port_private *)data;	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {		if (wc.status == IB_WC_SUCCESS) {			switch (wc.opcode) {			case IB_WC_SEND:				ib_mad_send_done_handler(port_priv, &wc);				break;			case IB_WC_RECV:				ib_mad_recv_done_handler(port_priv, &wc);				break;			default:				BUG_ON(1);				break;			}		} else			mad_error_handler(port_priv, &wc);	}}static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv){	unsigned long flags;	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;	struct ib_mad_send_wc mad_send_wc;	struct list_head cancel_list;	INIT_LIST_HEAD(&cancel_list);	spin_lock_irqsave(&mad_agent_priv->lock, flags);	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,				 &mad_agent_priv->send_list, agent_list) {		if (mad_send_wr->status == IB_WC_SUCCESS) { 			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);		}	}	/* Empty wait list to prevent receives from finding a request */	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);	/* Empty local completion list as well */	list_splice_init(&mad_agent_priv->local_list, &cancel_list);	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);	/* Report all cancelled requests */	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;	mad_send_wc.vendor_err = 0;	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,				 &cancel_list, agent_list) {		mad_send_wc.send_buf = &mad_send_wr->send_buf;		list_del(&mad_send_wr->agent_list);		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,						   &mad_send_wc);		atomic_dec(&mad_agent_priv->refcount);	}}static struct ib_mad_send_wr_private*find_send_wr(struct ib_mad_agent_private *mad_agent_priv,	     struct ib_mad_send_buf *send_buf){	struct ib_mad_send_wr_private *mad_send_wr;	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,			    agent_list) {		if (&mad_send_wr->send_buf == send_buf)			return mad_send_wr;	}	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,			    agent_list) {		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&		    &mad_send_wr->send_buf == send_buf)			return mad_send_wr;	}	return NULL;}int ib_modify_mad(struct ib_mad_agent *mad_agent,		  struct ib_mad_send_buf *send_buf, u32 timeout_ms){	struct ib_mad_agent_private *mad_agent_priv;	struct ib_mad_send_wr_private *mad_send_wr;	unsigned long flags;	int active;	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,				      agent);	spin_lock_irqsave(&mad_agent_priv->lock, flags);	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);		return -EINVAL;	}	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);	if (!timeout_ms) {		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);	}	mad_send_wr->send_buf.timeout_ms = timeout_ms;	if (active)		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);	else		ib_reset_mad_timeout(mad_send_wr, timeout_ms);	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);	return 0;}EXPORT_SYMBOL(ib_modify_mad);void ib_cancel_mad(struct ib_mad_agent *mad_agent,		   struct 
static void local_completions(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int recv = 0;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				goto local_send_completion;
			}

			recv = 1;
			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc((unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_del(&local->completion_list);
		atomic_dec(&mad_agent_priv->refcount);
		if (!recv)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries--)
		return -ETIMEDOUT;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
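/*
 * retry_send() above consumes one of the request's remaining retries
 * and reposts it (deferring to the RMPP layer first when the agent
 * speaks RMPP); timeout_sends() below walks the wait list in timeout
 * order, retries expired requests, and reports IB_WC_RESP_TIMEOUT_ERR
 * once the retries are exhausted.
 */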
static void timeout_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = (struct ib_mad_agent_private *)data;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;

	queue_work(port_priv->wq, &port_priv->work);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg
