mad.c

来自「LINUX 2.6.17.4的源码」· C语言 代码 · 共 2,279 行 · 第 1/5 页

C
2,279
字号
		/* NOTE(review): this is the tail of ib_register_mad_snoop();
		 * the function head lies before this chunk of the file. */
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}
	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

/*
 * Drop one reference on a MAD agent; signal the agent's completion when
 * the last reference goes away so unregister_mad_agent() can proceed.
 */
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

/*
 * Drop one reference on a snoop agent; signal its completion when the
 * last reference goes away so unregister_mad_snoop() can proceed.
 */
static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

/*
 * Tear down a MAD agent: cancel outstanding work, unhook it from the
 * port's registration tables, wait for all references to drain, then
 * free its resources.
 */
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	/* Remove the agent from the port's registration table and list */
	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	/* Drop the initial reference, then block until all other holders
	 * (in-flight completions, snooped handlers, ...) release theirs. */
	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

/*
 * Tear down a snoop agent: clear its slot in the QP's snoop table,
 * wait for all references to drain, then free it.
 */
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

/* Return nonzero if the MAD is a response (or a trap repress). */
static inline int response_mad(struct ib_mad *mad)
{
	/* Trap represses are responses although response bit is reset */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}

/* Unlink a MAD from the work queue it sits on, under the queue's lock. */
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

/*
 * Hand a send completion to every registered snoop agent whose flags
 * match mad_snoop_flags.
 */
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		/* Pin the snoop agent, then drop the lock so the handler
		 * runs unlocked; reacquire before advancing the scan. */
		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

/*
 * Hand a receive completion to every registered snoop agent whose flags
 * match mad_snoop_flags.  Same lock drop/reacquire pattern as snoop_send().
 */
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

/*
 * Synthesize a successful receive work completion for an SMP that is
 * processed locally instead of being posted to the wire.
 */
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp_num = IB_QP0;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	    !smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}
	/* Check to post send on QP or process locally */
	ret = smi_check_local_smp(smp, device);
	if (!ret)
		goto out;

	/* GFP_ATOMIC: this path may run in a context that cannot sleep */
	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		/* Device produced a reply; queue it back to this agent's
		 * receive handler if it has one, otherwise discard it. */
		if (response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
						        &mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
			kfree(local);
			ret = 0;
			goto out;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

/*
 * Compute the padding needed so data_len fills a whole number of MAD
 * data segments of (sizeof(struct ib_mad) - hdr_len) bytes each.
 */
static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		/* An exact multiple needs no padding */
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

/* Free every RMPP data segment queued on this send work request. */
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

/*
 * Allocate the per-segment buffers for an RMPP send and initialize the
 * RMPP header.  Returns 0 on success or -ENOMEM (with any partially
 * allocated segments freed).
 */
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

/*
 * Allocate and initialize a MAD send buffer plus its private send work
 * request (laid out after the MAD buffer in one allocation), optionally
 * with RMPP segments.  Takes a reference on the owning agent.
 */
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	/* RMPP requires agent support; non-RMPP must fit a single MAD */
	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	/* With RMPP only the header lives here; data is in the segments */
	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* The private work request sits immediately after the MAD buffer */
	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

/* Return the data offset for a MAD of the given management class.
 * NOTE(review): function continues past the end of this chunk. */
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?