mad.c

From the Linux 2.6.17.4 source tree · C code · 2,279 lines total · page 1/5

C
2,279
Font size
		*vendor_table = vendor;	}	if (!(*vendor_table)->vendor_class[vclass]) {		/* Allocate table for this management vendor class */		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);		if (!vendor_class) {			printk(KERN_ERR PFX "No memory for "			       "ib_mad_mgmt_vendor_class\n");			goto error2;		}		(*vendor_table)->vendor_class[vclass] = vendor_class;	}	for (i = 0; i < MAX_MGMT_OUI; i++) {		/* Is there matching OUI for this vendor class ? */		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],			    mad_reg_req->oui, 3)) {			method = &(*vendor_table)->vendor_class[						vclass]->method_table[i];			BUG_ON(!*method);			goto check_in_use;		}	}	for (i = 0; i < MAX_MGMT_OUI; i++) {		/* OUI slot available ? */		if (!is_vendor_oui((*vendor_table)->vendor_class[				vclass]->oui[i])) {			method = &(*vendor_table)->vendor_class[				vclass]->method_table[i];			BUG_ON(*method);			/* Allocate method table for this OUI */			if ((ret = allocate_method_table(method)))				goto error3;			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],			       mad_reg_req->oui, 3);			goto check_in_use;		}	}	printk(KERN_ERR PFX "All OUI slots in use\n");	goto error3;check_in_use:	/* Now, make sure methods are not already in use */	if (method_in_use(method, mad_reg_req))		goto error4;	/* Finally, add in methods being registered */	for (i = find_first_bit(mad_reg_req->method_mask,				IB_MGMT_MAX_METHODS);	     i < IB_MGMT_MAX_METHODS;	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,			       1+i)) {		(*method)->agent[i] = agent_priv;	}	return 0;error4:	/* Remove any methods for this mad agent */	remove_methods_mad_agent(*method, agent_priv);	/* Now, check to see if there are any methods in use */	if (!check_method_table(*method)) {		/* If not, release management method table */		kfree(*method);		*method = NULL;	}	ret = -EINVAL;error3:	if (vendor_class) {		(*vendor_table)->vendor_class[vclass] = NULL;		kfree(vendor_class);	}error2:	if (vendor) {		*vendor_table = 
NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

/*
 * remove_mad_reg_req - undo the routing-table registrations made when
 * @agent_priv registered with a MAD registration request.
 *
 * Removes every method-table slot pointing at this agent from the
 * per-port class table (and, for vendor classes, the per-OUI vendor
 * tables), then frees each table that is left completely empty.
 *
 * NOTE(review): the version/class tables are modified without taking a
 * lock here - presumably the caller holds port_priv->reg_lock; confirm
 * against the (unseen) callers.
 */
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	/* mgmt_class here is the *converted* index into class->method_table */
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;
	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		/* Locate the OUI slot this agent registered under */
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				/* Free the OUI slot for reuse */
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

/*
 * find_mad_agent - locate the registered agent that should consume an
 * incoming MAD on this port.
 *
 * Holds port_priv->reg_lock for the whole lookup.  Responses are routed
 * by matching the high 32 bits of the transaction ID against each
 * agent's hi_tid; requests are routed via the version/class/method
 * tables (plus OUI for vendor classes).  On success the agent's
 * refcount is bumped before the lock is dropped; returns NULL if no
 * suitable agent (or no receive handler) is found.
 */
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
*/
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			/* Index by method with the response bit masked off */
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		/* An agent with no receive handler cannot consume the MAD */
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}

out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

/*
 * validate_mad - sanity-check an incoming MAD against the QP it arrived
 * on.  Returns 1 if the MAD is acceptable, 0 if it should be dropped:
 * unknown base version, SMI class on a QP other than 0, or non-SMI
 * class on QP0.
 */
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

/*
 * is_data_mad - nonzero when the MAD should be treated as plain data:
 * the agent is not using RMPP, the RMPP active flag is clear, or the
 * RMPP segment type is DATA.
 */
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

/* Do the sent MAD and the received MAD carry the same management class? */
static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

/*
 * rcv_has_same_gid - decide whether the received MAD plausibly came
 * from the peer the send WR was addressed to, comparing GRH GIDs when
 * both sides have one, otherwise DLID vs. source LID.  Errs toward 0
 * (no match) to avoid false positives.
 */
static inline int rcv_has_same_gid(struct ib_mad_send_wr_private *wr,
				   struct ib_mad_recv_wc *rwc )
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;

	send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
		     mad_hdr.method & IB_MGMT_METHOD_RESP;
	rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;

	if (!send_resp && rcv_resp)
		/* is request/response. GID/LIDs are both local (same). */
		return 1;
	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!(attr.ah_flags & IB_AH_GRH) && !(rwc->wc->wc_flags & IB_WC_GRH))
		return attr.dlid == rwc->wc->slid;
	else if ((attr.ah_flags & IB_AH_GRH) &&
		 (rwc->wc->wc_flags & IB_WC_GRH))
		return memcmp(attr.grh.dgid.raw,
			      rwc->recv_buf.grh->sgid.raw, 16) == 0;
	else
		/* one has GID, other does not.
Assume different */
		return 0;
}

/*
 * ib_find_send_mad - find the outstanding send WR that a received
 * response completes.
 *
 * Searches the agent's wait_list first, then its send_list (a response
 * can arrive before the send-completion notification), matching on
 * transaction ID, management class, and GID/LID.  A canceled request
 * (status != IB_WC_SUCCESS) yields NULL.
 *
 * NOTE(review): no locking here - presumably the caller holds
 * mad_agent_priv->lock (as ib_mad_complete_recv below does); confirm.
 */
struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad *mad;

	mad = (struct ib_mad *)mad_recv_wc->recv_buf.mad;
	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if ((mad_send_wr->tid == mad->mad_hdr.tid) &&
		    rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
		    rcv_has_same_gid(mad_send_wr, mad_recv_wc))
			return mad_send_wr;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    mad_send_wr->tid == mad->mad_hdr.tid &&
		    mad_send_wr->timeout &&
		    rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
		    rcv_has_same_gid(mad_send_wr, mad_recv_wc)) {
			/* Verify request has not been canceled */
			return (mad_send_wr->status == IB_WC_SUCCESS) ?
				mad_send_wr : NULL;
		}
	}
	return NULL;
}

/*
 * ib_mark_mad_done - a matching response arrived: stop the retry timer
 * and, if this was the last reference, move the send WR from its
 * current list onto the agent's done_list.
 */
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1) {
		list_del(&mad_send_wr->agent_list);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
	}
}

/*
 * ib_mad_complete_recv - deliver a received MAD to its agent.
 *
 * Runs the MAD through RMPP reassembly when the agent uses RMPP (which
 * may consume it and return NULL).  For a response, the matching send
 * WR is located under mad_agent_priv->lock, the receive handler is
 * invoked, and then the corresponding send is completed - defined
 * behavior is to complete the response before the request.  Drops the
 * agent reference taken by the caller on every path.
 */
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			/* RMPP consumed the segment; nothing to deliver yet */
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (response_mad(mad_recv_wc->recv_buf.mad)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			/* Unsolicited (or canceled) response: drop it */
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			deref_mad_agent(mad_agent_priv);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

/*
 * ib_mad_recv_done_handler - completion handler for a receive WC.
 * (Definition continues past this view.)
 */
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private
*recv, *response;	struct ib_mad_list_head *mad_list;	struct ib_mad_agent_private *mad_agent;	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);	if (!response)		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "		       "for response buffer\n");	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;	qp_info = mad_list->mad_queue->qp_info;	dequeue_mad(mad_list);	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,				    mad_list);	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);	dma_unmap_single(port_priv->device->dma_device,			 pci_unmap_addr(&recv->header, mapping),

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?