user_mad.c (linux-2.6.15.6, drivers/infiniband/core), C source. Page 1 of 2.
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 4010 2005-11-09 23:11:56Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/kref.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        IB_UMAD_MAX_PORTS  = 64,
        IB_UMAD_MAX_AGENTS = 32,

        IB_UMAD_MAJOR      = 231,
        IB_UMAD_MINOR_BASE = 0
};

/*
 * Our lifetime rules for these structs are the following: each time a
 * device special file is opened, we look up the corresponding struct
 * ib_umad_port by minor in the umad_port[] table while holding the
 * port_lock.  If this lookup succeeds, we take a reference on the
 * ib_umad_port's struct ib_umad_device while still holding the
 * port_lock; if the lookup fails, we fail the open().  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we clear all of its
 * ib_umad_ports from umad_port[] while holding port_lock before
 * dropping the module's reference to the ib_umad_device.  This is
 * always safe because any open() calls will either succeed and obtain
 * a reference before we clear the umad_port[] entries, or fail after
 * we clear the umad_port[] entries.
 */
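/*
 * A sketch of the open() lookup those rules describe; the real
 * ib_umad_open() is on page 2 of this listing, and the helper name
 * here is hypothetical, for illustration only:
 *
 *      static struct ib_umad_port *umad_lookup_port(int minor)
 *      {
 *              struct ib_umad_port *port;
 *
 *              spin_lock(&port_lock);
 *              port = umad_port[minor];
 *              if (port)
 *                      // Hold the device as long as the file stays
 *                      // open; the matching kref_put() is in close().
 *                      kref_get(&port->umad_dev->ref);
 *              spin_unlock(&port_lock);
 *
 *              return port;    // NULL: entry cleared, open() must fail
 *      }
 */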
struct ib_umad_port {
        struct cdev           *dev;
        struct class_device   *class_dev;

        struct cdev           *sm_dev;
        struct class_device   *sm_class_dev;
        struct semaphore       sm_sem;

        struct rw_semaphore    mutex;
        struct list_head       file_list;

        struct ib_device      *ib_dev;
        struct ib_umad_device *umad_dev;
        int                    dev_num;
        u8                     port_num;
};

struct ib_umad_device {
        int                  start_port, end_port;
        struct kref          ref;
        struct ib_umad_port  port[0];
};

struct ib_umad_file {
        struct ib_umad_port    *port;
        struct list_head        recv_list;
        struct list_head        port_list;
        spinlock_t              recv_lock;
        wait_queue_head_t       recv_wait;
        struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
        int                     agents_dead;
};

struct ib_umad_packet {
        struct ib_mad_send_buf *msg;
        struct list_head   list;
        int                length;
        struct ib_user_mad mad;
};

static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);

static void ib_umad_release_dev(struct kref *ref)
{
        struct ib_umad_device *dev =
                container_of(ref, struct ib_umad_device, ref);

        kfree(dev);
}

/* caller must hold port->mutex at least for reading */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
        return file->agents_dead ? NULL : file->agent[id];
}

static int queue_packet(struct ib_umad_file *file,
                        struct ib_mad_agent *agent,
                        struct ib_umad_packet *packet)
{
        int ret = 1;

        down_read(&file->port->mutex);
        for (packet->mad.hdr.id = 0;
             packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
             packet->mad.hdr.id++)
                if (agent == __get_agent(file, packet->mad.hdr.id)) {
                        spin_lock_irq(&file->recv_lock);
                        list_add_tail(&packet->list, &file->recv_list);
                        spin_unlock_irq(&file->recv_lock);
                        wake_up_interruptible(&file->recv_wait);
                        ret = 0;
                        break;
                }

        up_read(&file->port->mutex);

        return ret;
}

static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *send_wc)
{
        struct ib_umad_file *file = agent->context;
        struct ib_umad_packet *timeout;
        struct ib_umad_packet *packet = send_wc->send_buf->context[0];

        ib_destroy_ah(packet->msg->ah);
        ib_free_send_mad(packet->msg);

        if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
                timeout = kzalloc(sizeof *timeout + IB_MGMT_MAD_HDR, GFP_KERNEL);
                if (!timeout)
                        goto out;

                timeout->length         = IB_MGMT_MAD_HDR;
                timeout->mad.hdr.id     = packet->mad.hdr.id;
                timeout->mad.hdr.status = ETIMEDOUT;
                memcpy(timeout->mad.data, packet->mad.data,
                       sizeof (struct ib_mad_hdr));

                if (!queue_packet(file, agent, timeout))
                        return;
        }
out:
        kfree(packet);
}
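/*
 * Completion handlers.  send_handler() above frees the send buffer and
 * address handle once the hardware is done with them; if the send timed
 * out waiting for a response (IB_WC_RESP_TIMEOUT_ERR), it queues a
 * synthetic packet whose hdr.status is ETIMEDOUT so that a blocked
 * read() reports the failure.  recv_handler() below copies each
 * incoming MAD into an ib_umad_packet and queues it on the owning
 * file's recv_list.
 */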
static void recv_handler(struct ib_mad_agent *agent,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_umad_file *file = agent->context;
        struct ib_umad_packet *packet;
        int length;

        if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
                goto out;

        length = mad_recv_wc->mad_len;
        packet = kzalloc(sizeof *packet + length, GFP_KERNEL);
        if (!packet)
                goto out;

        packet->length = length;

        ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);

        packet->mad.hdr.status    = 0;
        packet->mad.hdr.length    = length + sizeof (struct ib_user_mad);
        packet->mad.hdr.qpn       = cpu_to_be32(mad_recv_wc->wc->src_qp);
        packet->mad.hdr.lid       = cpu_to_be16(mad_recv_wc->wc->slid);
        packet->mad.hdr.sl        = mad_recv_wc->wc->sl;
        packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
        packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
        if (packet->mad.hdr.grh_present) {
                /* XXX parse GRH */
                packet->mad.hdr.gid_index     = 0;
                packet->mad.hdr.hop_limit     = 0;
                packet->mad.hdr.traffic_class = 0;
                memset(packet->mad.hdr.gid, 0, 16);
                packet->mad.hdr.flow_label    = 0;
        }

        if (queue_packet(file, agent, packet))
                kfree(packet);

out:
        ib_free_recv_mad(mad_recv_wc);
}

static ssize_t ib_umad_read(struct file *filp, char __user *buf,
                            size_t count, loff_t *pos)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet;
        ssize_t ret;

        if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
                return -EINVAL;

        spin_lock_irq(&file->recv_lock);

        while (list_empty(&file->recv_list)) {
                spin_unlock_irq(&file->recv_lock);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->recv_wait,
                                             !list_empty(&file->recv_list)))
                        return -ERESTARTSYS;

                spin_lock_irq(&file->recv_lock);
        }

        packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
        list_del(&packet->list);

        spin_unlock_irq(&file->recv_lock);

        if (count < packet->length + sizeof (struct ib_user_mad)) {
                /* Return length needed (and first RMPP segment) if too small */
                if (copy_to_user(buf, &packet->mad,
                                 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
                        ret = -EFAULT;
                else
                        ret = -ENOSPC;
        } else if (copy_to_user(buf, &packet->mad,
                                packet->length + sizeof (struct ib_user_mad)))
                ret = -EFAULT;
        else
                ret = packet->length + sizeof (struct ib_user_mad);

        if (ret < 0) {
                /* Requeue packet */
                spin_lock_irq(&file->recv_lock);
                list_add(&packet->list, &file->recv_list);
                spin_unlock_irq(&file->recv_lock);
        } else
                kfree(packet);

        return ret;
}
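/*
 * The read() contract for userspace, as implemented above: the buffer
 * must hold at least sizeof (struct ib_user_mad) + sizeof (struct
 * ib_mad) bytes; if the queued packet is bigger than the buffer,
 * read() copies the header and the first MAD, returns -ENOSPC with the
 * full size available in hdr.length, and requeues the packet, so a
 * retry with a larger buffer picks up the same MAD.
 */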
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet;
        struct ib_mad_agent *agent;
        struct ib_ah_attr ah_attr;
        struct ib_ah *ah;
        struct ib_rmpp_mad *rmpp_mad;
        u8 method;
        __be64 *tid;
        int ret, length, hdr_len, copy_offset;
        int rmpp_active, has_rmpp_header;

        if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
                return -EINVAL;

        length = count - sizeof (struct ib_user_mad);
        packet = kmalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
        if (!packet)
                return -ENOMEM;

        if (copy_from_user(&packet->mad, buf,
                           sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)) {
                ret = -EFAULT;
                goto err;
        }

        if (packet->mad.hdr.id < 0 ||
            packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
                ret = -EINVAL;
                goto err;
        }

        down_read(&file->port->mutex);

        agent = __get_agent(file, packet->mad.hdr.id);
        if (!agent) {
                ret = -EINVAL;
                goto err_up;
        }

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
        ah_attr.sl            = packet->mad.hdr.sl;
        ah_attr.src_path_bits = packet->mad.hdr.path_bits;
        ah_attr.port_num      = file->port->port_num;
        if (packet->mad.hdr.grh_present) {
                ah_attr.ah_flags = IB_AH_GRH;
                memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
                ah_attr.grh.flow_label    = be32_to_cpu(packet->mad.hdr.flow_label);
                ah_attr.grh.hop_limit     = packet->mad.hdr.hop_limit;
                ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
        }

        ah = ib_create_ah(agent->qp->pd, &ah_attr);
        if (IS_ERR(ah)) {
                ret = PTR_ERR(ah);
                goto err_up;
        }

        rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
        if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
                hdr_len = IB_MGMT_SA_HDR;
                copy_offset = IB_MGMT_RMPP_HDR;
                has_rmpp_header = 1;
        } else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
                   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
                hdr_len = IB_MGMT_VENDOR_HDR;
                copy_offset = IB_MGMT_RMPP_HDR;
                has_rmpp_header = 1;
        } else {
                hdr_len = IB_MGMT_MAD_HDR;
                copy_offset = IB_MGMT_MAD_HDR;
                has_rmpp_header = 0;
        }

        if (has_rmpp_header)
                rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
                              IB_MGMT_RMPP_FLAG_ACTIVE;
        else
                rmpp_active = 0;

        /* Validate that the management class can support RMPP */
        if (rmpp_active && !agent->rmpp_version) {
                ret = -EINVAL;
                goto err_ah;
        }

        packet->msg = ib_create_send_mad(agent,
                                         be32_to_cpu(packet->mad.hdr.qpn),
                                         0, rmpp_active,
                                         hdr_len, length - hdr_len,
                                         GFP_KERNEL);
        if (IS_ERR(packet->msg)) {
                ret = PTR_ERR(packet->msg);
                goto err_ah;
        }

        packet->msg->ah         = ah;
        packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
        packet->msg->retries    = packet->mad.hdr.retries;
        packet->msg->context[0] = packet;

        /* Copy MAD headers (RMPP header in place) */
        memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

        /* Now, copy rest of message from user into send buffer */
        if (copy_from_user(packet->msg->mad + copy_offset,
                           buf + sizeof (struct ib_user_mad) + copy_offset,
                           length - copy_offset)) {
                ret = -EFAULT;
                goto err_msg;
        }

        /*
         * If userspace is generating a request that will generate a
         * response, we need to make sure the high-order part of the
         * transaction ID matches the agent being used to send the
         * MAD.
         */
        method = ((struct ib_mad_hdr *) packet->msg->mad)->method;

        if (!(method & IB_MGMT_METHOD_RESP)       &&
            method != IB_MGMT_METHOD_TRAP_REPRESS &&
            method != IB_MGMT_METHOD_SEND) {
                tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
                *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
                                   (be64_to_cpup(tid) & 0xffffffff));
        }

        ret = ib_post_send_mad(packet->msg, NULL);
        if (ret)
                goto err_msg;

        up_read(&file->port->mutex);

        return count;

err_msg:
        ib_free_send_mad(packet->msg);

err_ah:
        ib_destroy_ah(ah);

err_up:
        up_read(&file->port->mutex);

err:
        kfree(packet);
        return ret;
}

static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ib_umad_file *file = filp->private_data;

        /* we will always be able to post a MAD send */
        unsigned int mask = POLLOUT | POLLWRNORM;

        poll_wait(filp, &file->recv_wait, wait);

        if (!list_empty(&file->recv_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}

static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
{
        struct ib_user_mad_reg_req ureq;
        struct ib_mad_reg_req req;
        struct ib_mad_agent *agent;
        int agent_id;
        int ret;

        down_write(&file->port->mutex);

        if (!file->port->ib_dev) {
                ret = -EPIPE;
                goto out;
        }

        if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
                ret = -EFAULT;
                goto out;
        }

        if (ureq.qpn != 0 && ureq.qpn != 1) {
                ret = -EINVAL;
                goto out;
        }

        for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
                if (!__get_agent(file, agent_id))
                        goto found;

        ret = -ENOMEM;
        goto out;

found:
        if (ureq.mgmt_class) {
                req.mgmt_class         = ureq.mgmt_class;
                req.mgmt_class_version = ureq.mgmt_class_version;
                memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
                memcpy(req.oui,         ureq.oui,         sizeof req.oui);
        }
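The listing breaks off here; the rest of ib_umad_reg_agent() and the file's open/close and setup paths are on page 2. For orientation, below is a minimal userspace sketch of how this driver is exercised through its character device: open a /dev/infiniband/umadN node, register an agent with the IB_USER_MAD_REGISTER_AGENT ioctl (serviced by ib_umad_reg_agent() above), write() a struct ib_user_mad header followed by the MAD payload, and read() responses back. This is an illustration, not part of the kernel file: the device path, destination LID, and timeout are assumed example values, and the ioctl name and structures come from <rdma/ib_user_mad.h>.

#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <rdma/ib_user_mad.h>

int main(void)
{
        struct ib_user_mad_reg_req req;
        struct ib_user_mad *mad;
        size_t len = sizeof *mad + 256;   /* header plus one 256-byte MAD */
        ssize_t n;
        int fd;

        /* Device node name depends on the udev setup; umad0 is typical. */
        fd = open("/dev/infiniband/umad0", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /*
         * Register an agent on QP1.  mgmt_class = 0 registers no
         * unsolicited-receive class: we can send requests and read back
         * the responses matched to our transaction IDs.  The driver
         * returns the agent slot it picked in req.id.
         */
        memset(&req, 0, sizeof req);
        req.qpn = 1;
        if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req)) {
                perror("register agent");
                return 1;
        }

        mad = calloc(1, len);
        if (!mad)
                return 1;
        mad->hdr.id         = req.id;
        mad->hdr.qpn        = htobe32(1);
        mad->hdr.lid        = htobe16(1);     /* example destination LID */
        mad->hdr.timeout_ms = 1000;
        /* ... a real client would build a valid MAD in mad->data here ... */

        if (write(fd, mad, len) != (ssize_t) len) {
                perror("send MAD");
                return 1;
        }

        /*
         * Read the reply.  With the empty payload above the send will
         * typically just time out, arriving as a packet whose
         * hdr.status is ETIMEDOUT (see send_handler() in the listing).
         * On -ENOSPC, hdr.length holds the size needed for a retry.
         */
        n = read(fd, mad, len);
        if (n < 0 && errno == ENOSPC) {
                len = mad->hdr.length;
                mad = realloc(mad, len);
                n = mad ? read(fd, mad, len) : -1;
        }
        if (n < 0) {
                perror("recv MAD");
                return 1;
        }

        printf("received %zd bytes, status 0x%x\n", n, mad->hdr.status);
        close(fd);
        return 0;
}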
