
iser_verbs.c

Linux kernel source code
C
Page 1 of 2
/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/version.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_CQ_LEN		((ISER_QP_MAX_RECV_DTOS + \
				ISER_QP_MAX_REQ_DTOS) *   \
				 ISCSI_ISER_MAX_CONN)

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

/**
 * iser_create_device_ib_res - creates the Protection Domain (PD), Completion
 * Queue (CQ) and DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	device->cq = ib_create_cq(device->ib_device,
				  iser_cq_callback,
				  iser_cq_event_callback,
				  (void *)device,
				  ISER_MAX_CQ_LEN, 0);
	if (IS_ERR(device->cq))
		goto cq_err;

	if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP))
		goto cq_arm_err;

	tasklet_init(&device->cq_tasklet,
		     iser_cq_tasklet_fn,
		     (unsigned long)device);

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	return 0;

dma_mr_err:
	tasklet_kill(&device->cq_tasklet);
cq_arm_err:
	ib_destroy_cq(device->cq);
cq_err:
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	BUG_ON(device->mr == NULL);

	tasklet_kill(&device->cq_tasklet);

	(void)ib_dereg_mr(device->mr);
	(void)ib_destroy_cq(device->cq);
	(void)ib_dealloc_pd(device->pd);

	device->mr = NULL;
	device->cq = NULL;
	device->pd = NULL;
}

/**
 * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
	struct iser_device	*device;
	struct ib_qp_init_attr	init_attr;
	int			ret;
	struct ib_fmr_pool_param params;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
				    (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
				    GFP_KERNEL);
	if (!ib_conn->page_vec) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG elements are not start/end *
	 * page aligned, the map would be of N+1 pages       */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = ISCSI_DEF_XMIT_CMDS_MAX * 2;
	params.dirty_watermark	 = ISCSI_DEF_XMIT_CMDS_MAX;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(ib_conn->fmr_pool)) {
		ret = PTR_ERR(ib_conn->fmr_pool);
		goto fmr_pool_err;
	}

	memset(&init_attr, 0, sizeof init_attr);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->cq;
	init_attr.recv_cq	= device->cq;
	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN;
	init_attr.cap.max_recv_sge = 2;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto qp_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
	return ret;

qp_err:
	(void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
fmr_pool_err:
	kfree(ib_conn->page_vec);
alloc_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
 * -1 on failure
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	if (ib_conn->cma_id != NULL)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;

	kfree(ib_conn->page_vec);

	return 0;
}

/**
 * based on the resolved device node GUID, see if there is already an
 * allocated device for this node GUID. If there is no such device, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct list_head    *p_list;
	struct iser_device  *device = NULL;

	mutex_lock(&ig.device_list_mutex);

	p_list = ig.device_list.next;
	while (p_list != &ig.device_list) {
		device = list_entry(p_list, struct iser_device, ig_list);
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			break;
		/* no match - reset device and advance to the next entry */
		device = NULL;
		p_list = p_list->next;
	}

	if (device == NULL) {
		device = kzalloc(sizeof *device, GFP_KERNEL);
		if (device == NULL)
			goto out;
		/* assign this ib_device to the new iser device */
		device->ib_device = cma_id->device;
		/* init the device and link it into ig device list */
		if (iser_create_device_ib_res(device)) {
			kfree(device);
			device = NULL;
			goto out;
		}
		list_add(&device->ig_list, &ig.device_list);
	}
out:
	BUG_ON(device == NULL);
	device->refcount++;
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_err("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

int iser_conn_state_comp(struct iser_conn *ib_conn,
			enum iser_ib_conn_state comp)
{
	int ret;

	spin_lock_bh(&ib_conn->lock);
	ret = (ib_conn->state == comp);
	spin_unlock_bh(&ib_conn->lock);
	return ret;
}

static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
				     enum iser_ib_conn_state comp,
				     enum iser_ib_conn_state exch)
{
	int ret;

	spin_lock_bh(&ib_conn->lock);
	if ((ret = (ib_conn->state == comp)))
		ib_conn->state = exch;
	spin_unlock_bh(&ib_conn->lock);
	return ret;
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
static void iser_conn_release(struct iser_conn *ib_conn)
{
	struct iser_device  *device = ib_conn->device;

	BUG_ON(ib_conn->state != ISER_CONN_DOWN);

	mutex_lock(&ig.connlist_mutex);
	list_del(&ib_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	iser_free_ib_conn_res(ib_conn);
	ib_conn->device = NULL;
	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
	if (device != NULL)
		iser_device_try_release(device);
	if (ib_conn->iser_conn)
		ib_conn->iser_conn->ib_conn = NULL;
	kfree(ib_conn);
}

/**
 * triggers start of the disconnect procedures and waits for them to be done
 */
void iser_conn_terminate(struct iser_conn *ib_conn)
{
	int err = 0;

	/* change the ib conn state only if the conn is UP, however always call
	 * rdma_disconnect since this is the only way to cause the CMA to change
	 * the QP state to ERROR
	 */

	iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
	err = rdma_disconnect(ib_conn->cma_id);
	if (err)
		iser_err("Failed to disconnect, conn: 0x%p err %d\n",
			 ib_conn, err);
	wait_event_interruptible(ib_conn->wait,
				 ib_conn->state == ISER_CONN_DOWN);

	iser_conn_release(ib_conn);
}

static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;
	ib_conn = (struct iser_conn *)cma_id->context;

	ib_conn->state = ISER_CONN_DOWN;
	wake_up_interruptible(&ib_conn->wait);
}

static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *ib_conn;
	int    ret;

	device = iser_device_find_by_ib_device(cma_id);
	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->device = device;

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
	}
	return;
}

static void
iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;

	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
	if (ret)
		goto failure;

	iser_dbg("path.mtu is %d setting it to %d\n",
		 cma_id->route.path_rec->mtu, IB_MTU_1024);

	/* we must set the MTU to 1024 as this is what the target is assuming */
	if (cma_id->route.path_rec->mtu > IB_MTU_1024)
		cma_id->route.path_rec->mtu = IB_MTU_1024;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 4;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
