⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ib_srp.c

📁 linux内核源码
💻 C
📖 第 1 页 / 共 4 页
字号:
	target->cm_id = new_cm_id;	qp_attr.qp_state = IB_QPS_RESET;	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);	if (ret)		goto err;	ret = srp_init_qp(target, target->qp);	if (ret)		goto err;	while (ib_poll_cq(target->cq, 1, &wc) > 0)		; /* nothing */	spin_lock_irq(target->scsi_host->host_lock);	list_for_each_entry_safe(req, tmp, &target->req_queue, list)		srp_reset_req(target, req);	spin_unlock_irq(target->scsi_host->host_lock);	target->rx_head	 = 0;	target->tx_head	 = 0;	target->tx_tail  = 0;	target->qp_in_error = 0;	ret = srp_connect_target(target);	if (ret)		goto err;	spin_lock_irq(target->scsi_host->host_lock);	if (target->state == SRP_TARGET_CONNECTING) {		ret = 0;		target->state = SRP_TARGET_LIVE;	} else		ret = -EAGAIN;	spin_unlock_irq(target->scsi_host->host_lock);	return ret;err:	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);	/*	 * We couldn't reconnect, so kill our target port off.	 * However, we have to defer the real removal because we might	 * be in the context of the SCSI error handler now, which	 * would deadlock if we call scsi_remove_host().	 */	spin_lock_irq(target->scsi_host->host_lock);	if (target->state == SRP_TARGET_CONNECTING) {		target->state = SRP_TARGET_DEAD;		INIT_WORK(&target->work, srp_remove_work);		schedule_work(&target->work);	}	spin_unlock_irq(target->scsi_host->host_lock);	return ret;}static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,		       int sg_cnt, struct srp_request *req,		       struct srp_direct_buf *buf){	u64 io_addr = 0;	u64 *dma_pages;	u32 len;	int page_cnt;	int i, j;	int ret;	struct srp_device *dev = target->srp_host->dev;	struct ib_device *ibdev = dev->dev;	struct scatterlist *sg;	if (!dev->fmr_pool)		return -ENODEV;	if (srp_target_is_mellanox(target) &&	    (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))		return -EINVAL;	len = page_cnt = 0;	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {			if (i > 0)				return -EINVAL;			else				++page_cnt;		}		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &		    ~dev->fmr_page_mask) {			if (i < sg_cnt - 1)				return -EINVAL;			else				++page_cnt;		}		len += dma_len;	}	page_cnt += len >> dev->fmr_page_shift;	if (page_cnt > SRP_FMR_SIZE)		return -ENOMEM;	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);	if (!dma_pages)		return -ENOMEM;	page_cnt = 0;	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);		for (j = 0; j < dma_len; j += dev->fmr_page_size)			dma_pages[page_cnt++] =				(ib_sg_dma_address(ibdev, sg) &				 dev->fmr_page_mask) + j;	}	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,					dma_pages, page_cnt, io_addr);	if (IS_ERR(req->fmr)) {		ret = PTR_ERR(req->fmr);		req->fmr = NULL;		goto out;	}	buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &			       ~dev->fmr_page_mask);	buf->key = cpu_to_be32(req->fmr->fmr->rkey);	buf->len = cpu_to_be32(len);	ret = 0;out:	kfree(dma_pages);	return ret;}static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,			struct srp_request *req){	struct scatterlist *scat;	struct srp_cmd *cmd = req->cmd->buf;	int len, nents, count;	u8 fmt = SRP_DATA_DESC_DIRECT;	struct srp_device *dev;	struct ib_device *ibdev;	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)		return sizeof (struct srp_cmd);	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&	    scmnd->sc_data_direction != DMA_TO_DEVICE) {		printk(KERN_WARNING PFX "Unhandled data direction %d\n",		       scmnd->sc_data_direction);		return -EINVAL;	}	nents = scsi_sg_count(scmnd);	scat  = scsi_sglist(scmnd);	dev = target->srp_host->dev;	ibdev = dev->dev;	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);	fmt = SRP_DATA_DESC_DIRECT;	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);	if (count == 1) {		/*		 * The midlayer only generated a single gather/scatter		 * entry, or DMA mapping coalesced everything to a		 * single entry.  So a direct descriptor along with		 * the DMA MR suffices.		 */		struct srp_direct_buf *buf = (void *) cmd->add_data;		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));		buf->key = cpu_to_be32(dev->mr->rkey);		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));	} else if (srp_map_fmr(target, scat, count, req,			       (void *) cmd->add_data)) {		/*		 * FMR mapping failed, and the scatterlist has more		 * than one entry.  Generate an indirect memory		 * descriptor.		 */		struct srp_indirect_buf *buf = (void *) cmd->add_data;		struct scatterlist *sg;		u32 datalen = 0;		int i;		fmt = SRP_DATA_DESC_INDIRECT;		len = sizeof (struct srp_cmd) +			sizeof (struct srp_indirect_buf) +			count * sizeof (struct srp_direct_buf);		scsi_for_each_sg(scmnd, sg, count, i) {			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);			buf->desc_list[i].va  =				cpu_to_be64(ib_sg_dma_address(ibdev, sg));			buf->desc_list[i].key =				cpu_to_be32(dev->mr->rkey);			buf->desc_list[i].len = cpu_to_be32(dma_len);			datalen += dma_len;		}		if (scmnd->sc_data_direction == DMA_TO_DEVICE)			cmd->data_out_desc_cnt = count;		else			cmd->data_in_desc_cnt = count;		buf->table_desc.va  =			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);		buf->table_desc.key =			cpu_to_be32(target->srp_host->dev->mr->rkey);		buf->table_desc.len =			cpu_to_be32(count * sizeof (struct srp_direct_buf));		buf->len = cpu_to_be32(datalen);	}	if (scmnd->sc_data_direction == DMA_TO_DEVICE)		cmd->buf_fmt = fmt << 4;	else		cmd->buf_fmt = fmt;	return len;}static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp){	struct srp_request *req;	struct scsi_cmnd *scmnd;	unsigned long flags;	s32 delta;	delta = (s32) be32_to_cpu(rsp->req_lim_delta);	spin_lock_irqsave(target->scsi_host->host_lock, flags);	target->req_lim += delta;	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {		if (be32_to_cpu(rsp->resp_data_len) < 4)			req->tsk_status = -1;		else			req->tsk_status = rsp->data[3];		complete(&req->done);	} else {		scmnd = req->scmnd;		if (!scmnd)			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",			       (unsigned long long) rsp->tag);		scmnd->result = rsp->status;		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {			memcpy(scmnd->sense_buffer, rsp->data +			       be32_to_cpu(rsp->resp_data_len),			       min_t(int, be32_to_cpu(rsp->sense_data_len),				     SCSI_SENSE_BUFFERSIZE));		}		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));		if (!req->tsk_mgmt) {			scmnd->host_scribble = (void *) -1L;			scmnd->scsi_done(scmnd);			srp_remove_req(target, req);		} else			req->cmd_done = 1;	}	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);}static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc){	struct ib_device *dev;	struct srp_iu *iu;	u8 opcode;	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];	dev = target->srp_host->dev->dev;	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,				   DMA_FROM_DEVICE);	opcode = *(u8 *) iu->buf;	if (0) {		int i;		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);		for (i = 0; i < wc->byte_len; ++i) {			if (i % 8 == 0)				printk(KERN_ERR "  [%02x] ", i);			printk(" %02x", ((u8 *) iu->buf)[i]);			if ((i + 1) % 8 == 0)				printk("\n");		}		if (wc->byte_len % 8)			printk("\n");	}	switch (opcode) {	case SRP_RSP:		srp_process_rsp(target, iu->buf);		break;	case SRP_T_LOGOUT:		/* XXX Handle target logout */		printk(KERN_WARNING PFX "Got target logout request\n");		break;	default:		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);		break;	}	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,				      DMA_FROM_DEVICE);}static void srp_completion(struct ib_cq *cq, void *target_ptr){	struct srp_target_port *target = target_ptr;	struct ib_wc wc;	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	while (ib_poll_cq(cq, 1, &wc) > 0) {		if (wc.status) {			printk(KERN_ERR PFX "failed %s status %d\n",			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",			       wc.status);			target->qp_in_error = 1;			break;		}		if (wc.wr_id & SRP_OP_RECV)			srp_handle_recv(target, &wc);		else			++target->tx_tail;	}}static int __srp_post_recv(struct srp_target_port *target){	struct srp_iu *iu;	struct ib_sge list;	struct ib_recv_wr wr, *bad_wr;	unsigned int next;	int ret;	next 	 = target->rx_head & (SRP_RQ_SIZE - 1);	wr.wr_id = next | SRP_OP_RECV;	iu 	 = target->rx_ring[next];	list.addr   = iu->dma;	list.length = iu->size;	list.lkey   = target->srp_host->dev->mr->lkey;	wr.next     = NULL;	wr.sg_list  = &list;	wr.num_sge  = 1;	ret = ib_post_recv(target->qp, &wr, &bad_wr);	if (!ret)		++target->rx_head;	return ret;}static int srp_post_recv(struct srp_target_port *target){	unsigned long flags;	int ret;	spin_lock_irqsave(target->scsi_host->host_lock, flags);	ret = __srp_post_recv(target);	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);	return ret;}/* * Must be called with target->scsi_host->host_lock held to protect * req_lim and tx_head.  Lock cannot be dropped between call here and * call to __srp_post_send(). */static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target){	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)		return NULL;	if (unlikely(target->req_lim < 1))		++target->zero_req_lim;	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];}/* * Must be called with target->scsi_host->host_lock held to protect * req_lim and tx_head. */static int __srp_post_send(struct srp_target_port *target,			   struct srp_iu *iu, int len){	struct ib_sge list;	struct ib_send_wr wr, *bad_wr;	int ret = 0;	list.addr   = iu->dma;	list.length = len;	list.lkey   = target->srp_host->dev->mr->lkey;	wr.next       = NULL;	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;	wr.sg_list    = &list;	wr.num_sge    = 1;	wr.opcode     = IB_WR_SEND;	wr.send_flags = IB_SEND_SIGNALED;	ret = ib_post_send(target->qp, &wr, &bad_wr);	if (!ret) {		++target->tx_head;		--target->req_lim;	}	return ret;}static int srp_queuecommand(struct scsi_cmnd *scmnd,			    void (*done)(struct scsi_cmnd *)){	struct srp_target_port *target = host_to_target(scmnd->device->host);	struct srp_request *req;	struct srp_iu *iu;	struct srp_cmd *cmd;	struct ib_device *dev;	int len;	if (target->state == SRP_TARGET_CONNECTING)		goto err;	if (target->state == SRP_TARGET_DEAD ||	    target->state == SRP_TARGET_REMOVED) {		scmnd->result = DID_BAD_TARGET << 16;		done(scmnd);		return 0;	}	iu = __srp_get_tx_iu(target);	if (!iu)		goto err;	dev = target->srp_host->dev->dev;	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,				   DMA_TO_DEVICE);	req = list_entry(target->free_reqs.next, struct srp_request, list);	scmnd->scsi_done     = done;	scmnd->result        = 0;	scmnd->host_scribble = (void *) (long) req->index;	cmd = iu->buf;	memset(cmd, 0, sizeof *cmd);	cmd->opcode = SRP_CMD;	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);	cmd->tag    = req->index;	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);	req->scmnd    = scmnd;	req->cmd      = iu;	req->cmd_done = 0;	req->tsk_mgmt = NULL;	len = srp_map_data(scmnd, target, req);	if (len < 0) {		printk(KERN_ERR PFX "Failed to map data\n");		goto err;	}	if (__srp_post_recv(target)) {		printk(KERN_ERR PFX "Recv failed\n");		goto err_unmap;	}	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,				      DMA_TO_DEVICE);	if (__srp_post_send(target, iu, len)) {		printk(KERN_ERR PFX "Send failed\n");		goto err_unmap;	}	list_move_tail(&req->list, &target->req_queue);	return 0;err_unmap:	srp_unmap_data(scmnd, target, req);err:	return SCSI_MLQUEUE_HOST_BUSY;}static int srp_alloc_iu_bufs(struct srp_target_port *target){	int i;	for (i = 0; i < SRP_RQ_SIZE; ++i) {		target->rx_ring[i] = srp_alloc_iu(target->srp_host,						  target->max_ti_iu_len,						  GFP_KERNEL, DMA_FROM_DEVICE);		if (!target->rx_ring[i])			goto err;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -