ib_srp.c
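/*
 * Excerpt from the SRP (SCSI RDMA Protocol) initiator for InfiniBand
 * host channel adapters.  The listing picks up partway through
 * srp_map_data(), which finishes building the data descriptor for an
 * outgoing SRP command.
 */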
				n * sizeof (struct srp_direct_buf);
		}
	} else {
		struct srp_direct_buf *buf = (void *) cmd->add_data;
		dma_addr_t dma;

		dma = dma_map_single(target->srp_host->dev->dma_device,
				     scmnd->request_buffer, scmnd->request_bufflen,
				     scmnd->sc_data_direction);
		if (dma_mapping_error(dma)) {
			printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n",
			       scmnd->request_buffer, (int) scmnd->request_bufflen,
			       scmnd->sc_data_direction);
			return -EINVAL;
		}

		pci_unmap_addr_set(req, direct_mapping, dma);

		buf->va  = cpu_to_be64(dma);
		buf->key = cpu_to_be32(target->srp_host->mr->rkey);
		buf->len = cpu_to_be32(scmnd->request_bufflen);

		fmt = SRP_DATA_DESC_DIRECT;
		len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
	}

	/* The data-out format lives in the high nibble of buf_fmt. */
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (scmnd->use_sg)
		dma_unmap_sg(target->srp_host->dev->dma_device,
			     (struct scatterlist *) scmnd->request_buffer,
			     scmnd->use_sg, scmnd->sc_data_direction);
	else
		dma_unmap_single(target->srp_host->dev->dma_device,
				 pci_unmap_addr(req, direct_mapping),
				 scmnd->request_bufflen,
				 scmnd->sc_data_direction);
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd) {
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
			/* No command to complete -- drop the response
			 * rather than dereferencing a NULL pointer. */
			goto out;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		srp_unmap_data(scmnd, target, req);

		if (!req->tsk_mgmt) {
			req->scmnd = NULL;
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			/* Return the request slot to the free list. */
			list_del(&req->list);
			req->next = target->req_head;
			target->req_head = rsp->tag & ~SRP_TAG_TSK_MGMT;
		} else
			req->cmd_done = 1;
	}

out:
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}
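/*
 * Receive path: every receive work request carries the SRP_OP_RECV bit
 * in its wr_id, which is how srp_completion() below tells receives from
 * sends when it polls the CQ.  srp_handle_recv() syncs the IU buffer for
 * CPU access, dispatches on the SRP opcode in the first byte, then hands
 * the buffer back to the device.
 */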
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		/* Compile-time disabled hex dump of the incoming IU. */
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR "  [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}

static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next     = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu       = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->mr->lkey;

	wr.next    = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1)) {
		if (printk_ratelimit())
			printk(KERN_DEBUG PFX "Target has req_lim %d\n",
			       target->req_lim);
		return NULL;
	}

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}
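/*
 * Ring-indexing note: tx_head and tx_tail are free-running counters, so
 * the "tx_head & SRP_SQ_SIZE" expression above (and in the wr_id below)
 * only works as a mask on the assumption, per ib_srp.h in this driver,
 * that SRP_SQ_SIZE is SRP_RQ_SIZE - 1, i.e. one less than a power of
 * two.  That also matches the SRP_SQ_SIZE + 1 entries that
 * srp_alloc_iu_bufs() puts in tx_ring.
 */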
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	long req_index;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	req_index = target->req_head;

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) req_index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req_index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req = &target->req_ring[req_index];

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	/* Pop the request slot off the free list and queue it. */
	target->req_head = req->next;
	list_add_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  SRP_MAX_IU_LEN,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	/* srp_free_iu() must tolerate the NULL entries never filled in. */
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
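/*
 * Connection management: the two handlers below run from the IB CM
 * callback.  srp_cm_rej_handler() decodes REJ reasons into
 * target->status (CM and port redirects become SRP_DLID_REDIRECT or
 * SRP_PORT_REDIRECT so the login path can retry), and srp_cm_handler()
 * reacts to the login response by allocating IU buffers and moving the
 * QP through RTR and RTS before sending the RTU.
 */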
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX
			       "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING "  REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_MRA_RECEIVED:
		printk(KERN_ERR PFX "MRA received\n");
		break;

	case IB_CM_DREP_RECEIVED:
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}