ub.c
static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
    char *page)
{
	struct usb_interface *intf;
	struct ub_dev *sc;
	struct list_head *p;
	struct ub_lun *lun;
	int cnt;
	unsigned long flags;
	int nc, nh;
	int i, j;
	struct ub_scsi_cmd_trace *t;

	intf = to_usb_interface(dev);
	sc = usb_get_intfdata(intf);
	if (sc == NULL)
		return 0;

	cnt = 0;
	spin_lock_irqsave(&sc->lock, flags);

	cnt += sprintf(page + cnt,
	    "qlen %d qmax %d\n",
	    sc->cmd_queue.qlen, sc->cmd_queue.qmax);
	cnt += sprintf(page + cnt,
	    "sg %d %d %d %d %d .. %d\n",
	    sc->sg_stat[0],
	    sc->sg_stat[1],
	    sc->sg_stat[2],
	    sc->sg_stat[3],
	    sc->sg_stat[4],
	    sc->sg_stat[5]);

	list_for_each (p, &sc->luns) {
		lun = list_entry(p, struct ub_lun, link);
		cnt += sprintf(page + cnt,
		    "lun %u changed %d removable %d readonly %d\n",
		    lun->num, lun->changed, lun->removable, lun->readonly);
	}

	if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0;
	for (j = 0; j < SCMD_TRACE_SZ; j++) {
		t = &sc->tr.vec[nc];

		cnt += sprintf(page + cnt, "%08x %02x", t->tag, t->op);
		if (t->op == REQUEST_SENSE) {
			cnt += sprintf(page + cnt, " [sense %x %02x %02x]",
			    t->key, t->asc, t->ascq);
		} else {
			cnt += sprintf(page + cnt, " %c", UB_DIR_CHAR(t->dir));
			cnt += sprintf(page + cnt, " [%5d %5d]",
			    t->req_size, t->act_size);
		}

		if ((nh = t->hcur + 1) == SCMD_ST_HIST_SZ) nh = 0;
		for (i = 0; i < SCMD_ST_HIST_SZ; i++) {
			cnt += sprintf(page + cnt, " %s",
			    ub_scsi_cmd_stname[(int)t->st_hst[nh]]);
			if (++nh == SCMD_ST_HIST_SZ) nh = 0;
		}
		cnt += sprintf(page + cnt, "\n");

		if (++nc == SCMD_TRACE_SZ) nc = 0;
	}

	spin_unlock_irqrestore(&sc->lock, flags);
	return cnt;
}

static DEVICE_ATTR(diag, S_IRUGO, ub_diag_show, NULL); /* N.B. World readable */

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ub_lock, flags);
	for (i = 0; i < UB_MAX_HOSTS; i++) {
		if (ub_hostv[i] == 0) {
			ub_hostv[i] = 1;
			spin_unlock_irqrestore(&ub_lock, flags);
			return i;
		}
	}
	spin_unlock_irqrestore(&ub_lock, flags);
	return -1;
}

static void ub_id_put(int id)
{
	unsigned long flags;

	if (id < 0 || id >= UB_MAX_HOSTS) {
		printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
		return;
	}

	spin_lock_irqsave(&ub_lock, flags);
	if (ub_hostv[id] == 0) {
		spin_unlock_irqrestore(&ub_lock, flags);
		printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
		return;
	}
	ub_hostv[id] = 0;
	spin_unlock_irqrestore(&ub_lock, flags);
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 *  - once something is poisoned, its refcount cannot grow
 *  - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
	unsigned long flags;

	spin_lock_irqsave(&ub_lock, flags);
	--sc->openc;
	if (sc->openc == 0 && atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		ub_cleanup(sc);
	} else {
		spin_unlock_irqrestore(&ub_lock, flags);
	}
}
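/*
 * A minimal usage sketch (not part of ub.c proper) of how a disconnect
 * path is expected to pair with ub_put() under the two assumptions
 * stated above. The function example_disconnect_teardown is hypothetical
 * and only illustrates the ordering.
 */
static void example_disconnect_teardown(struct ub_dev *sc)
{
	/* Poison first, so the refcount can no longer grow. */
	atomic_set(&sc->poison, 1);

	/*
	 * del_gendisk() on every lun->disk would go here, so that no
	 * new opens can arrive and bump sc->openc.
	 */

	/* Drop our reference; the final ub_put() runs ub_cleanup(). */
	ub_put(sc);
}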
/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
	struct list_head *p;
	struct ub_lun *lun;
	request_queue_t *q;

	while (!list_empty(&sc->luns)) {
		p = sc->luns.next;
		lun = list_entry(p, struct ub_lun, link);
		list_del(p);

		/* I don't think queue can be NULL. But... Stolen from sx8.c */
		if ((q = lun->disk->queue) != NULL)
			blk_cleanup_queue(q);

		/*
		 * If we zero disk->private_data BEFORE put_disk, we have
		 * to check for NULL all over the place in open, release,
		 * check_media and revalidate, because the block level
		 * semaphore is well inside the put_disk.
		 * But we cannot zero after the call, because *disk is gone.
		 * The sd.c is blatantly racy in this area.
		 */
		/* disk->private_data = NULL; */
		put_disk(lun->disk);
		lun->disk = NULL;

		ub_id_put(lun->id);
		kfree(lun);
	}

	kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
	struct ub_scsi_cmd *ret;

	if (lun->cmda[0])
		return NULL;
	ret = &lun->cmdv[0];
	lun->cmda[0] = 1;
	return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
	if (cmd != &lun->cmdv[0]) {
		printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
		    lun->name, cmd);
		return;
	}
	if (!lun->cmda[0]) {
		printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
		return;
	}
	lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		t->tail->next = cmd;
		t->tail = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		cmd->next = t->head;
		t->head = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
	struct ub_scsi_cmd *cmd;

	if (t->qlen == 0)
		return NULL;
	if (--t->qlen == 0)
		t->tail = NULL;
	cmd = t->head;
	t->head = cmd->next;
	cmd->next = NULL;
	return cmd;
}

#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
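/*
 * Illustration only (hypothetical helper, not in ub.c): the queue
 * discipline above. ub_cmdq_add() appends, so normal commands run in
 * FIFO order; ub_cmdq_insert() prepends, letting an urgent command
 * such as an auto-sense jump the queue; ub_cmdq_pop() always takes
 * the head. Assumes both commands were zeroed beforehand, the way
 * ub_request_fn_1() memsets a freshly allocated cmd.
 */
static void example_cmdq_discipline(struct ub_dev *sc,
    struct ub_scsi_cmd *normal, struct ub_scsi_cmd *urgent)
{
	ub_cmdq_add(sc, normal);	/* goes to the tail */
	ub_cmdq_insert(sc, urgent);	/* goes to the head */

	/* ub_cmdq_pop(sc) now returns urgent first, then normal. */
}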
/*
 * The request function is our main entry point
 */
static void ub_request_fn(request_queue_t *q)
{
	struct ub_lun *lun = q->queuedata;
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (ub_request_fn_1(lun, rq) != 0) {
			blk_stop_queue(q);
			break;
		}
	}
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
	struct ub_dev *sc = lun->udev;
	struct ub_scsi_cmd *cmd;
	int rc;

	if (atomic_read(&sc->poison) || lun->changed) {
		blkdev_dequeue_request(rq);
		ub_end_rq(rq, 0);
		return 0;
	}

	if ((cmd = ub_get_cmd(lun)) == NULL)
		return -1;
	memset(cmd, 0, sizeof(struct ub_scsi_cmd));

	blkdev_dequeue_request(rq);
	if (blk_pc_request(rq)) {
		rc = ub_cmd_build_packet(sc, lun, cmd, rq);
	} else {
		rc = ub_cmd_build_block(sc, lun, cmd, rq);
	}
	if (rc != 0) {
		ub_put_cmd(lun, cmd);
		ub_end_rq(rq, 0);
		return 0;
	}
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = rq;

	cmd->tag = sc->tagcnt++;
	if (ub_submit_scsi(sc, cmd) != 0) {
		ub_put_cmd(lun, cmd);
		ub_end_rq(rq, 0);
		return 0;
	}

	return 0;
}

static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct request *rq)
{
	int ub_dir;
	int n_elem;
	unsigned int block, nblks;

	if (rq_data_dir(rq) == WRITE)
		ub_dir = UB_DIR_WRITE;
	else
		ub_dir = UB_DIR_READ;
	cmd->dir = ub_dir;

	/*
	 * get scatterlist from block layer
	 */
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
	if (n_elem <= 0) {
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    sc->name, n_elem); /* P3 */
		return -1;		/* request with no s/g entries? */
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    sc->name, n_elem);
		return -1;
	}
	cmd->nsg = n_elem;
	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;

	/*
	 * build the command
	 *
	 * The call to blk_queue_hardsect_size() guarantees that request
	 * is aligned, but it is given in terms of 512 byte units, always.
	 */
	block = rq->sector >> lun->capacity.bshift;
	nblks = rq->nr_sectors >> lun->capacity.bshift;

	cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[5] = block;
	cmd->cdb[7] = nblks >> 8;
	cmd->cdb[8] = nblks;
	cmd->cdb_len = 10;

	cmd->len = rq->nr_sectors * 512;

	return 0;
}

static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct request *rq)
{
	int n_elem;

	if (rq->data_len == 0) {
		cmd->dir = UB_DIR_NONE;
	} else {
		if (rq_data_dir(rq) == WRITE)
			cmd->dir = UB_DIR_WRITE;
		else
			cmd->dir = UB_DIR_READ;
	}

	/*
	 * get scatterlist from block layer
	 */
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
	if (n_elem < 0) {
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    sc->name, n_elem); /* P3 */
		return -1;
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    sc->name, n_elem);
		return -1;
	}
	cmd->nsg = n_elem;
	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;

	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
	cmd->cdb_len = rq->cmd_len;

	cmd->len = rq->data_len;

	return 0;
}
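/*
 * Worked illustration (hypothetical helper, not in ub.c): the 10-byte
 * READ/WRITE CDB that ub_cmd_build_block() fills by hand keeps the LBA
 * big-endian in bytes 2..5 and the transfer length in bytes 7..8.
 * For example, block 0x00012345 and nblks 8 yield
 * cdb[] = { 28, 00, 00, 01, 23, 45, 00, 00, 08, 00 } for a read.
 * Assumes the caller zeroed cdb[] first, as the driver does with memset.
 */
static void example_fill_rw10(unsigned char *cdb, int is_read,
    unsigned int block, unsigned int nblks)
{
	cdb[0] = is_read ? READ_10 : WRITE_10;	/* 0x28 or 0x2A */
	cdb[2] = block >> 24;
	cdb[3] = block >> 16;
	cdb[4] = block >> 8;
	cdb[5] = block;
	cdb[7] = nblks >> 8;
	cdb[8] = nblks;
	/* cdb[9] is the control byte, left zero here */
}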
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct request *rq = cmd->back;
	struct ub_lun *lun = cmd->lun;
	int uptodate;

	if (cmd->error == 0) {
		uptodate = 1;

		if (blk_pc_request(rq)) {
			if (cmd->act_len >= rq->data_len)
				rq->data_len = 0;
			else
				rq->data_len -= cmd->act_len;
		}
	} else {
		uptodate = 0;

		if (blk_pc_request(rq)) {
			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
			if (sc->top_sense[0] != 0)
				rq->errors = SAM_STAT_CHECK_CONDITION;
			else
				rq->errors = DID_ERROR << 16;
		}
	}

	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, uptodate);
	blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, int uptodate)
{
	int rc;

	rc = end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	// assert(rc == 0);
	end_that_request_last(rq);
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 *
 * XXX We only support Bulk for the moment.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	if (cmd->state != UB_CMDST_INIT ||
	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
		return -EINVAL;
	}

	ub_cmdq_add(sc, cmd);
	/*
	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
	 * safer to jump to a tasklet, in case upper layers do something silly.
	 */
	tasklet_schedule(&sc->tasklet);
	return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct bulk_cb_wrap *bcb;
	int rc;

	bcb = &sc->work_bcb;

	/*
	 * ``If the allocation length is eighteen or greater, and a device
	 * server returns less than eighteen bytes of data, the application
	 * client should assume that the bytes not transferred would have been
	 * zeroes had the device server returned those bytes.''
	 *
	 * We zero sense for all commands so that when a packet request
	 * fails it does not return a stale sense.
	 */
	memset(&sc->top_sense, 0, UB_SENSE_SIZE);

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = cmd->tag;		/* Endianness is not important */
	bcb->DataTransferLength = cpu_to_le32(cmd->len);
	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;