/* ub.c */
/*
 */
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
{
	unsigned char *sense = sc->top_sense;
	struct ub_scsi_cmd *cmd;

	/*
	 * Find the command which triggered the unit attention or a check,
	 * save the sense into it, and advance its state machine.
	 */
	if ((cmd = ub_cmdq_peek(sc)) == NULL) {
		printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
		return;
	}
	if (cmd != scmd->back) {
		printk(KERN_WARNING "%s: "
		    "sense done for wrong command 0x%x\n",
		    sc->name, cmd->tag);
		return;
	}
	if (cmd->state != UB_CMDST_SENSE) {
		printk(KERN_WARNING "%s: "
		    "sense done with bad cmd state %d\n",
		    sc->name, cmd->state);
		return;
	}

	/*
	 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
	 */
	cmd->key = sense[2] & 0x0F;
	cmd->asc = sense[12];
	cmd->ascq = sense[13];

	ub_scsi_urb_compl(sc, cmd);
}

/*
 * Reset management
 * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
 * XXX Make usb_sync_reset asynchronous.
 */

static void ub_reset_enter(struct ub_dev *sc, int try)
{

	if (sc->reset) {
		/* This happens often on multi-LUN devices. */
		return;
	}
	sc->reset = try + 1;

#if 0 /* Not needed because the disconnect waits for us. */
	unsigned long flags;
	spin_lock_irqsave(&ub_lock, flags);
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);
#endif

#if 0 /* We let them stop themselves. */
	struct ub_lun *lun;
	list_for_each_entry(lun, &sc->luns, link) {
		blk_stop_queue(lun->disk->queue);
	}
#endif

	schedule_work(&sc->reset_work);
}

static void ub_reset_task(struct work_struct *work)
{
	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
	unsigned long flags;
	struct ub_lun *lun;
	int lkr, rc;

	if (!sc->reset) {
		printk(KERN_WARNING "%s: Running reset unrequested\n",
		    sc->name);
		return;
	}

	if (atomic_read(&sc->poison)) {
		;
	} else if ((sc->reset & 1) == 0) {
		ub_sync_reset(sc);
		msleep(700);	/* usb-storage sleeps 6s (!) */
		ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
		ub_probe_clear_stall(sc, sc->send_bulk_pipe);
	} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
		;
	} else {
		if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
			printk(KERN_NOTICE
			    "%s: usb_lock_device_for_reset failed (%d)\n",
			    sc->name, lkr);
		} else {
			rc = usb_reset_device(sc->dev);
			if (rc < 0) {
				printk(KERN_NOTICE "%s: "
				    "usb_reset_device failed (%d)\n",
				    sc->name, rc);
			}

			if (lkr)
				usb_unlock_device(sc->dev);
		}
	}

	/*
	 * In theory, no commands can be running while reset is active,
	 * so nobody can ask for another reset, and so we do not need any
	 * queues of resets or anything.  We do need a spinlock though,
	 * to interact with the block layer.
	 */
	spin_lock_irqsave(sc->lock, flags);
	sc->reset = 0;
	tasklet_schedule(&sc->tasklet);
	list_for_each_entry(lun, &sc->luns, link) {
		blk_start_queue(lun->disk->queue);
	}
	wake_up(&sc->reset_wait);
	spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * This is called from a process context.
 */
static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
{

	lun->readonly = 0;	/* XXX Query this from the device */

	lun->capacity.nsec = 0;
	lun->capacity.bsize = 512;
	lun->capacity.bshift = 0;

	if (ub_sync_tur(sc, lun) != 0)
		return;			/* Not ready */
	lun->changed = 0;

	if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
		/*
		 * The retry here means something is wrong, either with the
		 * device, with the transport, or with our code.
		 * We keep this because sd.c has retries for capacity.
		 */
		if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
			lun->capacity.nsec = 0;
			lun->capacity.bsize = 512;
			lun->capacity.bshift = 0;
		}
	}
}

/*
 * The open function.
 * This is mostly needed to keep refcounting, but also to support
 * media checks on removable media drives.
 */
static int ub_bd_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_lun *lun = disk->private_data;
	struct ub_dev *sc = lun->udev;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ub_lock, flags);
	if (atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		return -ENXIO;
	}
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	if (lun->removable || lun->readonly)
		check_disk_change(inode->i_bdev);

	/*
	 * sd.c considers ->media_present and ->changed not equivalent,
	 * under some pretty murky conditions (a failure of READ CAPACITY).
	 * We may need it one day.
	 */
	if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
		rc = -ENOMEDIUM;
		goto err_open;
	}

	if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
		rc = -EROFS;
		goto err_open;
	}

	return 0;

err_open:
	ub_put(sc);
	return rc;
}

/*
 */
static int ub_bd_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_lun *lun = disk->private_data;
	struct ub_dev *sc = lun->udev;

	ub_put(sc);
	return 0;
}

/*
 * The ioctl interface.
 */
static int ub_bd_ioctl(struct inode *inode, struct file *filp,
    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	void __user *usermem = (void __user *) arg;

	return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem);
}

/*
 * This is called once a new disk was seen by the block layer or by ub_probe().
 * The main objective here is to discover the features of the media such as
 * the capacity, read-only status, etc. USB storage generally does not
 * need to be spun up, but if we needed it, this would be the place.
 *
 * This call can sleep.
 *
 * The return code is not used.
 */
static int ub_bd_revalidate(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	ub_revalidate(lun->udev, lun);

	/* XXX Support sector size switching like in sr.c */
	blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
	set_capacity(disk, lun->capacity.nsec);
	// set_disk_ro(sdkp->disk, lun->readonly);

	return 0;
}

/*
 * The check is called by the block layer to verify if the media
 * is still available. It is supposed to be harmless, lightweight and
 * non-intrusive in case the media was not changed.
 *
 * This call can sleep.
 *
 * The return code is bool!
 */
static int ub_bd_media_changed(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	if (!lun->removable)
		return 0;

	/*
	 * We clean checks always after every command, so this is not
	 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
	 * the device is actually not ready with operator or software
	 * intervention required. One dangerous item might be a drive which
	 * spins itself down, and come the time to write dirty pages, this
	 * will fail, then block layer discards the data. Since we never
	 * spin drives up, such devices simply cannot be used with ub anyway.
	 */
	if (ub_sync_tur(lun->udev, lun) != 0) {
		lun->changed = 1;
		return 1;
	}

	return lun->changed;
}

static struct block_device_operations ub_bd_fops = {
	.owner		= THIS_MODULE,
	.open		= ub_bd_open,
	.release	= ub_bd_release,
	.ioctl		= ub_bd_ioctl,
	.media_changed	= ub_bd_media_changed,
	.revalidate_disk = ub_bd_revalidate,
};

/*
 * Common ->done routine for commands executed synchronously.
 */
static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct completion *cop = cmd->back;
	complete(cop);
}

/*
 * Test if the device has a check condition on it, synchronously.
 */
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
{
	struct ub_scsi_cmd *cmd;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
	unsigned long flags;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;

	cmd->cdb[0] = TEST_UNIT_READY;
	cmd->cdb_len = 6;
	cmd->dir = UB_DIR_NONE;
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;			/* This may be NULL, but that's ok */
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	rc = cmd->error;

	if (rc == -EIO && cmd->key != 0)	/* Retries for benh's key */
		rc = cmd->key;

err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}

/*
 * Read the SCSI capacity synchronously (for probing).
 */
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret)
{
	struct ub_scsi_cmd *cmd;
	struct scatterlist *sg;
	char *p;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
	unsigned long flags;
	unsigned int bsize, shift;
	unsigned long nsec;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	p = (char *)cmd + sizeof(struct ub_scsi_cmd);

	cmd->cdb[0] = 0x25;		/* READ CAPACITY(10) */
	cmd->cdb_len = 10;
	cmd->dir = UB_DIR_READ;
	cmd->state = UB_CMDST_INIT;
	cmd->nsg = 1;
	sg = &cmd->sgv[0];
	sg_init_table(sg, UB_MAX_REQ_SG);
	sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
	cmd->len = 8;
	cmd->lun = lun;
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	if (cmd->error != 0) {
		rc = -EIO;
		goto err_read;
	}
	if (cmd->act_len != 8) {
		rc = -EIO;
		goto err_read;
	}

	/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
	nsec = be32_to_cpu(*(__be32 *)p) + 1;
	bsize = be32_to_cpu(*(__be32 *)(p + 4));
	switch (bsize) {
	case 512:	shift = 0;	break;
	case 1024:	shift = 1;	break;
	case 2048:	shift = 2;	break;
	case 4096:	shift = 3;	break;
	default:
		rc = -EDOM;
		goto err_inv_bsize;
	}

	ret->bsize = bsize;
	ret->bshift = shift;
	ret->nsec = nsec << shift;
	rc = 0;

err_inv_bsize:
err_read:
err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}

/*
 */
static void ub_probe_urb_complete(struct urb *urb)
{
	struct completion *cop = urb->context;
	complete(cop);
}

static void ub_probe_timeout(unsigned long arg)
{
	struct completion *cop = (struct completion *) arg;
	complete(cop);
}

/*
 * Reset with a Bulk reset.
 */
static int ub_sync_reset(struct ub_dev *sc)
{
	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int rc;

	init_completion(&compl);

	cr = &sc->work_cr;
	cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	cr->bRequest = US_BULK_RESET_REQUEST;
	cr->wValue = cpu_to_le16(0);
	cr->wIndex = cpu_to_le16(ifnum);
	cr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		printk(KERN_WARNING
		    "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
		return rc;
	}

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	return sc->work_urb.status;
}

/*
 * Get number of LUNs by the way of Bulk GetMaxLUN command.
 */
static int ub_sync_getmaxlun(struct ub_dev *sc)
{