
📄 ub.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
	}
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
	struct list_head *p;
	struct ub_lun *lun;
	struct request_queue *q;

	while (!list_empty(&sc->luns)) {
		p = sc->luns.next;
		lun = list_entry(p, struct ub_lun, link);
		list_del(p);

		/* I don't think queue can be NULL. But... Stolen from sx8.c */
		if ((q = lun->disk->queue) != NULL)
			blk_cleanup_queue(q);
		/*
		 * If we zero disk->private_data BEFORE put_disk, we have
		 * to check for NULL all over the place in open, release,
		 * check_media and revalidate, because the block level
		 * semaphore is well inside the put_disk.
		 * But we cannot zero after the call, because *disk is gone.
		 * The sd.c is blatantly racy in this area.
		 */
		/* disk->private_data = NULL; */
		put_disk(lun->disk);
		lun->disk = NULL;

		ub_id_put(lun->id);
		kfree(lun);
	}

	usb_set_intfdata(sc->intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
	struct ub_scsi_cmd *ret;

	if (lun->cmda[0])
		return NULL;
	ret = &lun->cmdv[0];
	lun->cmda[0] = 1;
	return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
	if (cmd != &lun->cmdv[0]) {
		printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
		    lun->name, cmd);
		return;
	}
	if (!lun->cmda[0]) {
		printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
		return;
	}
	lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		t->tail->next = cmd;
		t->tail = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		cmd->next = t->head;
		t->head = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
	struct ub_scsi_cmd *cmd;

	if (t->qlen == 0)
		return NULL;
	if (--t->qlen == 0)
		t->tail = NULL;
	cmd = t->head;
	t->head = cmd->next;
	cmd->next = NULL;
	return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)

/*
 * The request function is our main entry point
 */
static void ub_request_fn(struct request_queue *q)
{
	struct ub_lun *lun = q->queuedata;
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (ub_request_fn_1(lun, rq) != 0) {
			blk_stop_queue(q);
			break;
		}
	}
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
	struct ub_dev *sc = lun->udev;
	struct ub_scsi_cmd *cmd;
	struct ub_request *urq;
	int n_elem;

	if (atomic_read(&sc->poison)) {
		blkdev_dequeue_request(rq);
		ub_end_rq(rq, DID_NO_CONNECT << 16);
		return 0;
	}

	if (lun->changed && !blk_pc_request(rq)) {
		blkdev_dequeue_request(rq);
		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
		return 0;
	}

	if (lun->urq.rq != NULL)
		return -1;
	if ((cmd = ub_get_cmd(lun)) == NULL)
		return -1;
	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	sg_init_table(cmd->sgv, UB_MAX_REQ_SG);

	blkdev_dequeue_request(rq);

	urq = &lun->urq;
	memset(urq, 0, sizeof(struct ub_request));
	urq->rq = rq;

	/*
	 * get scatterlist from block layer
	 */
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
	if (n_elem < 0) {
		/* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    lun->name, n_elem);
		goto drop;
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    lun->name, n_elem);
		goto drop;
	}
	urq->nsg = n_elem;
	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;

	if (blk_pc_request(rq)) {
		ub_cmd_build_packet(sc, lun, cmd, urq);
	} else {
		ub_cmd_build_block(sc, lun, cmd, urq);
	}
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;
	if (ub_submit_scsi(sc, cmd) != 0)
		goto drop;

	return 0;

drop:
	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, DID_ERROR << 16);
	return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;
	unsigned int block, nblks;

	if (rq_data_dir(rq) == WRITE)
		cmd->dir = UB_DIR_WRITE;
	else
		cmd->dir = UB_DIR_READ;

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	/*
	 * build the command
	 *
	 * The call to blk_queue_hardsect_size() guarantees that request
	 * is aligned, but it is given in terms of 512 byte units, always.
	 */
	block = rq->sector >> lun->capacity.bshift;
	nblks = rq->nr_sectors >> lun->capacity.bshift;

	cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[5] = block;
	cmd->cdb[7] = nblks >> 8;
	cmd->cdb[8] = nblks;
	cmd->cdb_len = 10;

	cmd->len = rq->nr_sectors * 512;
}

static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;

	if (rq->data_len == 0) {
		cmd->dir = UB_DIR_NONE;
	} else {
		if (rq_data_dir(rq) == WRITE)
			cmd->dir = UB_DIR_WRITE;
		else
			cmd->dir = UB_DIR_READ;
	}

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
	cmd->cdb_len = rq->cmd_len;

	cmd->len = rq->data_len;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_lun *lun = cmd->lun;
	struct ub_request *urq = cmd->back;
	struct request *rq;
	unsigned int scsi_status;

	rq = urq->rq;

	if (cmd->error == 0) {
		if (blk_pc_request(rq)) {
			if (cmd->act_len >= rq->data_len)
				rq->data_len = 0;
			else
				rq->data_len -= cmd->act_len;
		}
		scsi_status = 0;
	} else {
		if (blk_pc_request(rq)) {
			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
			if (sc->top_sense[0] != 0)
				scsi_status = SAM_STAT_CHECK_CONDITION;
			else
				scsi_status = DID_ERROR << 16;
		} else {
			if (cmd->error == -EIO) {
				if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
					return;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
		}
	}

	urq->rq = NULL;

	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, scsi_status);
	blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
	int uptodate;

	if (scsi_status == 0) {
		uptodate = 1;
	} else {
		uptodate = 0;
		rq->errors = scsi_status;
	}
	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	end_that_request_last(rq, uptodate);
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{
	if (atomic_read(&sc->poison))
		return -ENXIO;

	ub_reset_enter(sc, urq->current_try);

	if (urq->current_try >= 3)
		return -EIO;
	urq->current_try++;

	/* Remove this if anyone complains of flooding. */
	printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
	    "[sense %x %02x %02x] retry %d\n",
	    sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
	    cmd->key, cmd->asc, cmd->ascq, urq->current_try);

	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	ub_cmd_build_block(sc, lun, cmd, urq);

	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;

#if 0 /* Wasteful */
	return ub_submit_scsi(sc, cmd);
#else
	ub_cmdq_add(sc, cmd);
	return 0;
#endif
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	if (cmd->state != UB_CMDST_INIT ||
	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
		return -EINVAL;
	}

	ub_cmdq_add(sc, cmd);
	/*
	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
	 * safer to jump to a tasklet, in case upper layers do something silly.
	 */
	tasklet_schedule(&sc->tasklet);
	return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct bulk_cb_wrap *bcb;
	int rc;

	bcb = &sc->work_bcb;

	/*
	 * ``If the allocation length is eighteen or greater, and a device
	 * server returns less than eighteen bytes of data, the application
	 * client should assume that the bytes not transferred would have been
	 * zeroes had the device server returned those bytes.''
	 *
	 * We zero sense for all commands so that when a packet request
	 * fails it does not return a stale sense.
	 */
	memset(&sc->top_sense, 0, UB_SENSE_SIZE);

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = cmd->tag;		/* Endianness is not important */
	bcb->DataTransferLength = cpu_to_le32(cmd->len);
	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
	bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
	bcb->Length = cmd->cdb_len;

	/* copy the command payload */
	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->send_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

	/* Fill what we shouldn't be filling, because usb-storage did so. */
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* XXX Clear stalls */
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_CMD;
	return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
	struct ub_dev *sc = (struct ub_dev *) arg;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	if (!ub_is_completed(&sc->work_done))
		usb_unlink_urb(&sc->work_urb);
	spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb)
{
	struct ub_dev *sc = urb->context;

	ub_complete(&sc->work_done);
	tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
	struct ub_dev *sc = (struct ub_dev *) _dev;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	ub_scsi_dispatch(sc);
	spin_unlock_irqrestore(sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
	struct ub_scsi_cmd *cmd;
	int rc;

	while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
		if (cmd->state == UB_CMDST_DONE) {
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
		} else if (cmd->state == UB_CMDST_INIT) {
			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
