ub.c

Linux Kernel 2.6.9 for OMAP1710

Language: C
Page 1 of 4
 * The command queue.
 */

static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		t->tail->next = cmd;
		t->tail = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		cmd->next = t->head;
		t->head = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
	struct ub_scsi_cmd *cmd;

	if (t->qlen == 0)
		return NULL;
	if (--t->qlen == 0)
		t->tail = NULL;
	cmd = t->head;
	t->head = cmd->next;
	cmd->next = NULL;
	return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)

/*
 * The request function is our main entry point
 */

static inline int ub_bd_rq_fn_1(request_queue_t *q)
{
#if 0
	int writing = 0, pci_dir, i, n_elem;
	u32 tmp;
	unsigned int msg_size;
#endif
	struct ub_dev *sc = q->queuedata;
	struct request *rq;
#if 0 /* We use rq->buffer for now */
	struct scatterlist *sg;
	int n_elem;
#endif
	struct ub_scsi_cmd *cmd;
	int ub_dir;
	unsigned int block, nblks;
	int rc;

	if ((rq = elv_next_request(q)) == NULL)
		return 1;

	if (atomic_read(&sc->poison) || sc->changed) {
		blkdev_dequeue_request(rq);
		ub_end_rq(rq, 0);
		return 0;
	}

	if ((cmd = ub_get_cmd(sc)) == NULL) {
		blk_stop_queue(q);
		return 1;
	}

	blkdev_dequeue_request(rq);

	if (rq_data_dir(rq) == WRITE)
		ub_dir = UB_DIR_WRITE;
	else
		ub_dir = UB_DIR_READ;

	/*
	 * get scatterlist from block layer
	 */
#if 0 /* We use rq->buffer for now */
	sg = &cmd->sgv[0];
	n_elem = blk_rq_map_sg(q, rq, sg);
	if (n_elem <= 0) {
		ub_put_cmd(sc, cmd);
		ub_end_rq(rq, 0);
		blk_start_queue(q);
		return 0;		/* request with no s/g entries? */
	}

	if (n_elem != 1) {		/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    sc->name, n_elem);
		ub_put_cmd(sc, cmd);
		ub_end_rq(rq, 0);
		blk_start_queue(q);
		return 0;
	}
#endif

	/*
	 * XXX Unfortunately, this check does not work. It is quite possible
	 * to get bogus non-null rq->buffer if you allow sg by mistake.
	 */
	if (rq->buffer == NULL) {
		/*
		 * This must not happen if we set the queue right.
		 * The block level must create bounce buffers for us.
		 */
		static int do_print = 1;
		if (do_print) {
			printk(KERN_WARNING "%s: unmapped request\n", sc->name);
			do_print = 0;
		}
		ub_put_cmd(sc, cmd);
		ub_end_rq(rq, 0);
		blk_start_queue(q);
		return 0;
	}

	/*
	 * build the command
	 */
	block = rq->sector;
	nblks = rq->nr_sectors;

	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[5] = block;
	cmd->cdb[7] = nblks >> 8;
	cmd->cdb[8] = nblks;
	cmd->cdb_len = 10;
	cmd->dir = ub_dir;
	cmd->state = UB_CMDST_INIT;
	cmd->data = rq->buffer;
	cmd->len = nblks * 512;
	cmd->done = ub_rw_cmd_done;
	cmd->back = rq;

	cmd->tag = sc->tagcnt++;
	if ((rc = ub_submit_scsi(sc, cmd)) != 0) {
		ub_put_cmd(sc, cmd);
		ub_end_rq(rq, 0);
		blk_start_queue(q);
		return 0;
	}

	return 0;
}
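/*
 * Aside (editor's sketch, not part of the driver): the CDB built above is
 * the standard SCSI READ(10)/WRITE(10) layout: opcode in byte 0, a 32-bit
 * big-endian LBA in bytes 2..5, and a 16-bit big-endian transfer length in
 * bytes 7..8, which is where the 2048 GB limit in the comment comes from.
 * The standalone userspace sketch below reproduces that encoding; the
 * 0x28/0x2a values are the standard opcodes behind the kernel's
 * READ_10/WRITE_10 macros.
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_READ_10  0x28	/* standard SCSI opcode; READ_10 in the kernel */
#define SKETCH_WRITE_10 0x2a	/* standard SCSI opcode; WRITE_10 in the kernel */

/* Encode a READ(10)/WRITE(10) CDB the same way ub_bd_rq_fn_1 does. */
static void sketch_build_rw10(uint8_t cdb[10], int is_read,
			      uint32_t lba, uint16_t nblks)
{
	memset(cdb, 0, 10);
	cdb[0] = is_read ? SKETCH_READ_10 : SKETCH_WRITE_10;
	cdb[2] = lba >> 24;	/* LBA, big-endian, bytes 2..5 */
	cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;
	cdb[5] = lba;
	cdb[7] = nblks >> 8;	/* transfer length, big-endian, bytes 7..8 */
	cdb[8] = nblks;
}

int main(void)
{
	uint8_t cdb[10];
	int i;

	sketch_build_rw10(cdb, 1, 0x12345678, 8);	/* read 8 blocks at LBA 0x12345678 */
	for (i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");	/* prints: 28 00 12 34 56 78 00 00 08 00 */
	return 0;
}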
static void ub_bd_rq_fn(request_queue_t *q)
{
	do { } while (ub_bd_rq_fn_1(q) == 0);
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct request *rq = cmd->back;
	struct gendisk *disk = sc->disk;
	request_queue_t *q = disk->queue;
	int uptodate;

	if (cmd->error == 0)
		uptodate = 1;
	else
		uptodate = 0;

	ub_put_cmd(sc, cmd);
	ub_end_rq(rq, uptodate);
	blk_start_queue(q);
}

static void ub_end_rq(struct request *rq, int uptodate)
{
	int rc;

	rc = end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	// assert(rc == 0);
	end_that_request_last(rq);
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 *
 * XXX We only support Bulk for the moment.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (cmd->state != UB_CMDST_INIT ||
	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
		return -EINVAL;
	}

	ub_cmdq_add(sc, cmd);

	/*
	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
	 * safer to jump to a tasklet, in case upper layers do something silly.
	 */
	tasklet_schedule(&sc->tasklet);
	return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct bulk_cb_wrap *bcb;
	int rc;

	bcb = &sc->work_bcb;

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = cmd->tag;		/* Endianness is not important */
	bcb->DataTransferLength = cpu_to_le32(cmd->len);
	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
	bcb->Lun = 0;			/* No multi-LUN yet */
	bcb->Length = cmd->cdb_len;

	/* copy the command payload */
	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->send_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;

	/* Fill what we shouldn't be filling, because usb-storage did so. */
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
	add_timer(&sc->work_timer);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* XXX Clear stalls */
		printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */
		del_timer(&sc->work_timer);
		ub_complete(&sc->work_done);
		return rc;
	}

	cmd->state = UB_CMDST_CMD;
	ub_cmdtr_state(sc, cmd);
	return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
	struct ub_dev *sc = (struct ub_dev *) arg;
	unsigned long flags;

	spin_lock_irqsave(&sc->lock, flags);
	usb_unlink_urb(&sc->work_urb);
	spin_unlock_irqrestore(&sc->lock, flags);
}
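/*
 * Aside (editor's sketch, not part of the driver): ub_scsi_cmd_start wraps
 * the CDB in the 31-byte Bulk-Only Transport Command Block Wrapper before
 * submitting it on the bulk-out pipe. The standalone sketch below mirrors
 * that wire format (the kernel's struct bulk_cb_wrap; the field names here
 * are illustrative). The signature 0x43425355 ("USBC") is US_BULK_CB_SIGN,
 * and bit 7 of the flags byte selects the transfer direction, matching the
 * bcb->Flags assignment above.
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* All multi-byte fields are little-endian on the wire; the driver uses
 * cpu_to_le32() for this, while the sketch simply assumes an LE host. */
struct sketch_cbw {
	uint32_t signature;		/* 0x43425355, "USBC" */
	uint32_t tag;			/* echoed back in the CSW */
	uint32_t data_transfer_length;	/* bytes expected in the data stage */
	uint8_t  flags;			/* bit 7: 1 = IN (read), 0 = OUT */
	uint8_t  lun;
	uint8_t  cb_length;		/* valid bytes in cb[], 1..16 */
	uint8_t  cb[16];		/* the CDB itself */
} __attribute__((packed));

int main(void)
{
	struct sketch_cbw cbw;

	memset(&cbw, 0, sizeof(cbw));
	cbw.signature = 0x43425355;	/* US_BULK_CB_SIGN (assumes LE host) */
	cbw.tag = 1;
	cbw.data_transfer_length = 8 * 512;
	cbw.flags = 0x80;		/* a read, as in bcb->Flags above */
	cbw.lun = 0;
	cbw.cb_length = 10;		/* READ(10) uses a 10-byte CDB */

	/* The wrapper must be exactly 31 bytes (US_BULK_CB_WRAP_LEN). */
	printf("CBW size: %zu\n", sizeof(cbw));
	return 0;
}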
/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb, struct pt_regs *pt)
{
	struct ub_dev *sc = urb->context;

	ub_complete(&sc->work_done);
	tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
	struct ub_dev *sc = (struct ub_dev *) _dev;
	unsigned long flags;

	spin_lock_irqsave(&sc->lock, flags);
	ub_scsi_dispatch(sc);
	spin_unlock_irqrestore(&sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
	struct ub_scsi_cmd *cmd;
	int rc;

	while ((cmd = ub_cmdq_peek(sc)) != NULL) {
		if (cmd->state == UB_CMDST_DONE) {
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
		} else if (cmd->state == UB_CMDST_INIT) {
			ub_cmdtr_new(sc, cmd);
			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
				break;
			cmd->error = rc;
			cmd->state = UB_CMDST_DONE;
			ub_cmdtr_state(sc, cmd);
		} else {
			if (!ub_is_completed(&sc->work_done))
				break;
			ub_scsi_urb_compl(sc, cmd);
		}
	}
}
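/*
 * Aside (editor's sketch, not part of the driver): ub_scsi_dispatch only
 * ever advances the command at the head of the queue. DONE commands are
 * popped and their callbacks run, INIT commands get their first URB, and
 * an in-flight command stops the loop until its URB completes and the
 * tasklet runs again. The simplified userspace model below (all names are
 * illustrative) shows that discipline in isolation.
 */

#include <stddef.h>
#include <stdio.h>

enum sketch_state { ST_INIT, ST_INFLIGHT, ST_DONE };

struct sketch_cmd {
	enum sketch_state state;
	struct sketch_cmd *next;
};

static struct sketch_cmd *head;

static void sketch_dispatch(void)
{
	struct sketch_cmd *cmd;

	while ((cmd = head) != NULL) {
		if (cmd->state == ST_DONE) {
			head = cmd->next;		/* like ub_cmdq_pop */
			printf("completed one command\n");	/* like (*cmd->done)() */
		} else if (cmd->state == ST_INIT) {
			cmd->state = ST_INFLIGHT;	/* like ub_scsi_cmd_start */
			break;	/* wait for the URB to complete */
		} else {
			break;	/* in flight: nothing to do until completion */
		}
	}
}

int main(void)
{
	struct sketch_cmd a = { ST_DONE, NULL };
	struct sketch_cmd b = { ST_INIT, NULL };

	a.next = &b;
	head = &a;
	sketch_dispatch();	/* pops a, starts b, then stops */
	return 0;
}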
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct urb *urb = &sc->work_urb;
	struct bulk_cs_wrap *bcs;
	int pipe;
	int rc;

/* P3 */ /** printk("ub: urb status %d pipe 0x%08x len %d act %d\n",
 urb->status, urb->pipe, urb->transfer_buffer_length, urb->actual_length); **/

	if (atomic_read(&sc->poison)) {
		/* A little too simplistic, I feel... */
		goto Bad_End;
	}

	if (cmd->state == UB_CMDST_CLEAR) {
		if (urb->status == -EPIPE) {
			/*
			 * STALL while clearing STALL.
			 * A STALL is illegal on a control pipe!
			 * XXX Might try to reset the device here and retry.
			 */
			printk(KERN_NOTICE "%s: "
			    "stall on control pipe for device %u\n",
			    sc->name, sc->dev->devnum);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		/* reset the endpoint toggle */
		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
			usb_pipeout(sc->last_pipe), 0);

		ub_state_sense(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLR2STS) {
		if (urb->status == -EPIPE) {
			/*
			 * STALL while clearing STALL.
			 * A STALL is illegal on a control pipe!
			 * XXX Might try to reset the device here and retry.
			 */
			printk(KERN_NOTICE "%s: "
			    "stall on control pipe for device %u\n",
			    sc->name, sc->dev->devnum);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		/* reset the endpoint toggle */
		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
			usb_pipeout(sc->last_pipe), 0);

		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_CMD) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear for device %u (%d)\n",
				    sc->name, sc->dev->devnum, rc);
				/*
				 * This is typically ENOMEM or some other such shit.
				 * Retrying is pointless. Just do Bad End on it...
				 */
				goto Bad_End;
			}
			cmd->state = UB_CMDST_CLEAR;
			ub_cmdtr_state(sc, cmd);
			return;
		}
		if (urb->status != 0)
			goto Bad_End;
		if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
			/* XXX Must do reset here to unconfuse the device */
			goto Bad_End;
		}

		if (cmd->dir == UB_DIR_NONE) {
			ub_state_stat(sc, cmd);
			return;
		}

		UB_INIT_COMPLETION(sc->work_done);

		if (cmd->dir == UB_DIR_READ)
			pipe = sc->recv_bulk_pipe;
		else
			pipe = sc->send_bulk_pipe;
		sc->last_pipe = pipe;
		usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
		    cmd->data, cmd->len, ub_urb_complete, sc);
		sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
		sc->work_urb.actual_length = 0;
		sc->work_urb.error_count = 0;
		sc->work_urb.status = 0;

		sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
		add_timer(&sc->work_timer);

		if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
			/* XXX Clear stalls */
			printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
			del_timer(&sc->work_timer);
			ub_complete(&sc->work_done);
			ub_state_done(sc, cmd, rc);
			return;
		}

		cmd->state = UB_CMDST_DATA;
		ub_cmdtr_state(sc, cmd);

	} else if (cmd->state == UB_CMDST_DATA) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear for device %u (%d)\n",
				    sc->name, sc->dev->devnum, rc);
				/*
				 * This is typically ENOMEM or some other such shit.
				 * Retrying is pointless. Just do Bad End on it...
				 */
				goto Bad_End;
			}
			cmd->state = UB_CMDST_CLR2STS;
			ub_cmdtr_state(sc, cmd);
			return;
		}
		if (urb->status == -EOVERFLOW) {
			/*
			 * A babble? Failure, but we must transfer CSW now.
			 */
			cmd->error = -EOVERFLOW;	/* A cheap trick... */
		} else {
			if (urb->status != 0)
				goto Bad_End;
		}

		cmd->act_len = urb->actual_length;
		ub_cmdtr_act_len(sc, cmd);

		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_STAT) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear for device %u (%d)\n",
				    sc->name, sc->dev->devnum, rc);
				/*
				 * This is typically ENOMEM or some other such shit.
				 * Retrying is pointless. Just do Bad End on it...
				 */
				goto Bad_End;
			}
			cmd->state = UB_CMDST_CLEAR;
			ub_cmdtr_state(sc, cmd);
			return;
		}
		if (urb->status != 0)
			goto Bad_End;

		if (urb->actual_length == 0) {
			/*
			 * Some broken devices add unnecessary zero-length
			 * packets to the end of their data transfers.
			 * Such packets show up as 0-length CSWs. If we
			 * encounter such a thing, try to read the CSW again.
			 */
			if (++cmd->stat_count >= 4) {
				printk(KERN_NOTICE "%s: "
				    "unable to get CSW on device %u\n",
				    sc->name, sc->dev->devnum);
				goto Bad_End;
			}
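/*
 * Aside (editor's sketch, not part of the driver): the CSW being read back
 * in the UB_CMDST_STAT state is the 13-byte Bulk-Only Transport Command
 * Status Wrapper. The standalone sketch below mirrors the kernel's
 * struct bulk_cs_wrap (field names here are illustrative) and shows the
 * checks a transport layer must make on it, including the zero-length
 * case the code above retries.
 */

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_csw {
	uint32_t signature;	/* 0x53425355, "USBS" */
	uint32_t tag;		/* must match the CBW's tag */
	uint32_t residue;	/* bytes requested minus bytes processed */
	uint8_t  status;	/* 0 = passed, 1 = failed, 2 = phase error */
} __attribute__((packed));

/* Validate a received CSW: check the length (a 0-length read is the
 * broken-device case retried above), the signature, and the tag.
 * Assumes a little-endian host, as the wire format is little-endian. */
static int sketch_csw_ok(const struct sketch_csw *csw, size_t len, uint32_t tag)
{
	if (len != sizeof(struct sketch_csw))	/* 13 bytes expected */
		return 0;
	if (csw->signature != 0x53425355)
		return 0;
	if (csw->tag != tag)
		return 0;
	return 1;
}

int main(void)
{
	struct sketch_csw csw = { 0x53425355, 7, 0, 0 };

	printf("CSW size: %zu, valid: %d\n",
	    sizeof(csw), sketch_csw_ok(&csw, sizeof(csw), 7));
	return 0;
}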
