
ub.c

Linux Kernel 2.6.9 for OMAP1710 (C)
Page 1 of 4
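The status-phase code at the top of this page parses a Bulk-Only Transport Command Status Wrapper (CSW), which the device returns on the bulk-in pipe after every command. As a reference for the fields used below (bcs->Signature, bcs->Tag, bcs->Residue, bcs->Status), here is a sketch of the 13-byte CSW layout taken from the USB Mass Storage Bulk-Only Transport specification; it approximates, but is not copied from, the kernel structure that backs sc->work_bcs.

/*
 * Sketch of the Command Status Wrapper (CSW), 13 bytes on the wire
 * (US_BULK_CS_WRAP_LEN). Layout per the Bulk-Only Transport spec;
 * the driver's own definition in the usb-storage headers is equivalent.
 */
struct bulk_cs_wrap_sketch {
        __le32  Signature;      /* 'USBS' = 0x53425355, checked below */
        __u32   Tag;            /* echoed from the matching command wrapper */
        __le32  Residue;        /* bytes not transferred in the data phase */
        __u8    Status;         /* 0 = pass, 1 = fail, 2 = phase error */
} __attribute__((packed));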
			/*
			 * ub_state_stat only not dropping the count...
			 */
			UB_INIT_COMPLETION(sc->work_done);

			sc->last_pipe = sc->recv_bulk_pipe;
			usb_fill_bulk_urb(&sc->work_urb, sc->dev,
			    sc->recv_bulk_pipe, &sc->work_bcs,
			    US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
			sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
			sc->work_urb.actual_length = 0;
			sc->work_urb.error_count = 0;
			sc->work_urb.status = 0;

			sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
			add_timer(&sc->work_timer);

			rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC);
			if (rc != 0) {
				/* XXX Clear stalls */
				printk("%s: CSW #%d submit failed (%d)\n",
				    sc->name, cmd->tag, rc); /* P3 */
				del_timer(&sc->work_timer);
				ub_complete(&sc->work_done);
				ub_state_done(sc, cmd, rc);
				return;
			}
			return;
		}

		/*
		 * Check the returned Bulk protocol status.
		 */

		bcs = &sc->work_bcs;
		rc = le32_to_cpu(bcs->Residue);
		if (rc != cmd->len - cmd->act_len) {
			/*
			 * It is all right to transfer less, the caller has
			 * to check. But it's not all right if the device
			 * counts disagree with our counts.
			 */
			/* P3 */ printk("%s: resid %d len %d act %d\n",
			    sc->name, rc, cmd->len, cmd->act_len);
			goto Bad_End;
		}

		if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN) &&
		    bcs->Signature != cpu_to_le32(US_BULK_CS_OLYMPUS_SIGN)) {
			/* XXX Rate-limit, even for P3 tagged */
			/* P3 */ printk("ub: signature 0x%x\n", bcs->Signature);
			/* Windows ignores signatures, so do we. */
		}

		if (bcs->Tag != cmd->tag) {
			/* P3 */ printk("%s: tag orig 0x%x reply 0x%x\n",
			    sc->name, cmd->tag, bcs->Tag);
			goto Bad_End;
		}

		switch (bcs->Status) {
		case US_BULK_STAT_OK:
			break;
		case US_BULK_STAT_FAIL:
			ub_state_sense(sc, cmd);
			return;
		case US_BULK_STAT_PHASE:
			/* XXX We must reset the transport here */
			/* P3 */ printk("%s: status PHASE\n", sc->name);
			goto Bad_End;
		default:
			printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
			    sc->name, bcs->Status);
			goto Bad_End;
		}

		/* Not zeroing error to preserve a babble indicator */
		cmd->state = UB_CMDST_DONE;
		ub_cmdtr_state(sc, cmd);
		ub_cmdq_pop(sc);
		(*cmd->done)(sc, cmd);

	} else if (cmd->state == UB_CMDST_SENSE) {

		/*
		 * We do not look at sense, because even if there was no sense,
		 * we get into UB_CMDST_SENSE from a STALL or CSW FAIL only.
		 * We request sense because we want to clear CHECK CONDITION
		 * on devices with delusions of SCSI, and not because we
		 * are curious in any way about the sense itself.
		 */
		/* if ((cmd->top_sense[2] & 0x0F) == NO_SENSE) { foo } */
		ub_state_done(sc, cmd, -EIO);

	} else {
		printk(KERN_WARNING "%s: "
		    "wrong command state %d on device %u\n",
		    sc->name, cmd->state, sc->dev->devnum);
		goto Bad_End;
	}
	return;

Bad_End: /* Little Excel is dead */
	ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Finish the command.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{
	cmd->error = rc;
	cmd->state = UB_CMDST_DONE;
	ub_cmdtr_state(sc, cmd);
	ub_cmdq_pop(sc);
	(*cmd->done)(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state.
 */
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->recv_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
	add_timer(&sc->work_timer);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* XXX Clear stalls */
		printk("ub: CSW #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
		del_timer(&sc->work_timer);
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return;
	}

	cmd->stat_count = 0;
	cmd->state = UB_CMDST_STAT;
	ub_cmdtr_state(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a REQUEST SENSE and go to SENSE state.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd *scmd;
	int rc;

	if (cmd->cdb[0] == REQUEST_SENSE) {
		rc = -EPIPE;
		goto error;
	}

	memset(&sc->top_sense, 0, UB_SENSE_SIZE);

	scmd = &sc->top_rqs_cmd;
	scmd->cdb[0] = REQUEST_SENSE;
	scmd->cdb_len = 6;
	scmd->dir = UB_DIR_READ;
	scmd->state = UB_CMDST_INIT;
	scmd->data = sc->top_sense;
	scmd->len = UB_SENSE_SIZE;
	scmd->done = ub_top_sense_done;
	scmd->back = cmd;
	scmd->tag = sc->tagcnt++;

	cmd->state = UB_CMDST_SENSE;
	ub_cmdtr_state(sc, cmd);

	ub_cmdq_insert(sc, scmd);
	return;

error:
	ub_state_done(sc, cmd, rc);
}

/*
 * A helper for the command's state machine:
 * Submit a stall clear.
 */
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	int rc;

	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein (stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	UB_INIT_COMPLETION(sc->work_done);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&sc->work_timer);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		del_timer(&sc->work_timer);
		ub_complete(&sc->work_done);
		return rc;
	}

	return 0;
}

/*
 */
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
{
	unsigned char *sense = scmd->data;
	struct ub_scsi_cmd *cmd;

	ub_cmdtr_sense(sc, scmd, sense);

	if ((cmd = ub_cmdq_peek(sc)) == NULL) {
		printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
		return;
	}
	if (cmd != scmd->back) {
		printk(KERN_WARNING "%s: "
		    "sense done for wrong command 0x%x on device %u\n",
		    sc->name, cmd->tag, sc->dev->devnum);
		return;
	}
	if (cmd->state != UB_CMDST_SENSE) {
		printk(KERN_WARNING "%s: "
		    "sense done with bad cmd state %d on device %u\n",
		    sc->name, cmd->state, sc->dev->devnum);
		return;
	}

	ub_scsi_urb_compl(sc, cmd);
}

#if 0
/* Determine what the maximum LUN supported is */
int usb_stor_Bulk_max_lun(struct us_data *us)
{
	int result;

	/* issue the command */
	result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
				 US_BULK_GET_MAX_LUN,
				 USB_DIR_IN | USB_TYPE_CLASS |
				 USB_RECIP_INTERFACE,
				 0, us->ifnum, us->iobuf, 1, HZ);

	/*
	 * Some devices (i.e. Iomega Zip100) need this -- apparently
	 * the bulk pipes get STALLed when the GetMaxLUN request is
	 * processed.   This is, in theory, harmless to all other devices
	 * (regardless of if they stall or not).
	 */
	if (result < 0) {
		usb_stor_clear_halt(us, us->recv_bulk_pipe);
		usb_stor_clear_halt(us, us->send_bulk_pipe);
	}

	US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
		  result, us->iobuf[0]);

	/* if we have a successful request, return the result */
	if (result == 1)
		return us->iobuf[0];

	/* return the default -- no LUNs */
	return 0;
}
#endif

/*
 * This is called from a process context.
 */
static void ub_revalidate(struct ub_dev *sc)
{
	sc->readonly = 0;	/* XXX Query this from the device */

	/*
	 * XXX sd.c sets capacity to zero in such case. However, it doesn't
	 * work for us. In case of zero capacity, block layer refuses to
	 * have the /dev/uba opened (why?) Set capacity to some random value.
	 */
	sc->capacity.nsec = 50;
	sc->capacity.bsize = 512;
	sc->capacity.bshift = 0;

	if (ub_sync_tur(sc) != 0)
		return;			/* Not ready */
	sc->changed = 0;

	if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
		/*
		 * The retry here means something is wrong, either with the
		 * device, with the transport, or with our code.
		 * We keep this because sd.c has retries for capacity.
		 */
		if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
			sc->capacity.nsec = 100;
			sc->capacity.bsize = 512;
			sc->capacity.bshift = 0;
		}
	}
}

/*
 * The open function.
 * This is mostly needed to keep refcounting, but also to support
 * media checks on removable media drives.
 */
static int ub_bd_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_dev *sc;
	unsigned long flags;
	int rc;

	if ((sc = disk->private_data) == NULL)
		return -ENXIO;
	spin_lock_irqsave(&ub_lock, flags);
	if (atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		return -ENXIO;
	}
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	if (sc->removable || sc->readonly)
		check_disk_change(inode->i_bdev);

	/* XXX sd.c and floppy.c bail on open if media is not present. */

	if (sc->readonly && (filp->f_mode & FMODE_WRITE)) {
		rc = -EROFS;
		goto err_open;
	}

	return 0;

err_open:
	spin_lock_irqsave(&ub_lock, flags);
	--sc->openc;
	if (sc->openc == 0 && atomic_read(&sc->poison))
		ub_cleanup(sc);
	spin_unlock_irqrestore(&ub_lock, flags);
	return rc;
}

/*
 */
static int ub_bd_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_dev *sc = disk->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ub_lock, flags);
	--sc->openc;
	if (sc->openc == 0 && atomic_read(&sc->poison))
		ub_cleanup(sc);
	spin_unlock_irqrestore(&ub_lock, flags);
	return 0;
}

/*
 * The ioctl interface.
 */
static int ub_bd_ioctl(struct inode *inode, struct file *filp,
    unsigned int cmd, unsigned long arg)
{
// void __user *usermem = (void *) arg;
// struct carm_port *port = ino->i_bdev->bd_disk->private_data;
// struct hd_geometry geom;

#if 0
	switch (cmd) {
	case HDIO_GETGEO:
		if (usermem == NULL)		// XXX Bizarre. Why?
			return -EINVAL;

		geom.heads = (u8) port->dev_geom_head;
		geom.sectors = (u8) port->dev_geom_sect;
		geom.cylinders = port->dev_geom_cyl;
		geom.start = get_start_sect(ino->i_bdev);

		if (copy_to_user(usermem, &geom, sizeof(geom)))
			return -EFAULT;
		return 0;

	default: ;
	}
#endif

	return -ENOTTY;
}

/*
 * This is called once a new disk was seen by the block layer or by ub_probe().
 * The main objective here is to discover the features of the media such as
 * the capacity, read-only status, etc. USB storage generally does not
 * need to be spun up, but if we needed it, this would be the place.
 *
 * This call can sleep.
 *
 * The return code is not used.
 */
static int ub_bd_revalidate(struct gendisk *disk)
{
	struct ub_dev *sc = disk->private_data;

	ub_revalidate(sc);
	/* This is pretty much a long term P3 */
	printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
	    sc->name, sc->dev->devnum, sc->capacity.nsec, sc->capacity.bsize);

	set_capacity(disk, sc->capacity.nsec);
	// set_disk_ro(sdkp->disk, sc->readonly);

	return 0;
}

/*
 * The check is called by the block layer to verify if the media
 * is still available. It is supposed to be harmless, lightweight and
 * non-intrusive in case the media was not changed.
 *
 * This call can sleep.
 *
 * The return code is bool!
 */
static int ub_bd_media_changed(struct gendisk *disk)
{
	struct ub_dev *sc = disk->private_data;

	if (!sc->removable)
		return 0;

	/*
	 * We clean checks always after every command, so this is not
	 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
	 * the device is actually not ready with operator or software
	 * intervention required. One dangerous item might be a drive which
	 * spins itself down, and come the time to write dirty pages, this
	 * will fail, then block layer discards the data. Since we never
	 * spin drives up, such devices simply cannot be used with ub anyway.
	 */
	if (ub_sync_tur(sc) != 0) {
		sc->changed = 1;
		/* P3 */ printk("%s: made changed\n", sc->name);
		return 1;
	}

	/* The sd.c clears this before returning (one-shot flag). Why? */
	/* P3 */ printk("%s: %s changed\n", sc->name,
	    sc->changed? "is": "was not");
	return sc->changed;
}

static struct block_device_operations ub_bd_fops = {
	.owner		= THIS_MODULE,
	.open		= ub_bd_open,
	.release	= ub_bd_release,
	.ioctl		= ub_bd_ioctl,
	.media_changed	= ub_bd_media_changed,
	.revalidate_disk = ub_bd_revalidate,
};

/*
 * Common ->done routine for commands executed synchronously.
 */
static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct completion *cop = cmd->back;
	complete(cop);
}

/*
 * Test if the device has a check condition on it, synchronously.
 */
static int ub_sync_tur(struct ub_dev *sc)
{
	struct ub_scsi_cmd *cmd;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
	unsigned long flags;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	memset(cmd, 0, ALLOC_SIZE);

	cmd->cdb[0] = TEST_UNIT_READY;
	cmd->cdb_len = 6;
	cmd->dir = UB_DIR_NONE;

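The ub_bd_fops table above is all that the block layer sees of this driver; the probe code, which appears on a later page of this file, attaches it to a gendisk. Purely as orientation, here is a minimal sketch of how a 2.6-era block driver typically wires such a table up. The function name my_attach_disk, the constant MY_MAJOR and the queue argument are illustrative and do not come from ub.c.

#include <linux/genhd.h>
#include <linux/blkdev.h>

/*
 * Sketch only (not from ub.c): typical 2.6-era gendisk registration for a
 * block_device_operations table like ub_bd_fops. MY_MAJOR and the function
 * name are made up for illustration.
 */
static int my_attach_disk(struct ub_dev *sc, struct request_queue *q)
{
	struct gendisk *disk;

	if ((disk = alloc_disk(16)) == NULL)	/* whole disk + 15 partitions */
		return -ENOMEM;

	disk->major = MY_MAJOR;
	disk->first_minor = 0;
	disk->fops = &ub_bd_fops;		/* the table defined above */
	disk->private_data = sc;		/* what ub_bd_open() fetches back */
	disk->queue = q;
	sprintf(disk->disk_name, "uba");

	set_capacity(disk, sc->capacity.nsec);	/* set_capacity() takes 512-byte sectors */
	add_disk(disk);				/* device becomes visible */
	return 0;
}

ub_bd_open() then recovers sc through disk->private_data, and check_disk_change() on that block device is what ends up calling ub_bd_media_changed() and, when a change is reported, ub_bd_revalidate().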