
📄 ide3.c

📁 IDE driver code for ucos on the Samsung s3c44b0
💻 C
📖 Page 1 of 5
			return 0;
	}
	*startstop = ide_error(drive, "status error", stat);
	return 1;
}

/*
 * execute_drive_cmd() issues a special drive command,
 * usually initiated by ioctl() from the external hdparm program.
 */
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, struct request *rq)
{
	switch(rq->cmd) {
		case IDE_DRIVE_TASKFILE:
		{
			ide_task_t *args = rq->special;

			if (!(args)) break;

#ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
	{
	printk(KERN_INFO "%s: ", drive->name);
//	printk("TF.0=x%02x ", args->tfRegister[IDE_DATA_OFFSET]);
	printk("TF.1=x%02x ", args->tfRegister[IDE_FEATURE_OFFSET]);
	printk("TF.2=x%02x ", args->tfRegister[IDE_NSECTOR_OFFSET]);
	printk("TF.3=x%02x ", args->tfRegister[IDE_SECTOR_OFFSET]);
	printk("TF.4=x%02x ", args->tfRegister[IDE_LCYL_OFFSET]);
	printk("TF.5=x%02x ", args->tfRegister[IDE_HCYL_OFFSET]);
	printk("TF.6=x%02x ", args->tfRegister[IDE_SELECT_OFFSET]);
	printk("TF.7=x%02x\n", args->tfRegister[IDE_COMMAND_OFFSET]);
	printk(KERN_INFO "%s: ", drive->name);
//	printk("HTF.0=x%02x ", args->hobRegister[IDE_DATA_OFFSET_HOB]);
	printk("HTF.1=x%02x ", args->hobRegister[IDE_FEATURE_OFFSET_HOB]);
	printk("HTF.2=x%02x ", args->hobRegister[IDE_NSECTOR_OFFSET_HOB]);
	printk("HTF.3=x%02x ", args->hobRegister[IDE_SECTOR_OFFSET_HOB]);
	printk("HTF.4=x%02x ", args->hobRegister[IDE_LCYL_OFFSET_HOB]);
	printk("HTF.5=x%02x ", args->hobRegister[IDE_HCYL_OFFSET_HOB]);
	printk("HTF.6=x%02x ", args->hobRegister[IDE_SELECT_OFFSET_HOB]);
	printk("HTF.7=x%02x\n", args->hobRegister[IDE_CONTROL_OFFSET_HOB]);
	}
#endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */

//			if (args->tf_out_flags.all == 0) {
			do_taskfile(drive,
				(struct hd_drive_task_hdr *)&args->tfRegister,
				(struct hd_drive_hob_hdr *)&args->hobRegister,
				args->handler);
//			} else {
//				return flagged_taskfile(drive, args);
//			}

			if (((args->command_type == IDE_DRIVE_TASK_RAW_WRITE) ||
			     (args->command_type == IDE_DRIVE_TASK_OUT)) &&
			      args->prehandler && args->handler)
				return args->prehandler(drive, rq);
			return ide_started;
		}
		case IDE_DRIVE_TASK:
		{
			byte *args = rq->buffer;
			byte sel;

			if (!(args)) break;
#ifdef DEBUG
			printk("%s: DRIVE_TASK_CMD ", drive->name);
			printk("cmd=0x%02x ", args[0]);
			printk("fr=0x%02x ", args[1]);
			printk("ns=0x%02x ", args[2]);
			printk("sc=0x%02x ", args[3]);
			printk("lcyl=0x%02x ", args[4]);
			printk("hcyl=0x%02x ", args[5]);
			printk("sel=0x%02x\n", args[6]);
#endif
			OUT_BYTE(args[1], IDE_FEATURE_REG);
			OUT_BYTE(args[3], IDE_SECTOR_REG);
			OUT_BYTE(args[4], IDE_LCYL_REG);
			OUT_BYTE(args[5], IDE_HCYL_REG);
			sel = (args[6] & ~0x10);
			if (drive->select.b.unit)
				sel |= 0x10;
			OUT_BYTE(sel, IDE_SELECT_REG);
			ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
			return ide_started;
		}
		case IDE_DRIVE_CMD:
		{
			byte *args = rq->buffer;

			if (!(args)) break;
#ifdef DEBUG
			printk("%s: DRIVE_CMD ", drive->name);
			printk("cmd=0x%02x ", args[0]);
			printk("sc=0x%02x ", args[1]);
			printk("fr=0x%02x ", args[2]);
			printk("xx=0x%02x\n", args[3]);
#endif
			if (args[0] == WIN_SMART) {
				OUT_BYTE(0x4f, IDE_LCYL_REG);
				OUT_BYTE(0xc2, IDE_HCYL_REG);
				OUT_BYTE(args[2],IDE_FEATURE_REG);
				OUT_BYTE(args[1],IDE_SECTOR_REG);
				ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
				return ide_started;
			}
			OUT_BYTE(args[2],IDE_FEATURE_REG);
			ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
			return ide_started;
		}
		default:
			break;
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive, GET_STAT(), GET_ERR());
	return ide_stopped;
}
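
/*
 * Illustration only, not part of this driver: as the comment above notes,
 * these special commands usually arrive from user space via ioctl() -- the
 * HDIO_DRIVE_CMD path taken by hdparm.  A minimal user-space sketch is kept
 * below under "#if 0" so it is never compiled here.  The device path and the
 * choice of WIN_CHECKPOWERMODE1 are assumptions made for the example; the
 * args[] layout (command, sector count, feature, count) matches the
 * cmd/sc/fr/xx bytes that the IDE_DRIVE_CMD case above reads from rq->buffer.
 */
#if 0	/* user-space illustration, never compiled into the driver */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>	/* HDIO_DRIVE_CMD, WIN_CHECKPOWERMODE1 */

int main(void)
{
	/* args[0]=command, args[1]=sector count, args[2]=feature, args[3]=count */
	unsigned char args[4] = { WIN_CHECKPOWERMODE1, 0, 0, 0 };
	int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK);	/* device path is an assumption */

	if (fd < 0 || ioctl(fd, HDIO_DRIVE_CMD, args) != 0) {
		perror("HDIO_DRIVE_CMD");
		return 1;
	}
	/* on success the driver copies status, error and the nsector register back into args[] */
	printf("power mode register: 0x%02x\n", args[2]);
	return 0;
}
#endif
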
 */#ifdef DEBUG 	printk("%s: DRIVE_CMD (null)\n", drive->name);#endif 	ide_end_drive_cmd(drive, GET_STAT(), GET_ERR()); 	return ide_stopped;}/* * start_request() initiates handling of a new I/O request * needed to reverse the perverted changes anonymously made back * 2.3.99-pre6 */static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq){	ide_startstop_t startstop;	unsigned long block, blockend;	unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS;	ide_hwif_t *hwif = HWIF(drive);#ifdef DEBUG	printk("%s: start_request: current=0x%08lx\n", hwif->name, (unsigned long) rq);#endif	/* bail early if we've exceeded max_failures */	if (drive->max_failures && (drive->failures > drive->max_failures)) {		goto kill_rq;	}	if (unit >= MAX_DRIVES) {		printk("%s: bad device number: %s\n", hwif->name, kdevname(rq->rq_dev));		goto kill_rq;	}#ifdef DEBUG	if (rq->bh && !buffer_locked(rq->bh)) {		printk("%s: block not locked\n", drive->name);		goto kill_rq;	}#endif	block    = rq->sector;	blockend = block + rq->nr_sectors;	if ((rq->cmd == READ || rq->cmd == WRITE) &&	    (drive->media == ide_disk || drive->media == ide_floppy)) {		if ((blockend < block) || (blockend > drive->part[minor&PARTN_MASK].nr_sects)) {			printk("%s%c: bad access: block=%ld, count=%ld\n", drive->name,			 (minor&PARTN_MASK)?'0'+(minor&PARTN_MASK):' ', block, rq->nr_sectors);			goto kill_rq;		}		block += drive->part[minor&PARTN_MASK].start_sect + drive->sect0;	}	/* Yecch - this will shift the entire interval,	   possibly killing some innocent following sector */	if (block == 0 && drive->remap_0_to_1 == 1)		block = 1;  /* redirect MBR access to EZ-Drive partn table */#if (DISK_RECOVERY_TIME > 0)	while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);#endif	SELECT_DRIVE(hwif, drive);	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {		printk("%s: drive not ready for command\n", drive->name);		return startstop;	}	if (!drive->special.all) {		switch(rq->cmd) {			case IDE_DRIVE_CMD:			case IDE_DRIVE_TASK:			case IDE_DRIVE_TASKFILE:				return execute_drive_cmd(drive, rq);			default:				break;		}		if (drive->driver != NULL) {			return (DRIVER(drive)->do_request(drive, rq, block));		}		printk("%s: media type %d not supported\n", drive->name, drive->media);		goto kill_rq;	}	return do_special(drive);kill_rq:	if (drive->driver != NULL)		DRIVER(drive)->end_request(0, HWGROUP(drive));	else		ide_end_request(0, HWGROUP(drive));	return ide_stopped;}ide_startstop_t restart_request (ide_drive_t *drive){	ide_hwgroup_t *hwgroup = HWGROUP(drive);	unsigned long flags;	struct request *rq;	spin_lock_irqsave(&io_request_lock, flags);	hwgroup->handler = NULL;	del_timer(&hwgroup->timer);	rq = hwgroup->rq;	spin_unlock_irqrestore(&io_request_lock, flags);	return start_request(drive, rq);}/* * ide_stall_queue() can be used by a drive to give excess bandwidth back * to the hwgroup by sleeping for timeout jiffies. */void ide_stall_queue (ide_drive_t *drive, unsigned long timeout){	if (timeout > WAIT_WORSTCASE)		timeout = WAIT_WORSTCASE;	drive->sleep = timeout + jiffies;}#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)/* * choose_drive() selects the next drive which will be serviced. 
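
/*
 * Illustrative example of the WAKEUP() heuristic used by choose_drive()
 * below (values made up for the example): a drive that started service at
 * jiffies == 1000 and was serviced for 4 jiffies gets WAKEUP == 1008, while
 * one that started at 1002 and took 10 jiffies gets WAKEUP == 1022.  All
 * else being equal (no sleep deadline set, queue non-empty and unplugged),
 * the first drive has the earlier anticipated wakeup and is picked first.
 */
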
/*
 * choose_drive() selects the next drive which will be serviced.
 */
static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;
	do {
		if (!list_empty(&drive->queue.queue_head) && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
			if (!best
			 || (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
			 || (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
			{
				if( !drive->queue.plugged )
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleep && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if (!drive->sleep
				 && 0 < (signed long)(WAKEUP(drive) - (jiffies - best->service_time))
				 && 0 < (signed long)((jiffies + t) - WAKEUP(drive)))
				{
					ide_stall_queue(best, IDE_MIN(t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&io_request_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces.  Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices.  This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global io_request_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately.  Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue.  If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The io_request_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver.  This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
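
/*
 * Concrete sequence, as an illustration of the busy protocol described
 * above: thread A takes io_request_lock, finds hwgroup->busy == 0, sets it
 * to 1 and starts the first queued request, then returns with busy still
 * set while the drive works.  Thread B enters meanwhile, sees busy == 1,
 * leaves its request on the queue and exits at once.  The completing
 * thread or IRQ handler then either starts the next request (busy stays 1)
 * or, when no work remains, clears busy so a later caller can start over.
 */
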
/* --BenH: made non-static as ide-pmac.c uses it to kick the hwgroup back
 *         into life on wakeup from machine sleep.
 */
void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t	*drive;
	ide_hwif_t	*hwif;
	struct request	*rq;
	ide_startstop_t	startstop;

	ide_get_lock(&ide_lock, ide_intr, hwgroup);	/* for atari only: POSSIBLY BROKEN HERE(?) */

	__cli();	/* necessary paranoia: ensure IRQs are masked on local CPU */

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			unsigned long sleep = 0;
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep)))
					sleep = drive->sleep;
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleep) {
				/*
				 * Take a short snooze, and then wake up this hwgroup again.
				 * This gives other hwgroups on the same IRQ a chance to
				 * play fairly with us, just in case there are big differences
				 * in relative throughputs.. don't want to hog the cpu too much.
				 */
				if (0 < (signed long)(jiffies + WAIT_MIN_SLEEP - sleep))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk("ide_set_handler: timer already active\n");
#endif
				hwgroup->sleeping = 1;	/* so that ide_timer_expiry knows what to do */
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1 while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock otherwise? perhaps from tq_disk? */
				ide_release_lock(&ide_lock);	/* for atari only */
				hwgroup->busy = 0;
			}
			return;		/* no more work for this hwgroup (for now) */
		}
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif && hwif->hw.io_ports[IDE_CONTROL_OFFSET]) {
			/* set nIEN for previous hwif */
			SELECT_INTERRUPT(hwif, drive);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleep = 0;
		drive->service_start = jiffies;

		if ( drive->queue.plugged )	/* paranoia */
			printk("%s: Huh? nuking plugged queue\n", drive->name);

		rq = hwgroup->rq = blkdev_entry_next_request(&drive->queue.queue_head);
		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up.  So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq && hwif->hw.irq != masked_irq)
			disable_irq_nosync(hwif->hw.irq);
		spin_unlock(&io_request_lock);
		ide__sti();	/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&io_request_lock);
		if (masked_irq && hwif->hw.irq != masked_irq)
			enable_irq(hwif->hw.irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}

/*
 * ide_get_queue() returns the queue which corresponds to a given device.
 */
request_queue_t *ide_get_queue (kdev_t dev)
{
	ide_hwif_t *hwif = (ide_hwif_t *)blk_dev[MAJOR(dev)].data;

	return &hwif->drives[DEVICE_NR(dev) & 1].queue;
}

/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(request_queue_t *q)
{
	ide_do_request(q->queuedata, 0);
}

/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
