/*
 * ide.c
 *
 * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD. */
/*
 * Completion handler for a special DRIVE_CMD request (issued by
 * execute_drive_cmd(), typically on behalf of an hdparm ioctl).
 * If the drive still asserts DRQ and the caller requested response data
 * (args[3] != 0), reads args[3] sectors of data into the request buffer
 * at &args[4], then ends the request according to the final status.
 */
static void drive_cmd_intr (ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	byte *args = (byte *) rq->buffer;	/* cmd block: [cmd, nsect, feature, data-sectors, data...] */
	byte stat = GET_STAT();
	int retries = 10;

	ide__sti();	/* local CPU only */
	if ((stat & DRQ_STAT) && args && args[3]) {
		/*
		 * Disable 32-bit I/O for this transfer and restore the
		 * drive's previous setting afterwards.
		 */
		byte io_32bit = drive->io_32bit;
		drive->io_32bit = 0;
		ide_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
		drive->io_32bit = io_32bit;
		/* poll up to 10 times, 100us apart, for BUSY to clear */
		while (((stat = GET_STAT()) & BUSY_STAT) && retries--)
			udelay(100);
	}
	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		ide_end_drive_cmd (drive, stat, GET_ERR());
	else
		ide_error(drive, "drive_cmd", stat); /* calls ide_end_drive_cmd */
}

/*
 * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
 * commands to a drive.  It used to do much more, but has been scaled back.
 *
 * Handles exactly one pending "special" action per call: a queued tuning
 * request takes priority, otherwise the work is delegated to the attached
 * sub-driver.  Unknown flag bits are reported and cleared.
 */
static inline void do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;
#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		ide_tuneproc_t *tuneproc = HWIF(drive)->tuneproc;
		s->b.set_tune = 0;	/* clear the flag before (possibly) tuning */
		if (tuneproc != NULL)
			tuneproc(drive, drive->tune_req);
	} else if (drive->driver != NULL) {
		DRIVER(drive)->special(drive);
	} else if (s->all) {
		/* no sub-driver attached and flags we don't understand: drop them */
		printk("%s: bad special flag: 0x%02x\n", drive->name, s->all);
		s->all = 0;
	}
}

/*
 * This routine busy-waits for the drive status to be not "busy".
 * It then checks the status for all of the "good" bits and none
 * of the "bad" bits, and if all is okay it returns 0.  All other
 * cases return 1 after invoking ide_error() -- caller should just return.
 *
 * This routine should get fixed to not hog the cpu during extra long waits..
 * That could be done by busy-waiting for the first jiffy or two, and then
 * setting a timer to wake up at half second intervals thereafter,
 * until timeout is achieved, before timing out.
 */
int ide_wait_stat (ide_drive_t *drive, byte good, byte bad, unsigned long timeout)
{
	byte stat;
	int i;
	unsigned long flags;

	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */
	if ((stat = GET_STAT()) & BUSY_STAT) {
		__save_flags(flags);	/* local CPU only */
		ide__sti();		/* local CPU only */
		timeout += jiffies;	/* convert relative timeout to an absolute deadline */
		while ((stat = GET_STAT()) & BUSY_STAT) {
			/* jiffies-wrap-safe deadline comparison */
			if (0 < (signed long)(jiffies - timeout)) {
				__restore_flags(flags);	/* local CPU only */
				ide_error(drive, "status timeout", stat);
				return 1;
			}
		}
		__restore_flags(flags);	/* local CPU only */
	}
	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = GET_STAT()), good, bad))
			return 0;	/* all "good" bits set, no "bad" bits */
	}
	ide_error(drive, "status error", stat);
	return 1;
}

/*
 * execute_drive_cmd() issues a special drive command,
 * usually initiated by ioctl() from the external hdparm program.
 *
 * rq->buffer, when non-NULL, is a 4-byte command block:
 * args[0]=command, args[1]=sector-count register value,
 * args[2]=feature register value, args[3]=number of data sectors
 * expected back (consumed later by drive_cmd_intr()).
 */
static void execute_drive_cmd (ide_drive_t *drive, struct request *rq)
{
	byte *args = rq->buffer;
	if (args) {
#ifdef DEBUG
		printk("%s: DRIVE_CMD cmd=0x%02x sc=0x%02x fr=0x%02x xx=0x%02x\n",
		 drive->name, args[0], args[1], args[2], args[3]);
#endif
		if (args[0] == WIN_SMART) {
			/* SMART commands require these magic cylinder values */
			OUT_BYTE(0x4f, IDE_LCYL_REG);
			OUT_BYTE(0xc2, IDE_HCYL_REG);
		}
		OUT_BYTE(args[2],IDE_FEATURE_REG);
		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
		return;
	} else {
		/*
		 * NULL is actually a valid way of waiting for
		 * all current requests to be flushed from the queue.
		 */
#ifdef DEBUG
		printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
		ide_end_drive_cmd(drive, GET_STAT(), GET_ERR());
		return;
	}
}

/*
 * start_request() initiates handling of a new I/O request:
 * validates the device/partition and the sector range, remaps the
 * request to an absolute block number, selects the drive, waits for
 * it to become ready, and dispatches to the special-command path,
 * the DRIVE_CMD path, or the attached sub-driver.
 */
static inline void start_request (ide_drive_t *drive)
{
	unsigned long block, blockend;
	struct request *rq = drive->queue;
	unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS;
	ide_hwif_t *hwif = HWIF(drive);

	ide__sti();	/* local CPU only */
#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n", hwif->name, (unsigned long) rq);
#endif
	if (unit >= MAX_DRIVES) {
		printk("%s: bad device number: %s\n", hwif->name, kdevname(rq->rq_dev));
		goto kill_rq;
	}
#ifdef DEBUG
	if (rq->bh && !buffer_locked(rq->bh)) {
		printk("%s: block not locked\n", drive->name);
		goto kill_rq;
	}
#endif
	block    = rq->sector;
	blockend = block + rq->nr_sectors;
	/* reject requests that wrap or run past the end of the partition */
	if ((blockend < block) || (blockend > drive->part[minor&PARTN_MASK].nr_sects)) {
		printk("%s%c: bad access: block=%ld, count=%ld\n", drive->name,
		 (minor&PARTN_MASK)?'0'+(minor&PARTN_MASK):' ', block, rq->nr_sectors);
		goto kill_rq;
	}
	/* translate partition-relative sector to an absolute block number */
	block += drive->part[minor&PARTN_MASK].start_sect + drive->sect0;
#if FAKE_FDISK_FOR_EZDRIVE
	if (block == 0 && drive->remap_0_to_1)
		block = 1;	/* redirect MBR access to EZ-Drive partn table */
#endif /* FAKE_FDISK_FOR_EZDRIVE */
#if (DISK_RECOVERY_TIME > 0)
	/* busy-wait out the mandated quiet period between commands */
	while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
#endif
	SELECT_DRIVE(hwif, drive);
	if (ide_wait_stat(drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk("%s: drive not ready for command\n", drive->name);
		return;
	}
	if (!drive->special.all) {
		if (rq->cmd == IDE_DRIVE_CMD) {
			execute_drive_cmd(drive, rq);
			return;
		}
		if (drive->driver != NULL) {
			DRIVER(drive)->do_request(drive, rq, block);
			return;
		}
		printk("%s: media type %d not supported\n", drive->name, drive->media);
		goto kill_rq;
	}
	/* pending special actions (tuning etc.) take priority over the request */
	do_special(drive);
	return;
kill_rq:
	if (drive->driver != NULL)
		DRIVER(drive)->end_request(0, HWGROUP(drive));
	else
		ide_end_request(0, HWGROUP(drive));
}

/*
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the hwgroup by sleeping for timeout jiffies.
 * The stall is capped at WAIT_WORSTCASE jiffies.
 */
void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;	/* absolute wakeup time */
}

/*
 * WAKEUP() estimates when a drive next deserves service: its last
 * service start plus twice its last service duration.
 */
#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)

/*
 * choose_drive() selects the next drive which will be serviced.
 *
 * Scans the hwgroup's circular drive list for drives with queued
 * requests that are not sleeping (or whose sleep has expired), and
 * picks the one with the earliest sleep/WAKEUP time.  A "nice1" drive
 * that still has spare time before its own deadline may be stalled so
 * another drive can run first.
 */
static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;
	do {
		if (drive->queue && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
			if (!best || (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
			 || (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
			{
				struct blk_dev_struct *bdev = &blk_dev[HWIF(drive)->major];
				/* skip drives whose queue is currently plugged */
				if (bdev->current_request != &bdev->plug)
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleep && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if (!drive->sleep
				 && 0 < (signed long)(WAKEUP(drive) - (jiffies - best->service_time))
				 && 0 < (signed long)((jiffies + t) - WAKEUP(drive)))
				{
					/* yield: stall "best" and rescan the list */
					ide_stall_queue(best, IDE_MIN(t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}

/*
 * Caller must have already acquired spinlock using *spinflags
 *
 * Core dispatch loop: while no interrupt handler is pending, picks the
 * next drive and starts its request.  If nothing is runnable, arms the
 * hwgroup timer for the earliest sleeper (or releases the Atari lock)
 * and returns with hwgroup->busy cleared.  The hwgroup spinlock is
 * dropped around start_request(), with the hwif's irq masked unless the
 * caller indicated it is already masked (masked_irq).
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, unsigned long *hwgroup_flags, int masked_irq)
{
	struct blk_dev_struct *bdev;
	ide_drive_t *drive;
	ide_hwif_t *hwif;
	unsigned long io_flags;

	hwgroup->busy = 1;
	while (hwgroup->handler == NULL) {
		spin_lock_irqsave(&io_request_lock, io_flags);
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			/* nothing runnable: find the earliest sleeping drive */
			unsigned long sleep = 0;
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				bdev = &blk_dev[HWIF(drive)->major];
				if (bdev->current_request != &bdev->plug)	/* FIXME: this will do for now */
					bdev->current_request = NULL;		/* (broken since patch-2.1.15) */
				if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep)))
					sleep = drive->sleep;
			} while ((drive = drive->next) != hwgroup->drive);
			spin_unlock_irqrestore(&io_request_lock, io_flags);
			if (sleep) {
				/* don't sleep longer than WAIT_MIN_SLEEP from now */
				if (0 < (signed long)(jiffies + WAIT_MIN_SLEEP - sleep))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (hwgroup->timer.next || hwgroup->timer.prev)
					printk("ide_set_handler: timer already active\n");
#endif
				mod_timer(&hwgroup->timer, sleep);
			} else {
				/* Ugly, but how can we sleep for the lock otherwise? perhaps from tq_scheduler? */
				ide_release_lock(&ide_lock);	/* for atari only */
			}
			hwgroup->busy = 0;
			return;
		}
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif)	/* set nIEN for previous hwif */
			OUT_BYTE(hwgroup->drive->ctl|2, hwgroup->hwif->io_ports[IDE_CONTROL_OFFSET]);
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleep = 0;
		drive->service_start = jiffies;
		bdev = &blk_dev[hwif->major];
		if (bdev->current_request == &bdev->plug)	/* FIXME: paranoia */
			printk("%s: Huh? nuking plugged queue\n", drive->name);
		bdev->current_request = hwgroup->rq = drive->queue;
		spin_unlock_irqrestore(&io_request_lock, io_flags);

		/*
		 * Drop the hwgroup lock while issuing the request, keeping the
		 * hwif's irq masked so its handler can't race us (unless the
		 * caller already masked it).
		 */
		if (hwif->irq != masked_irq)
			disable_irq(hwif->irq);
		spin_unlock_irqrestore(&hwgroup->spinlock, *hwgroup_flags);
		start_request(drive);
		spin_lock_irqsave(&hwgroup->spinlock, *hwgroup_flags);
		if (hwif->irq != masked_irq)
			enable_irq(hwif->irq);
	}
}

/*
 * ide_get_queue() returns the queue which corresponds to a given device.
 */
struct request **ide_get_queue (kdev_t dev)
{
	ide_hwif_t *hwif = (ide_hwif_t *)blk_dev[MAJOR(dev)].data;

	return &hwif->drives[DEVICE_NR(dev) & 1].queue;
}

/*
 * do_hwgroup_request() invokes ide_do_request() after claiming hwgroup->busy.
 * Returns immediately (without touching the timer) if the hwgroup is
 * already busy servicing requests.
 */
static void do_hwgroup_request (ide_hwgroup_t *hwgroup)
{
	unsigned long flags;

	spin_lock_irqsave(&hwgroup->spinlock, flags);
	if (hwgroup->busy) {
		spin_unlock_irqrestore(&hwgroup->spinlock, flags);
		return;
	}
	del_timer(&hwgroup->timer);	/* cancel any pending sleep wakeup */
	ide_get_lock(&ide_lock, ide_intr, hwgroup);	/* for atari only */
	ide_do_request(hwgroup, &flags, 0);
	spin_unlock_irqrestore(&hwgroup->spinlock, flags);
}

/*
 * ll_rw_blk.c invokes our do_idex_request() function
 * with the io_request_spinlock already grabbed.
 * Since we need to do our own spinlock's internally,
 * on paths that don't necessarily originate through the
 * do_idex_request() path, we have to undo the spinlock on entry,
 * and restore it again on exit.
 * Fortunately, this is mostly a nop for non-SMP kernels.
 */
static inline void unlock_do_hwgroup_request (ide_hwgroup_t *hwgroup)
{
	spin_unlock(&io_request_lock);
	do_hwgroup_request (hwgroup);
	spin_lock_irq(&io_request_lock);
}

/*
 * Per-interface request entry points registered with the block layer;
 * each is called with io_request_lock held (see comment above).
 */
void do_ide0_request (void)
{
	unlock_do_hwgroup_request (ide_hwifs[0].hwgroup);
}

#if MAX_HWIFS > 1
void do_ide1_request (void)
{
	unlock_do_hwgroup_request (ide_hwifs[1].hwgroup);
}
#endif /* MAX_HWIFS > 1 */

#if MAX_HWIFS > 2
void do_ide2_request (void)
{
	unlock_do_hwgroup_request (ide_hwifs[2].hwgroup);
}
#endif /* MAX_HWIFS > 2 */

#if MAX_HWIFS > 3
void do_ide3_request (void)
{
	unlock_do_hwgroup_request (ide_hwifs[3].hwgroup);
}
#endif /* MAX_HWIFS > 3 */

#if MAX_HWIFS > 4
void do_ide4_request (void)
{
	unlock_do_hwgroup_request (ide_hwifs[4].hwgroup);
}
#endif /* MAX_HWIFS > 4 */

#if MAX_HWIFS > 5
void do_ide5_request (void)
{
	unlock_do_hwgroup_request (ide_hwifs[5].hwgroup);
}
#endif /* MAX_HWIFS > 5 */

/*
 * start_next_request() records service-time accounting for the drive
 * just serviced and kicks the dispatch loop again, unless an interrupt
 * handler is already pending for this hwgroup.
 */
static void start_next_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	unsigned long flags;
	ide_drive_t *drive;

	spin_lock_irqsave(&hwgroup->spinlock, flags);
	if (hwgroup->handler != NULL) {
		spin_unlock_irqrestore(&hwgroup->spinlock, flags);
		return;
	}
	drive = hwgroup->drive;
	set_recovery_timer(HWIF(drive));
	drive->service_time = jiffies - drive->service_start;
	ide_do_request(hwgroup, &flags, masked_irq);
	spin_unlock_irqrestore(&hwgroup->spinlock, flags);
}

/*
 * ide_timer_expiry() is the hwgroup timer callback.  With no handler
 * pending it simply restarts request processing; otherwise it claims
 * the pending handler for timeout processing.
 * NOTE(review): this function is truncated in the visible source;
 * its remainder lies beyond this chunk.
 */
void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
	ide_drive_t *drive;
	ide_handler_t *handler;
	unsigned long flags;

	spin_lock_irqsave(&hwgroup->spinlock, flags);
	drive = hwgroup->drive;
	if ((handler = hwgroup->handler) == NULL) {
		spin_unlock_irqrestore(&hwgroup->spinlock, flags);
		do_hwgroup_request(hwgroup);
		return;
	}
	hwgroup->busy = 1;	/* should already be "1" */
	hwgroup->handler = NULL;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -