📄 icside.c
static int icside_dma_check(ide_drive_t *drive)
{
        struct hd_driveid *id = drive->id;
        ide_hwif_t *hwif = HWIF(drive);
        int xfer_mode = XFER_PIO_2;
        int on;

        if (!id || !(id->capability & 1) || !hwif->autodma)
                goto out;

        /*
         * Consult the list of known "bad" drives
         */
        if (in_drive_list(drive->id, drive_blacklist)) {
                printk("%s: Disabling DMA for %s (blacklisted)\n",
                        drive->name, drive->id->model);
                goto out;
        }

        /*
         * Enable DMA on any drive that has multiword DMA
         */
        if (id->field_valid & 2) {
                if (id->dma_mword & 4) {
                        xfer_mode = XFER_MW_DMA_2;
                } else if (id->dma_mword & 2) {
                        xfer_mode = XFER_MW_DMA_1;
                } else if (id->dma_mword & 1) {
                        xfer_mode = XFER_MW_DMA_0;
                }
                goto out;
        }

        /*
         * Consult the list of known "good" drives
         */
        if (in_drive_list(drive->id, drive_whitelist)) {
                if (id->eide_dma_time > 150)
                        goto out;

                xfer_mode = XFER_MW_DMA_1;
        }

out:
        on = icside_set_speed(drive, xfer_mode);

        icside_dma_enable(drive, on, 0);

        return 0;
}

static int icside_dma_stop(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        drive->waiting_for_dma = 0;

        disable_dma(hwif->hw.dma);

        /* Teardown mappings after DMA has completed. */
        pci_unmap_sg(NULL, hwif->sg_table, hwif->sg_nents,
                     hwif->sg_dma_direction);

        return get_dma_residue(hwif->hw.dma) != 0;
}

static void icside_dma_start(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        /* We can not enable DMA on both channels simultaneously. */
        BUG_ON(dma_channel_active(hwif->hw.dma));
        enable_dma(hwif->hw.dma);
}

/*
 * dma_intr() is the handler for disk read/write DMA interrupts
 */
static ide_startstop_t icside_dmaintr(ide_drive_t *drive)
{
        unsigned int stat;
        int dma_stat;

        dma_stat = icside_dma_stop(drive);
        stat = GET_STAT();
        if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
                if (!dma_stat) {
                        struct request *rq = HWGROUP(drive)->rq;
                        int i;

                        for (i = rq->nr_sectors; i > 0;) {
                                i -= rq->current_nr_sectors;
                                ide_end_request(1, HWGROUP(drive));
                        }
                        return ide_stopped;
                }
                printk(KERN_ERR "%s: bad DMA status (dma_stat=%x)\n",
                       drive->name, dma_stat);
        }

        return ide_error(drive, "dma_intr", stat);
}
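/*
 * Common setup for a DMA transfer: build the scatter-gather list for the
 * request, map it, route the DMA signals to this interface, select the
 * drive timing and program the DMA engine with the SG table and transfer
 * direction.  icside_dma_init() then writes the taskfile command and
 * starts the transfer.
 */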
static int
icside_dma_common(ide_drive_t *drive, struct request *rq,
                  unsigned int dma_mode)
{
        ide_hwif_t *hwif = HWIF(drive);
        unsigned int count;

        /*
         * We can not enable DMA on both channels.
         */
        BUG_ON(hwif->sg_dma_active);
        BUG_ON(dma_channel_active(hwif->hw.dma));

        if (rq->cmd == IDE_DRIVE_TASKFILE) {
                ide_build_tf_sglist(hwif, rq);
        } else {
                ide_build_rq_sglist(hwif, rq);
        }

        count = pci_map_sg(NULL, hwif->sg_table, hwif->sg_nents,
                           hwif->sg_dma_direction);
        if (!count)
                return 1;

        /*
         * Ensure that we have the right interrupt routed.
         */
        icside_maskproc(drive, 0);

        /*
         * Route the DMA signals to the correct interface.
         */
        outb(hwif->select_data, hwif->config_data);

        /*
         * Select the correct timing for this drive.
         */
        set_dma_speed(hwif->hw.dma, drive->drive_data);

        /*
         * Tell the DMA engine about the SG table and
         * data direction.
         */
        set_dma_sg(hwif->hw.dma, hwif->sg_table, count);
        set_dma_mode(hwif->hw.dma, dma_mode);

        return 0;
}

static int icside_dma_init(ide_drive_t *drive, struct request *rq, int rd)
{
        u8 cmd;

        if (icside_dma_common(drive, rq, rd ? DMA_MODE_READ : DMA_MODE_WRITE))
                return 1;

        drive->waiting_for_dma = 1;

        if (drive->media != ide_disk)
                return 0;

        ide_set_handler(drive, icside_dmaintr, 2*WAIT_CMD, NULL);

        if (rq->cmd == IDE_DRIVE_TASKFILE && drive->addressing == 1) {
                ide_task_t *args = rq->special;
                cmd = args->tfRegister[IDE_COMMAND_OFFSET];
        } else if (drive->addressing) {
                cmd = rd ? WIN_READDMA_EXT : WIN_WRITEDMA_EXT;
        } else {
                cmd = rd ? WIN_READDMA : WIN_WRITEDMA;
        }
        OUT_BYTE(cmd, IDE_COMMAND_REG);

        icside_dma_start(drive);

        return 0;
}

static int icside_irq_status(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct icside_state *state = hwif->hw.priv;

        return inb(state->irq_port +
                   (hwif->channel ?
                        ICS_ARCIN_V6_INTRSTAT_2 :
                        ICS_ARCIN_V6_INTRSTAT_1)) & 1;
}

static int icside_dma_verbose(ide_drive_t *drive)
{
        printk(", %s (peak %dMB/s)",
                ide_xfer_verbose(drive->current_speed),
                2000 / drive->drive_data);
        return 1;
}

static void icside_dma_timeout(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
        ide_dump_status(drive, "DMA timeout", GET_STAT());
}

static void icside_irq_lost(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}

static int icside_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
{
        switch (func) {
        case ide_dma_off:
        case ide_dma_off_quietly:
                icside_dma_enable(drive, 0, func == ide_dma_off_quietly);
                return 0;

        case ide_dma_on:
                drive->using_dma = 1;
                icside_dma_enable(drive, drive->using_dma, 0);
                return 0;

        case ide_dma_check:
                return icside_dma_check(drive);

        case ide_dma_read:
        case ide_dma_write:
                return icside_dma_init(drive, HWGROUP(drive)->rq,
                                       func == ide_dma_read);

        case ide_dma_begin:
                icside_dma_start(drive);
                return 0;

        case ide_dma_end:
                return icside_dma_stop(drive);

        case ide_dma_test_irq:
                return icside_irq_status(drive);

        case ide_dma_bad_drive:
        case ide_dma_good_drive:
                /* we check our own internal lists; ide never calls these */
                break;

        case ide_dma_verbose:
                return icside_dma_verbose(drive);

        case ide_dma_timeout:
                icside_dma_timeout(drive);
                return 1;

        case ide_dma_lostirq:
                icside_irq_lost(drive);
                return 1;

        case ide_dma_retune:
                /* not implemented in 2.4 */
                break;
        }

        printk("icside_dmaproc: unsupported %s (%d) function\n",
                ide_dmafunc_verbose(func), func);
        return 1;
}
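/*
 * Per-interface DMA setup: allocate the scatterlist table and hook up the
 * speed and DMA method pointers.  If the allocation fails, the interface
 * is left without DMA support.
 */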
static int icside_setup_dma(ide_hwif_t *hwif)
{
        int autodma = 0;

#ifdef CONFIG_IDEDMA_ICS_AUTO
        autodma = 1;
#endif

        printk(" %s: SG-DMA", hwif->name);

        hwif->sg_table = kmalloc(sizeof(struct scatterlist) * NR_ENTRIES,
                                 GFP_KERNEL);
        if (!hwif->sg_table)
                goto failed;

        hwif->dmatable_cpu = NULL;
        hwif->dmatable_dma = 0;
        hwif->speedproc = icside_set_speed;
        hwif->dmaproc = icside_dmaproc;
        hwif->autodma = autodma;

        printk(" capable%s\n", autodma ? ", auto-enable" : "");

        return 1;

failed:
        printk(" disabled, unable to allocate DMA table\n");
        return 0;
}

int ide_release_dma(ide_hwif_t *hwif)
{
        if (hwif->sg_table) {
                kfree(hwif->sg_table);
                hwif->sg_table = NULL;
        }
        return 1;
}
#endif

static ide_hwif_t *icside_find_hwif(unsigned long dataport)
{
        ide_hwif_t *hwif;
        int index;

        for (index = 0; index < MAX_HWIFS; ++index) {
                hwif = &ide_hwifs[index];
                if (hwif->io_ports[IDE_DATA_OFFSET] == (ide_ioreg_t)dataport)
                        goto found;
        }

        for (index = 0; index < MAX_HWIFS; ++index) {
                hwif = &ide_hwifs[index];
                if (!hwif->io_ports[IDE_DATA_OFFSET])
                        goto found;
        }

        hwif = NULL;
found:
        return hwif;
}

static ide_hwif_t *
icside_setup(unsigned long base, struct cardinfo *info, int irq)
{
        unsigned long port = base + info->dataoffset;
        ide_hwif_t *hwif;

        hwif = icside_find_hwif(base);
        if (hwif) {
                int i;

                memset(&hwif->hw, 0, sizeof(hw_regs_t));

                for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
                        hwif->hw.io_ports[i] = (ide_ioreg_t)port;
                        hwif->io_ports[i] = (ide_ioreg_t)port;
                        port += 1 << info->stepping;
                }
                hwif->hw.io_ports[IDE_CONTROL_OFFSET] = base + info->ctrloffset;
                hwif->io_ports[IDE_CONTROL_OFFSET] = base + info->ctrloffset;
                hwif->hw.irq  = irq;
                hwif->irq     = irq;
                hwif->hw.dma  = NO_DMA;
                hwif->noprobe = 0;
                hwif->chipset = ide_acorn;
        }

        return hwif;
}

static int __init icside_register_v5(struct expansion_card *ec)
{
        unsigned long slot_port;
        ide_hwif_t *hwif;

        slot_port = ecard_address(ec, ECARD_MEMC, 0);

        ec->irqaddr  = (unsigned char *)ioaddr(slot_port + ICS_ARCIN_V5_INTRSTAT);
        ec->irqmask  = 1;
        ec->irq_data = (void *)slot_port;
        ec->ops      = (expansioncard_ops_t *)&icside_ops_arcin_v5;

        /*
         * Be on the safe side - disable interrupts
         */
        inb(slot_port + ICS_ARCIN_V5_INTROFFSET);

        hwif = icside_setup(slot_port, &icside_cardinfo_v5, ec->irq);

        return hwif ? 0 : -ENODEV;
}
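/*
 * The V6 card carries two IDE interfaces which share one interrupt and one
 * DMA channel, so both hwifs are marked serialized and point at the same
 * icside_state.
 */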
static int __init icside_register_v6(struct expansion_card *ec)
{
        unsigned long slot_port, port;
        struct icside_state *state;
        ide_hwif_t *hwif, *mate;
        unsigned int sel = 0;

        slot_port = ecard_address(ec, ECARD_IOC, ECARD_FAST);
        port      = ecard_address(ec, ECARD_EASI, ECARD_FAST);

        if (port == 0)
                port = slot_port;
        else
                sel = 1 << 5;

        outb(sel, slot_port);

        /*
         * Be on the safe side - disable interrupts
         */
        inb(port + ICS_ARCIN_V6_INTROFFSET_1);
        inb(port + ICS_ARCIN_V6_INTROFFSET_2);

        /*
         * Find and register the interfaces.
         */
        hwif = icside_setup(port, &icside_cardinfo_v6_1, ec->irq);
        mate = icside_setup(port, &icside_cardinfo_v6_2, ec->irq);

        if (!hwif || !mate)
                return -ENODEV;

        state = kmalloc(sizeof(struct icside_state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        state->channel    = 0;
        state->enabled    = 0;
        state->irq_port   = port;

        ec->irq_data      = state;
        ec->ops           = (expansioncard_ops_t *)&icside_ops_arcin_v6;

        hwif->maskproc    = icside_maskproc;
        hwif->channel     = 0;
        hwif->hw.priv     = state;
        hwif->mate        = mate;
        hwif->serialized  = 1;
        hwif->config_data = slot_port;
        hwif->select_data = sel;
        hwif->hw.dma      = ec->dma;

        mate->maskproc    = icside_maskproc;
        mate->channel     = 1;
        mate->hw.priv     = state;
        mate->mate        = hwif;
        mate->serialized  = 1;
        mate->config_data = slot_port;
        mate->select_data = sel | 1;
        mate->hw.dma      = ec->dma;

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
        if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
                icside_setup_dma(hwif);
                icside_setup_dma(mate);
        }
#endif
        return 0;
}

int __init icside_init(void)
{
        ecard_startfind();

        do {
                struct expansion_card *ec;
                int result;

                ec = ecard_find(0, icside_cids);
                if (ec == NULL)
                        break;

                ecard_claim(ec);

                switch (icside_identifyif(ec)) {
                case ics_if_arcin_v5:
                        result = icside_register_v5(ec);
                        break;

                case ics_if_arcin_v6:
                        result = icside_register_v6(ec);
                        break;

                default:
                        result = -1;
                        break;
                }

                if (result)
                        ecard_release(ec);
        } while (1);

        return 0;
}