sgiioc4.c
来自「linux-2.4.29操作系统的源码」· C语言 代码 · 共 892 行 · 第 1/2 页
C
892 行
/*
 * Tail of ide_dma_sgiioc4() -- the function's opening (resource-range
 * check and its printk) lies before this chunk.  This part allocates
 * the per-interface DMA resources: the PRD descriptor table, the
 * scatterlist array, and a cacheline for the IOC4 "ending DMA
 * address"; on any allocation failure it falls back to PIO.
 */
	       hwif->name, (void *) dma_base,
	       (void *) dma_base + num_ports - 1);
		return;
	}

	hwif->dma_base = dma_base;

	/* PRD table: one page of IOC4-format scatter/gather entries. */
	hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
						  IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,	/* 1 Page */
						  &hwif->dmatable_dma);

	if (!hwif->dmatable_cpu)
		goto dma_alloc_failure;

	hwif->sg_table =
	    kmalloc(sizeof (struct scatterlist) * IOC4_PRD_ENTRIES, GFP_KERNEL);

	if (!hwif->sg_table) {
		/* Unwind the PRD table allocation before bailing out. */
		pci_free_consistent(hwif->pci_dev,
				    IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
				    hwif->dmatable_cpu, hwif->dmatable_dma);
		goto dma_alloc_failure;
	}

	/*
	 * Cacheline-sized buffer for the "ending DMA address"; its bus
	 * address is stored in hwif->dma_status, its kernel virtual
	 * address in hwif->dma_base2.
	 */
	hwif->dma_base2 = (unsigned long)
	    pci_alloc_consistent(hwif->pci_dev, IOC4_IDE_CACHELINE_SIZE,
				 (dma_addr_t *) &(hwif->dma_status));

	if (!hwif->dma_base2) {
		pci_free_consistent(hwif->pci_dev,
				    IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
				    hwif->dmatable_cpu, hwif->dmatable_dma);
		kfree(hwif->sg_table);
		goto dma_alloc_failure;
	}

	return;

dma_alloc_failure:
	printk(KERN_INFO
	       "ide_dma_sgiioc4() -- Error! Unable to allocate DMA Maps for drive %s\n",
	       hwif->name);
	printk(KERN_INFO "Changing from DMA to PIO mode for Drive %s \n",
	       hwif->name);

	/* Disable DMA because we could not allocate any DMA maps */
	hwif->autodma = 0;
	hwif->atapi_dma = 0;
}

/* Initializes the IOC4 DMA Engine */
static void
sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
{
	u32 ioc4_dma;
	int count;
	ide_hwif_t *hwif = HWIF(drive);
	uint64_t dma_base = hwif->dma_base;
	uint32_t dma_addr, ending_dma_addr;

	/* If a previous transfer is still marked active, force-stop it. */
	ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);

	if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
		printk(KERN_WARNING
		       "sgiioc4_configure_for_dma(%s):Warning!! IOC4 DMA from previous transfer was still active\n",
		       drive->name);
		hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4);

		/* Poll with growing delays until the STOP bit clears
		 * or the retry budget (count < 100) is exhausted. */
		count = 0;
		do {
			xide_delay(count);
			ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);
			count += 10;
		} while ((ioc4_dma & IOC4_S_DMA_STOP) && (count < 100));

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR
			       "sgiioc4_configure_for__dma(%s) : IOC4 Dma STOP bit is still 1\n",
			       drive->name);
	}

	/* A previous transfer may also have latched an error bit; clear
	 * it the same way (write STOP, poll until STOP drops). */
	ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);
	if (ioc4_dma & IOC4_S_DMA_ERROR) {
		printk(KERN_WARNING
		       "sgiioc4_configure_for__dma(%s) : Warning!! - DMA Error during Previous transfer | status 0x%x \n",
		       drive->name, ioc4_dma);
		hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4);

		count = 0;
		do {
			ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);
			xide_delay(count);
			count += 10;
		} while ((ioc4_dma & IOC4_S_DMA_STOP) && (count < 100));

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR
			       "sgiioc4_configure_for__dma(%s) : IOC4 DMA STOP bit is still 1\n",
			       drive->name);
	}

	/* Address of the Scatter Gather List */
	dma_addr = cpu_to_le32(hwif->dmatable_dma);
	hwif->OUTL(dma_addr, dma_base + IOC4_DMA_PTR_L * 4);

	/* Address of the Ending DMA -- zero the target cacheline first. */
	memset((unsigned int *) hwif->dma_base2, 0, IOC4_IDE_CACHELINE_SIZE);
	ending_dma_addr = cpu_to_le32(hwif->dma_status);
	hwif->OUTL(ending_dma_addr, dma_base + IOC4_DMA_END_ADDR * 4);

	/* Program the direction last; drive now waits on DMA completion. */
	hwif->OUTL(dma_direction, dma_base + IOC4_DMA_CTRL * 4);

	drive->waiting_for_dma = 1;
}

/* IOC4 Scatter Gather list Format */
/* 128 Bit entries to support 64 bit addresses in the future */
/* The Scatter Gather list Entry should be in the BIG-ENDIAN Format */
/* --------------------------------------------------------------------------- */
/* | Upper 32 bits - Zero          |  Lower 32 bits - address               | */
/* --------------------------------------------------------------------------- */
/* | Upper 32 bits - Zero          |EOL| 16 Bit Data Length                 | */
/* --------------------------------------------------------------------------- */

/* Creates the scatter gather list, DMA Table
 */
static unsigned int
sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *table = hwif->dmatable_cpu;
	unsigned int count = 0, i = 1;
	struct scatterlist *sg;

	/* First map the request into hwif->sg_table; i is the number of
	 * sg entries produced (0 on failure). */
	if (rq->cmd == IDE_DRIVE_TASKFILE)
		hwif->sg_nents = i = sgiioc4_ide_raw_build_sglist(hwif, rq);
	else
		hwif->sg_nents = i = sgiioc4_ide_build_sglist(hwif, rq, ddir);

	if (!i)
		return 0;	/* sglist of length Zero */

	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		dma_addr_t cur_addr;
		int cur_len;
		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			if (count++ >= IOC4_PRD_ENTRIES) {
				printk(KERN_WARNING
				       "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			} else {
				/* bcount limits each descriptor so it never
				 * crosses a 64 KB boundary from cur_addr. */
				uint32_t xcount, bcount =
				    0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;

				/* put the addr, length in
				 * the IOC4 dma-table format:
				 * 128-bit big-endian entries, upper
				 * 32 bits of each half are zero. */
				*table = 0x0;
				table++;
				*table = cpu_to_be32(cur_addr);
				table++;
				*table = 0x0;
				table++;

				xcount = bcount & 0xffff;
				*table = cpu_to_be32(xcount);
				table++;

				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg++;
		i--;
	}

	if (count) {
		/* Set the EOL flag (bit 31, big-endian) in the last
		 * length word written. */
		table--;
		*table |= cpu_to_be32(0x80000000);
		return count;
	}

use_pio_instead:
	pci_unmap_sg(hwif->pci_dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);
	hwif->sg_dma_active = 0;

	return 0;	/* revert to PIO for this request */
}

/* Returns 1 if the IOC4 has an IDE interrupt/error bit pending, else 0. */
static int
sgiioc4_checkirq(ide_hwif_t * hwif)
{
	uint8_t intr_reg =
	    hwif->INL(hwif->io_ports[IDE_IRQ_OFFSET] + IOC4_INTR_REG * 4);

	if (intr_reg & 0x03)
		return 1;

	return 0;
}

/*
 * Acknowledges a pending IOC4 IDE interrupt: drains the drive status
 * until BSY (0x80) clears, reports and clears any latched PCI bus
 * error, then clears the interrupt/error bits on the IOC4.  Returns
 * the interrupt register value read on entry (non-zero if an
 * interrupt was pending).
 */
static int
sgiioc4_clearirq(ide_drive_t * drive)
{
	u32 intr_reg;
	ide_hwif_t *hwif = HWIF(drive);
	ide_ioreg_t other_ir =
	    hwif->io_ports[IDE_IRQ_OFFSET] + (IOC4_INTR_REG << 2);

	/* Code to check for PCI error conditions */
	intr_reg = hwif->INL(other_ir);
	if (intr_reg & 0x03) {	/* Valid IOC4-IDE interrupt */
		u8 stat = hwif->INB(IDE_STATUS_REG);
		int count = 0;
		do {
			xide_delay(count);
			/* Removes Interrupt from IDE Device */
			stat = hwif->INB(IDE_STATUS_REG);
		} while ((stat & 0x80) && (count++ < 1024));

		if (intr_reg & 0x02) {
			/* Error when transferring DMA data on PCI bus */
			uint32_t pci_err_addr_low, pci_err_addr_high,
			    pci_stat_cmd_reg;

			pci_err_addr_low =
			    hwif->INL(hwif->io_ports[IDE_IRQ_OFFSET]);
			pci_err_addr_high =
			    hwif->INL(hwif->io_ports[IDE_IRQ_OFFSET] + 4);
			pci_read_config_dword(hwif->pci_dev, PCI_COMMAND,
					      &pci_stat_cmd_reg);
			printk(KERN_ERR
			       "sgiioc4_clearirq(%s) : PCI Bus Error when doing DMA : status-cmd reg is 0x%x \n",
			       drive->name, pci_stat_cmd_reg);
			printk(KERN_ERR
			       "sgiioc4_clearirq(%s) : PCI Error Address is 0x%x%x \n",
			       drive->name, pci_err_addr_high,
			       pci_err_addr_low);
			/* Clear the PCI Error indicator */
			pci_write_config_dword(hwif->pci_dev, PCI_COMMAND,
					       0x00000146);
		}

		/* Clear the Interrupt, Error bits on the IOC4 */
		hwif->OUTL(0x03, other_ir);

		intr_reg = hwif->INL(other_ir);
	}

	return intr_reg;
}

/* XXX: duplicated code. See PV#: 896400 */

/**
 * "Copied from drivers/ide/ide-dma.c"
 * sgiioc4_ide_build_sglist - map IDE scatter gather for DMA I/O
 * @hwif: the interface to build the DMA table for
 * @rq: the request holding the sg list
 * @ddir: data direction
 *
 * Perform the PCI mapping magic neccessary to access the source
 * or target buffers of a request via PCI DMA. The lower layers
 * of the kernel provide the neccessary cache management so that
 * we can operate in a portable fashion.
 *
 * This code is identical to ide_build_sglist in ide-dma.c
 * however that it not exported and even if it were would create
 * dependancy problems for modular drivers.
 */
static int
sgiioc4_ide_build_sglist(ide_hwif_t * hwif, struct request *rq, int ddir)
{
	struct buffer_head *bh;
	struct scatterlist *sg = hwif->sg_table;
	unsigned long lastdataend = ~0UL;	/* sentinel: no previous segment yet */
	int nents = 0;

	if (hwif->sg_dma_active)
		BUG();

	bh = rq->bh;
	do {
		int contig = 0;

		/* Is this buffer_head physically contiguous with the end
		 * of the previous one?  If so, just grow the last entry. */
		if (bh->b_page) {
			if (bh_phys(bh) == lastdataend)
				contig = 1;
		} else {
			if ((unsigned long) bh->b_data == lastdataend)
				contig = 1;
		}

		if (contig) {
			sg[nents - 1].length += bh->b_size;
			lastdataend += bh->b_size;
			continue;
		}

		/* NOTE(review): bound uses PRD_ENTRIES rather than
		 * IOC4_PRD_ENTRIES (the size sg_table was allocated
		 * with) -- confirm the two constants agree. */
		if (nents >= PRD_ENTRIES)
			return 0;

		memset(&sg[nents], 0, sizeof (*sg));

		if (bh->b_page) {
			sg[nents].page = bh->b_page;
			sg[nents].offset = bh_offset(bh);
			lastdataend = bh_phys(bh) + bh->b_size;
		} else {
			/* A b_data below PAGE_SIZE cannot be a valid
			 * kernel virtual address. */
			if ((unsigned long) bh->b_data < PAGE_SIZE)
				BUG();

			sg[nents].address = bh->b_data;
			lastdataend = (unsigned long) bh->b_data + bh->b_size;
		}

		sg[nents].length = bh->b_size;
		nents++;
	} while ((bh = bh->b_reqnext) != NULL);

	if (nents == 0)
		BUG();

	hwif->sg_dma_direction = ddir;
	return pci_map_sg(hwif->pci_dev, sg, nents, ddir);
}

/* XXX: duplicated code. See PV#: 896400 */

/**
 * Copied from drivers/ide/ide-dma.c
 * sgiioc4_ide_raw_build_sglist - map IDE scatter gather for DMA
 * @hwif: the interface to build the DMA table for
 * @rq: the request holding the sg list
 *
 * Perform the PCI mapping magic neccessary to access the source or
 * target buffers of a taskfile request via PCI DMA. The lower layers
 * of the kernel provide the neccessary cache management so that we can
 * operate in a portable fashion
 *
 * This code is identical to ide_raw_build_sglist in ide-dma.c
 * however that it not exported and even if it were would create
 * dependancy problems for modular drivers.
 */
static int
sgiioc4_ide_raw_build_sglist(ide_hwif_t * hwif, struct request *rq)
{
	struct scatterlist *sg = hwif->sg_table;
	int nents = 0;
	ide_task_t *args = rq->special;
	u8 *virt_addr = rq->buffer;
	int sector_count = rq->nr_sectors;

	if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
		hwif->sg_dma_direction = PCI_DMA_TODEVICE;
	else
		hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;

	/*
	 * Split the contiguous buffer into sg entries of at most 128
	 * sectors each.  NOTE(review): the "#if 1" arm produces at most
	 * two entries (handles <= 256 sectors); the dead "#else" arm
	 * loops and handles any count.  Presumably rq->nr_sectors never
	 * exceeds 256 here -- confirm against the block-layer limits.
	 */
#if 1
	if (sector_count > 128) {
		memset(&sg[nents], 0, sizeof (*sg));
		sg[nents].address = virt_addr;
		sg[nents].length = 128 * SECTOR_SIZE;
		nents++;
		virt_addr = virt_addr + (128 * SECTOR_SIZE);
		sector_count -= 128;
	}
	memset(&sg[nents], 0, sizeof (*sg));
	sg[nents].address = virt_addr;
	sg[nents].length = sector_count * SECTOR_SIZE;
	nents++;
#else
	while (sector_count > 128) {
		memset(&sg[nents], 0, sizeof (*sg));
		sg[nents].address = virt_addr;
		sg[nents].length = 128 * SECTOR_SIZE;
		nents++;
		virt_addr = virt_addr + (128 * SECTOR_SIZE);
		sector_count -= 128;
	};
	memset(&sg[nents], 0, sizeof (*sg));
	sg[nents].address = virt_addr;
	sg[nents].length = sector_count * SECTOR_SIZE;
	nents++;
#endif
	return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}

#ifdef CONFIG_PROC_FS

/*
 * /proc read handler: emits one summary block per registered IOC4
 * device and returns the number of bytes written.  NOTE(review): the
 * offset/count arguments are ignored, which is only safe while the
 * total output stays well under one page.
 */
static int
sgiioc4_get_info(char *buffer, char **addr, off_t offset, int count)
{
	char *p = buffer;
	unsigned int class_rev;
	int i = 0;

	while (i < n_sgiioc4_devs) {
		pci_read_config_dword(sgiioc4_devs[i], PCI_CLASS_REVISION,
				      &class_rev);
		class_rev &= 0xff;

		if (sgiioc4_devs[i]->device == PCI_DEVICE_ID_SGI_IOC4) {
			p += sprintf(p, "\n SGI IOC4 Chipset rev %d. ",
				     class_rev);
			p += sprintf(p,
				     "\n Chipset has 1 IDE channel and supports 2 devices on that channel.");
			p += sprintf(p,
				     "\n Chipset supports DMA in MultiMode-2 data transfer protocol.\n");
			/* Do we need more info. here? */
		}
		i++;
	}

	return p - buffer;
}
#endif /* CONFIG_PROC_FS */

/*
 * PCI probe routine.  Validates the device id against the chipset
 * table, refuses firmware revisions below IOC4_SUPPORTED_FIRMWARE_REV,
 * and hands the device to pci_init_sgiioc4().  Every path returns 0.
 */
static int __devinit
sgiioc4_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	unsigned int class_rev;
	ide_pci_device_t *d = &sgiioc4_chipsets[id->driver_data];

	if (dev->device != d->device) {
		printk(KERN_ERR
		       "Error in sgiioc4_init_one(dev 0x%p | id 0x%p )\n",
		       (void *) dev, (void *) id);
		BUG();
	}

	/* The IDE part is usable only on sufficiently new firmware. */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
	class_rev &= 0xff;
	if (class_rev < IOC4_SUPPORTED_FIRMWARE_REV) {
		printk(KERN_INFO
		       "Disabling the IOC4 IDE Part due to unsupported Firmware Rev (%d). \n",
		       class_rev);
		printk(KERN_INFO
		       "Please upgrade to Firmware Rev 46 or higher \n");
		return 0;
	}

	printk(KERN_INFO "%s: IDE controller at PCI slot %s\n", d->name,
	       dev->slot_name);

	if (pci_init_sgiioc4(dev, d->name))
		return 0;

	/* Init succeeded -- pin the module while the device is bound. */
	MOD_INC_USE_COUNT;

	return 0;
}

/* Match on SGI vendor/device with class code 0x0b4000 (mask 0xFFFFFF). */
static struct pci_device_id sgiioc4_pci_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID,
	 PCI_ANY_ID, 0x0b4000, 0xFFFFFF, 0},
	{0}
};

static struct pci_driver driver = {
	.name = "SGI-IOC4 IDE",
	.id_table = sgiioc4_pci_tbl,
	.probe = sgiioc4_init_one,
};

/* Module entry point: register with the IDE PCI layer. */
static int
sgiioc4_ide_init(void)
{
	return ide_pci_register_driver(&driver);
}

/* Module exit point: unregister from the IDE PCI layer. */
static void
sgiioc4_ide_exit(void)
{
	ide_pci_unregister_driver(&driver);
}

module_init(sgiioc4_ide_init);
module_exit(sgiioc4_ide_exit);

MODULE_AUTHOR("Aniket Malatpure - Silicon Graphics Inc. (SGI)");
MODULE_DESCRIPTION("PCI driver module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");

EXPORT_NO_SYMBOLS;
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?