/*
 * sgiioc4.c — SGI IOC4 Base-IO card IDE driver.
 * (Header reconstructed from code-viewer page chrome: "📄 sgiioc4.c", "字号:"
 *  i.e. "font size:" — viewer UI text, not part of the driver.)
 */
/*
 * NOTE(review): this chunk begins mid-function — the lines below are the
 * tail of ide_dma_sgiioc4() (its signature and opening statements are
 * outside the visible chunk; presumably it computes dma_base/num_ports
 * from the PCI BAR — confirm against the full file).
 */
	/* Claim the DMA register range; on failure fall back to PIO. */
	if (!request_region(dma_base, num_ports, hwif->name)) {
		printk(KERN_ERR
		       "%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
		       "ALREADY in use\n",
		       __FUNCTION__, hwif->name, (void *) dma_base,
		       (void *) dma_base + num_ports - 1);
		goto dma_alloc_failure;
	}

	hwif->dma_base = dma_base;
	/* Coherent buffer holding the PRD (scatter/gather) table. */
	hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
					  IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
					  &hwif->dmatable_dma);

	if (!hwif->dmatable_cpu)
		goto dma_alloc_failure;

	hwif->sg_max_nents = IOC4_PRD_ENTRIES;

	/*
	 * Second coherent buffer (one cacheline) used as the "ending DMA"
	 * status area; its bus address is stored in hwif->dma_status.
	 */
	hwif->dma_base2 = (unsigned long)
		pci_alloc_consistent(hwif->pci_dev,
				     IOC4_IDE_CACHELINE_SIZE,
				     (dma_addr_t *) &(hwif->dma_status));

	if (!hwif->dma_base2)
		goto dma_base2alloc_failure;

	return;

dma_base2alloc_failure:
	pci_free_consistent(hwif->pci_dev,
			    IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
			    hwif->dmatable_cpu, hwif->dmatable_dma);
	printk(KERN_INFO
	       "%s() -- Error! Unable to allocate DMA Maps for drive %s\n",
	       __FUNCTION__, hwif->name);
	printk(KERN_INFO
	       "Changing from DMA to PIO mode for Drive %s\n", hwif->name);

dma_alloc_failure:
	/* Disable DMA because we could not allocate any DMA maps */
	hwif->autodma = 0;
	hwif->atapi_dma = 0;
}

/*
 * Initializes the IOC4 DMA Engine for one transfer:
 * stops/clears any DMA still active or in error from a previous transfer,
 * programs the PRD-table pointer and the ending-DMA status address, then
 * writes the direction/start value into the DMA control register.
 */
static void
sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
{
	u32 ioc4_dma;
	ide_hwif_t *hwif = HWIF(drive);
	u64 dma_base = hwif->dma_base;
	u32 dma_addr, ending_dma_addr;

	ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);

	/* A previous transfer left the engine running: force it to stop. */
	if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
		printk(KERN_WARNING
		       "%s(%s):Warning!! DMA from previous transfer was still active\n",
		       __FUNCTION__, drive->name);
		hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4);
		ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR
			       "%s(%s) : IOC4 Dma STOP bit is still 1\n",
			       __FUNCTION__, drive->name);
	}

	/* Re-read control; clear any latched error the same way. */
	ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);
	if (ioc4_dma & IOC4_S_DMA_ERROR) {
		printk(KERN_WARNING
		       "%s(%s) : Warning!! - DMA Error during Previous"
		       " transfer | status 0x%x\n",
		       __FUNCTION__, drive->name, ioc4_dma);
		hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4);
		ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR
			       "%s(%s) : IOC4 DMA STOP bit is still 1\n",
			       __FUNCTION__, drive->name);
	}

	/* Address of the Scatter Gather List */
	dma_addr = cpu_to_le32(hwif->dmatable_dma);
	hwif->OUTL(dma_addr, dma_base + IOC4_DMA_PTR_L * 4);

	/* Address of the Ending DMA - zero the status cacheline first. */
	memset((unsigned int *) hwif->dma_base2, 0, IOC4_IDE_CACHELINE_SIZE);
	ending_dma_addr = cpu_to_le32(hwif->dma_status);
	hwif->OUTL(ending_dma_addr, dma_base + IOC4_DMA_END_ADDR * 4);

	/* Writing the direction value also kicks off the engine. */
	hwif->OUTL(dma_direction, dma_base + IOC4_DMA_CTRL * 4);
	drive->waiting_for_dma = 1;
}

/* IOC4 Scatter Gather list Format                                       */
/* 128 Bit entries to support 64 bit addresses in the future             */
/* The Scatter Gather list Entry should be in the BIG-ENDIAN Format      */
/* --------------------------------------------------------------------- */
/* | Upper 32 bits - Zero           |         Lower 32 bits- address   | */
/* --------------------------------------------------------------------- */
/* | Upper 32 bits - Zero           |EOL| 15 unused     | 16 Bit Length| */
/* --------------------------------------------------------------------- */

/*
 * Creates the scatter gather list (DMA table) for @rq in the IOC4 format
 * above.  Each sg segment is split so no entry crosses a 64 KiB boundary.
 * Returns the number of PRD entries built, or 0 to make the caller revert
 * to PIO (empty sglist or table overflow).
 */
static unsigned int
sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *table = hwif->dmatable_cpu;
	unsigned int count = 0, i = 1;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;	/* sglist of length Zero */

	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		dma_addr_t cur_addr;
		int cur_len;
		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			if (count++ >= IOC4_PRD_ENTRIES) {
				printk(KERN_WARNING
				       "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			} else {
				/* bcount = bytes until the next 64K boundary */
				u32 xcount, bcount =
				    0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;

				/* put the addr, length in
				 * the IOC4 dma-table format */
				*table = 0x0;
				table++;
				*table = cpu_to_be32(cur_addr);
				table++;
				*table = 0x0;
				table++;

				xcount = bcount & 0xffff;
				*table = cpu_to_be32(xcount);
				table++;

				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg++;
		i--;
	}

	if (count) {
		/* Set the EOL flag (bit 31, big-endian) on the last entry. */
		table--;
		*table |= cpu_to_be32(0x80000000);
		return count;
	}

use_pio_instead:
	pci_unmap_sg(hwif->pci_dev,
		     hwif->sg_table,
		     hwif->sg_nents,
		     hwif->sg_dma_direction);

	return 0;	/* revert to PIO for this request */
}

/*
 * dma_setup method: build the DMA table and program the engine.
 * Returns 0 when DMA is set up, 1 to tell the core to use PIO instead.
 */
static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int count = 0;
	int ddir;

	if (rq_data_dir(rq))
		ddir = PCI_DMA_TODEVICE;
	else
		ddir = PCI_DMA_FROMDEVICE;

	if (!(count = sgiioc4_build_dma_table(drive, rq, ddir))) {
		/* try PIO instead of DMA */
		ide_map_sg(drive, rq);
		return 1;
	}

	/* Translate the request direction into the IOC4 control value. */
	if (rq_data_dir(rq))
		/* Writes TO the IOC4 FROM Main Memory */
		ddir = IOC4_DMA_READ;
	else
		/* Writes FROM the IOC4 TO Main Memory */
		ddir = IOC4_DMA_WRITE;

	sgiioc4_configure_for_dma(ddir, drive);

	return 0;
}

/*
 * Fill in the hwif method table and capability masks for the IOC4.
 * NULL entries explicitly select the IDE core's default behaviour.
 */
static void __devinit
ide_init_sgiioc4(ide_hwif_t * hwif)
{
	hwif->mmio = 2;
	hwif->autodma = 1;
	hwif->atapi_dma = 1;
	hwif->ultra_mask = 0x0;	/* Disable Ultra DMA */
	hwif->mwdma_mask = 0x2;	/* Multimode-2 DMA */
	hwif->swdma_mask = 0x2;
	hwif->tuneproc = NULL;	/* Sets timing for PIO mode */
	hwif->speedproc = NULL;	/* Sets timing for DMA &/or PIO modes */
	hwif->selectproc = NULL;/* Use the default routine to select drive */
	hwif->reset_poll = NULL;/* No HBA specific reset_poll needed */
	hwif->pre_reset = NULL;	/* No HBA specific pre_set needed */
	hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine,
						clear interrupts */
	hwif->intrproc = NULL;	/* Enable or Disable interrupt from drive */
	hwif->maskproc = &sgiioc4_maskproc;	/* Mask on/off NIEN register */
	hwif->quirkproc = NULL;
	hwif->busproc = NULL;

	hwif->dma_setup = &sgiioc4_ide_dma_setup;
	hwif->dma_start = &sgiioc4_ide_dma_start;
	hwif->ide_dma_end = &sgiioc4_ide_dma_end;
	hwif->ide_dma_check = &sgiioc4_ide_dma_check;
	hwif->ide_dma_on = &sgiioc4_ide_dma_on;
	hwif->ide_dma_off_quietly = &sgiioc4_ide_dma_off_quietly;
	hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
	hwif->ide_dma_host_on = &sgiioc4_ide_dma_host_on;
	hwif->ide_dma_host_off = &sgiioc4_ide_dma_host_off;
	hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq;
	hwif->ide_dma_timeout = &__ide_dma_timeout;
	hwif->INB = &sgiioc4_INB;
}

/*
 * Claim a free hwif slot, map the IOC4's command/control/interrupt/DMA
 * register sub-ranges out of BAR 0, hook up the driver methods and probe.
 * Returns 0 on success, -ENOMEM (no slot / region busy) or -EIO (probe).
 */
static int __devinit
sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
{
	unsigned long base, ctl, dma_base, irqport;
	ide_hwif_t *hwif;
	int h;

	/*
	 * Find an empty HWIF; if none available, return -ENOMEM.
	 */
	for (h = 0; h < MAX_HWIFS; ++h) {
		hwif = &ide_hwifs[h];
		if (hwif->chipset == ide_unknown)
			break;
	}
	if (h == MAX_HWIFS) {
		printk(KERN_ERR
		       "%s: too many IDE interfaces, no room in table\n",
		       d->name);
		return -ENOMEM;
	}

	/* Get the CmdBlk and CtrlBlk Base Registers */
	base = pci_resource_start(dev, 0) + IOC4_CMD_OFFSET;
	ctl = pci_resource_start(dev, 0) + IOC4_CTRL_OFFSET;
	irqport = pci_resource_start(dev, 0) + IOC4_INTR_OFFSET;
	dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;

	if (!request_region(base, IOC4_CMD_CTL_BLK_SIZE, hwif->name)) {
		printk(KERN_ERR
		       "%s : %s -- ERROR, Port Addresses "
		       "0x%p to 0x%p ALREADY in use\n",
		       __FUNCTION__, hwif->name, (void *) base,
		       (void *) base + IOC4_CMD_CTL_BLK_SIZE);
		return -ENOMEM;
	}

	if (hwif->io_ports[IDE_DATA_OFFSET] != base) {
		/* Initialize the IO registers */
		sgiioc4_init_hwif_ports(&hwif->hw, base, ctl, irqport);
		memcpy(hwif->io_ports, hwif->hw.io_ports,
		       sizeof (hwif->io_ports));
		hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
	}

	hwif->irq = dev->irq;
	hwif->chipset = ide_pci;
	hwif->pci_dev = dev;
	hwif->channel = 0;	/* Single Channel chip */
	hwif->cds = (struct ide_pci_device_s *) d;
	hwif->gendev.parent = &dev->dev;/* setup proper ancestral information */

	/* Initializing chipset IRQ Registers */
	hwif->OUTL(0x03, irqport + IOC4_INTR_SET * 4);

	ide_init_sgiioc4(hwif);

	if (dma_base)
		ide_dma_sgiioc4(hwif, dma_base);
	else
		printk(KERN_INFO "%s: %s Bus-Master DMA disabled\n",
		       hwif->name, d->name);

	if (probe_hwif_init(hwif))
		return -EIO;

	/* Create /proc/ide entries */
	create_proc_ide_interfaces();

	return 0;
}

/*
 * Check the card's firmware revision (from the PCI class/revision dword)
 * and, if supported, set up the IDE interface.
 *
 * NOTE(review): declared `unsigned int` yet returns negative errno values
 * (-EAGAIN, and whatever sgiioc4_ide_setup_pci_device() returns) — callers
 * treating this as unsigned will misread failures; should be `int`.
 */
static unsigned int __devinit
pci_init_sgiioc4(struct pci_dev *dev, ide_pci_device_t * d)
{
	unsigned int class_rev;
	int ret;

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
	class_rev &= 0xff;
	printk(KERN_INFO "%s: IDE controller at PCI slot %s, revision %d\n",
	       d->name, pci_name(dev), class_rev);
	if (class_rev < IOC4_SUPPORTED_FIRMWARE_REV) {
		/*
		 * NOTE(review): the adjacent literals concatenate to
		 * "...upgrade to revision46 or higher" — missing a space
		 * between "revision" and "46" in the printed message.
		 */
		printk(KERN_ERR "Skipping %s IDE controller in slot %s: "
		       "firmware is obsolete - please upgrade to revision"
		       "46 or higher\n", d->name, pci_name(dev));
		ret = -EAGAIN;
		goto out;
	}
	ret = sgiioc4_ide_setup_pci_device(dev, d);
out:
	return ret;
}

/* Per-channel chipset description consumed by the IDE PCI core. */
static ide_pci_device_t sgiioc4_chipsets[] __devinitdata = {
	{
	 /* Channel 0 */
	 .name = "SGIIOC4",
	 .init_hwif = ide_init_sgiioc4,
	 .init_dma = ide_dma_sgiioc4,
	 .channels = 1,
	 .autodma = AUTODMA,
	 /* SGI IOC4 doesn't have enablebits. */
	 .bootable = ON_BOARD,
	}
};

/*
 * IOC4 submodule probe hook: bind the IDE function of one IOC4 card,
 * selecting the chipset entry via the matched PCI id's driver_data.
 */
int
ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
	return pci_init_sgiioc4(idd->idd_pdev,
				&sgiioc4_chipsets[idd->idd_pci_id->driver_data]);
}

/* Registration record for the shared IOC4 core driver (no remove hook). */
static struct ioc4_submodule ioc4_ide_submodule = {
	.is_name = "IOC4_ide",
	.is_owner = THIS_MODULE,
	.is_probe = ioc4_ide_attach_one,
/*	.is_remove = ioc4_ide_remove_one,	*/
};

/* Module entry point: register with the IOC4 core. */
static int __devinit
ioc4_ide_init(void)
{
	return ioc4_register_submodule(&ioc4_ide_submodule);
}

/* Module exit point: unregister from the IOC4 core. */
static void __devexit
ioc4_ide_exit(void)
{
	ioc4_unregister_submodule(&ioc4_ide_submodule);
}

module_init(ioc4_ide_init);
module_exit(ioc4_ide_exit);

MODULE_AUTHOR("Aniket Malatpure - Silicon Graphics Inc. (SGI)");
MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");
/*
 * (Code-viewer page chrome, translated from Chinese — not part of the driver.)
 * Keyboard shortcuts: Copy code Ctrl+C · Search code Ctrl+F ·
 * Full screen F11 · Toggle theme Ctrl+Shift+D · Show shortcuts ? ·
 * Increase font size Ctrl+= · Decrease font size Ctrl+-
 */