pci.c
	pci_addr = (unsigned long) range->pci_addr.a_mid << 32 |
		range->pci_addr.a_lo;

	/* Assume these are both zero */
	if ((pci_addr != 0) || (isa_addr != 0)) {
		printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
		       __FUNCTION__);
		return;
	}

	size = PAGE_ALIGN(range->size);

	__ioremap_explicit(phb_io_base_phys, (unsigned long) phb_io_base_virt,
			   size, _PAGE_NO_CACHE | _PAGE_GUARDED);
	}
}

void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev)
{
	unsigned int *ranges;
	unsigned long size;
	int rlen = 0;
	int memno = 0;
	struct resource *res;
	int np, na = prom_n_addr_cells(dev);
	unsigned long pci_addr, cpu_phys_addr;

	np = na + 5;

	/* From "PCI Binding to 1275"
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:	a PCI address
	 *   cells 3 or 3+4:	a CPU physical address
	 *			(size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	rlen = 0;
	hose->io_base_phys = 0;
	ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		pci_addr = (unsigned long)ranges[1] << 32 | ranges[2];

		cpu_phys_addr = ranges[3];
		if (na == 2)
			cpu_phys_addr = cpu_phys_addr << 32 | ranges[4];

		size = (unsigned long)ranges[na+3] << 32 | ranges[na+4];
		if (size == 0)
			continue;
		switch ((ranges[0] >> 24) & 0x3) {
		case 1:		/* I/O space */
			hose->io_base_phys = cpu_phys_addr;
			hose->pci_io_size = size;

			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
			    res->start, res->start + size - 1);
			break;
		case 2:		/* memory space */
			memno = 0;
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;

			if (memno == 0)
				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = cpu_phys_addr;
				DBG("phb%d: MEM 0x%lx -> 0x%lx\n",
				    hose->global_number,
				    res->start, res->start + size - 1);
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
		ranges += np;
	}
}

void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
{
	unsigned long size = hose->pci_io_size;
	unsigned long io_virt_offset;
	struct resource *res;
	struct device_node *isa_dn;

	hose->io_base_virt = reserve_phb_iospace(size);
	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
	    hose->global_number, hose->io_base_phys,
	    (unsigned long) hose->io_base_virt);

	if (primary) {
		pci_io_base = (unsigned long)hose->io_base_virt;
		isa_dn = of_find_node_by_type(NULL, "isa");
		if (isa_dn) {
			isa_io_base = pci_io_base;
			pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
						  hose->io_base_virt);
			of_node_put(isa_dn);
			/* Allow all IO */
			io_page_mask = -1;
		}
	}

	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
	res = &hose->io_resource;
	res->start += io_virt_offset;
	res->end += io_virt_offset;
}

void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
					int primary)
{
	unsigned long size = hose->pci_io_size;
	unsigned long io_virt_offset;
	struct resource *res;

	hose->io_base_virt = __ioremap(hose->io_base_phys, size,
				       _PAGE_NO_CACHE | _PAGE_GUARDED);
	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
	    hose->global_number, hose->io_base_phys,
	    (unsigned long) hose->io_base_virt);

	if (primary)
		pci_io_base = (unsigned long)hose->io_base_virt;

	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
	res = &hose->io_resource;
	res->start += io_virt_offset;
	res->end += io_virt_offset;
}

static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys,
			    unsigned long *start_virt, unsigned long *size)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_bus_region region;
	struct resource *res;

	if (bus->self) {
		res = bus->resource[0];
		pcibios_resource_to_bus(bus->self, &region, res);
		*start_phys = hose->io_base_phys + region.start;
		*start_virt = (unsigned long) hose->io_base_virt +
				region.start;
		if (region.end > region.start)
			*size = region.end - region.start + 1;
		else {
			printk("%s(): unexpected region 0x%lx->0x%lx\n",
			       __FUNCTION__, region.start, region.end);
			return 1;
		}
	} else {
		/* Root Bus */
		res = &hose->io_resource;
		*start_phys = hose->io_base_phys;
		*start_virt = (unsigned long) hose->io_base_virt;
		if (res->end > res->start)
			*size = res->end - res->start + 1;
		else {
			printk("%s(): unexpected region 0x%lx->0x%lx\n",
			       __FUNCTION__, res->start, res->end);
			return 1;
		}
	}

	return 0;
}

int unmap_bus_range(struct pci_bus *bus)
{
	unsigned long start_phys;
	unsigned long start_virt;
	unsigned long size;

	if (!bus) {
		printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
		return 1;
	}

	if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
		return 1;
	if (iounmap_explicit((void __iomem *) start_virt, size))
		return 1;

	return 0;
}
EXPORT_SYMBOL(unmap_bus_range);

int remap_bus_range(struct pci_bus *bus)
{
	unsigned long start_phys;
	unsigned long start_virt;
	unsigned long size;

	if (!bus) {
		printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
		return 1;
	}

	if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
		return 1;
	printk("mapping IO %lx -> %lx, size: %lx\n",
	       start_phys, start_virt, size);
	if (__ioremap_explicit(start_phys, start_virt, size,
			       _PAGE_NO_CACHE | _PAGE_GUARDED))
		return 1;

	return 0;
}
EXPORT_SYMBOL(remap_bus_range);

void phbs_remap_io(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		remap_bus_range(hose->bus);
}

/*
 * ppc64 can have multifunction devices that do not respond to function 0.
 * In this case we must scan all functions.
 * XXX this can go now, we use the OF device tree in all the
 * cases that caused problems. -- paulus
 */
int pcibios_scan_all_fns(struct pci_bus *bus, int devfn)
{
	return 0;
}

static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long start, end, mask, offset;

	if (res->flags & IORESOURCE_IO) {
		offset = (unsigned long)hose->io_base_virt - pci_io_base;

		start = res->start += offset;
		end = res->end += offset;

		/* Need to allow IO access to pages that are in the
		   ISA range */
		if (start < MAX_ISA_PORT) {
			if (end > MAX_ISA_PORT)
				end = MAX_ISA_PORT;

			start >>= PAGE_SHIFT;
			end >>= PAGE_SHIFT;

			/* get the range of pages for the map */
			mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
			io_page_mask |= mask;
		}
	} else if (res->flags & IORESOURCE_MEM) {
		res->start += hose->pci_mem_offset;
		res->end += hose->pci_mem_offset;
	}
}

void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
					      struct pci_bus *bus)
{
	/* Update device resources. */
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (dev->resource[i].flags)
			fixup_resource(&dev->resource[i], dev);
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);

static void __devinit do_bus_setup(struct pci_bus *bus)
{
	struct pci_dev *dev;

	ppc_md.iommu_bus_setup(bus);

	list_for_each_entry(dev, &bus->devices, bus_list)
		ppc_md.iommu_dev_setup(dev);

	if (ppc_md.irq_bus_setup)
		ppc_md.irq_bus_setup(bus);
}

void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;

	if (dev && pci_probe_only &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		/* This is a subordinate bridge */
		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(dev, bus);
	}

	do_bus_setup(bus);

	if (!pci_probe_only)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list)
		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

/*
 * Reads the interrupt pin to determine if the interrupt is used by the card.
 * If the interrupt is used, then gets the interrupt line from the
 * openfirmware and sets it in the pci_dev and pci_config line.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
	u8 intpin;
	struct device_node *node;

	pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin);
	if (intpin == 0)
		return 0;

	node = pci_device_to_OF_node(pci_dev);
	if (node == NULL)
		return -1;

	if (node->n_intrs == 0)
		return -1;

	pci_dev->irq = node->intrs[0].line;

	pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);

	return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  u64 *start, u64 *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = pci_io_base - (unsigned long)hose->io_base_virt +
			hose->io_base_phys;

	*start = rsrc->start + offset;
	*end = rsrc->end + offset;
}

#endif /* CONFIG_PPC_MULTIPLATFORM */

#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4

long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
{
	struct pci_controller* hose;
	struct list_head *ln;
	struct pci_bus *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
#ifdef CONFIG_PPC_PMAC
	if (systemcfg->platform == PLATFORM_POWERMAC &&
	    machine_is_compatible("MacRISC4"))
		if (in_bus == 0)
			in_bus = 0xf0;
#endif /* CONFIG_PPC_PMAC */

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * used on pre-domains setup. We return the first match
	 */

	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		bus = pci_bus_b(ln);
		if (in_bus >= bus->number &&
		    in_bus < (bus->number + bus->subordinate))
			break;
		bus = NULL;
	}
	if (bus == NULL || bus->sysdata == NULL)
		return -ENODEV;

	hose_node = (struct device_node *)bus->sysdata;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}
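
The loop in pci_process_bridge_OF_ranges() above walks the Open Firmware "ranges" property in np-cell chunks, following the layout described in the "PCI Binding to 1275" comment. As an illustration of that layout only, here is a minimal, self-contained userspace sketch (not part of the kernel file) that decodes one entry, assuming the parent node has two CPU address cells (na == 2, so np == 7); the cell values are invented for the example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* One hypothetical 7-cell "ranges" entry:
	 *   cells 0 - 2: PCI address (phys.hi, phys.mid, phys.lo)
	 *   cells 3 - 4: CPU physical address (na == 2)
	 *   cells 5 - 6: size of the range
	 */
	uint32_t ranges[7] = {
		0x02000000,		/* phys.hi: space code 2 = 32-bit memory */
		0x00000000, 0x80000000,	/* PCI address 0x80000000 */
		0x00000000, 0xc0000000,	/* CPU physical address 0xc0000000 */
		0x00000000, 0x10000000,	/* size 256MB */
	};
	int na = 2;

	/* Same arithmetic as the kernel loop above. */
	uint64_t pci_addr = (uint64_t)ranges[1] << 32 | ranges[2];
	uint64_t cpu_phys = (uint64_t)ranges[3] << 32 | ranges[4];
	uint64_t size     = (uint64_t)ranges[na + 3] << 32 | ranges[na + 4];
	unsigned int space = (ranges[0] >> 24) & 0x3;	/* 1 = I/O, 2 = memory */

	printf("space %u: PCI 0x%llx -> CPU 0x%llx, size 0x%llx\n",
	       space, (unsigned long long)pci_addr,
	       (unsigned long long)cpu_phys, (unsigned long long)size);
	return 0;
}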
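
sys_pciconfig_iobase() above is exposed to userspace as the pciconfig_iobase system call on architectures that define it, ppc64 among them. The following is a rough, untested sketch of how a program might query the physical I/O base behind bus 0; the IOBASE_* values simply mirror the selectors defined in the kernel file, and in_devfn is passed as 0 because the implementation above ignores it.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Mirrors of the selectors defined above in pci.c. */
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2

int main(void)
{
#ifdef __NR_pciconfig_iobase
	/* which = IOBASE_IO, in_bus = 0, in_devfn = 0 (unused above). */
	long io_base = syscall(__NR_pciconfig_iobase,
			       (long)IOBASE_IO, 0UL, 0UL);

	if (io_base == -1)
		perror("pciconfig_iobase");
	else
		printf("bus 0 physical I/O base: 0x%lx\n",
		       (unsigned long)io_base);
#else
	fprintf(stderr, "pciconfig_iobase is not available on this architecture\n");
#endif
	return 0;
}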