📄 pci.c
		/* Ok, try it out.. */
		if (allocate_resource(r, res, size, min, -1, size,
				      NULL, NULL) < 0)
			continue;

		/* PCI config space updated by caller. */
		return 0;
	}

	return -EBUSY;
}

int pci_assign_resource(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res = &pdev->resource[resource];
	unsigned long min, size;
	int err;

	if (res->flags & IORESOURCE_IO)
		min = pbm->io_space.start + 0x400UL;
	else
		min = pbm->mem_space.start;

	size = res->end - res->start + 1;

	err = pci_assign_bus_resource(pdev->bus, pdev, res, size, min, resource);

	if (err < 0) {
		printk("PCI: Failed to allocate resource %d for %s\n",
		       resource, pci_name(pdev));
	} else {
		/* Update PCI config space. */
		pbm->parent->base_address_update(pdev, resource);
	}

	return err;
}

/* Sort resources by alignment */
void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
{
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *r;
		struct resource_list *list, *tmp;
		unsigned long r_align;

		r = &dev->resource[i];
		r_align = r->end - r->start;

		if (!(r->flags) || r->parent)
			continue;
		if (!r_align) {
			printk(KERN_WARNING "PCI: Ignore bogus resource %d "
			       "[%lx:%lx] of %s\n",
			       i, r->start, r->end, pci_name(dev));
			continue;
		}
		r_align = (i < PCI_BRIDGE_RESOURCES) ? r_align + 1 : r->start;
		for (list = head; ; list = list->next) {
			unsigned long align = 0;
			struct resource_list *ln = list->next;
			int idx;

			if (ln) {
				idx = ln->res - &ln->dev->resource[0];
				align = (idx < PCI_BRIDGE_RESOURCES) ?
					ln->res->end - ln->res->start + 1 :
					ln->res->start;
			}
			if (r_align > align) {
				tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
				if (!tmp)
					panic("pdev_sort_resources(): "
					      "kmalloc() failed!\n");
				tmp->next = ln;
				tmp->res = r;
				tmp->dev = dev;
				list->next = tmp;
				break;
			}
		}
	}
}

void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}

void pcibios_align_resource(void *data, struct resource *res,
			    unsigned long size, unsigned long align)
{
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	return 0;
}

void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
			     struct resource *res)
{
	struct pci_pbm_info *pbm = pdev->bus->sysdata;
	struct resource zero_res, *root;

	zero_res.start = 0;
	zero_res.end = 0;
	zero_res.flags = res->flags;

	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else
		root = &pbm->mem_space;

	pbm->parent->resource_adjust(pdev, &zero_res, root);

	region->start = res->start - zero_res.start;
	region->end = res->end - zero_res.start;
}

void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
			     struct pci_bus_region *region)
{
	struct pci_pbm_info *pbm = pdev->bus->sysdata;
	struct resource *root;

	res->start = region->start;
	res->end = region->end;

	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else
		root = &pbm->mem_space;

	pbm->parent->resource_adjust(pdev, res, root);
}

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "onboardfirst")) {
		pci_device_reorder = 1;
		return NULL;
	}
	if (!strcmp(str, "noreorder")) {
		pci_device_reorder = 0;
		return NULL;
	}
	return str;
}

/* Platform support for /proc/bus/pci/X/Y mmap()s. */

/* If the user uses a host-bridge as the PCI device, he may use
 * this to perform a raw mmap() of the I/O or MEM space behind
 * that controller.
 *
 * This can be useful for execution of x86 PCI bios initialization code
 * on a PCI card, like the xfree86 int10 stuff does.
 */
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm;
	struct pci_controller_info *p;
	unsigned long space_size, user_offset, user_size;

	if (!pcp)
		return -ENXIO;
	pbm = pcp->pbm;
	if (!pbm)
		return -ENXIO;

	p = pbm->parent;
	if (p->pbms_same_domain) {
		unsigned long lowest, highest;

		lowest = ~0UL; highest = 0UL;
		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags) {
				lowest = p->pbm_A.io_space.start;
				highest = p->pbm_A.io_space.end + 1;
			}
			if (p->pbm_B.io_space.flags) {
				if (lowest > p->pbm_B.io_space.start)
					lowest = p->pbm_B.io_space.start;
				if (highest < p->pbm_B.io_space.end + 1)
					highest = p->pbm_B.io_space.end + 1;
			}
			space_size = highest - lowest;
		} else {
			if (p->pbm_A.mem_space.flags) {
				lowest = p->pbm_A.mem_space.start;
				highest = p->pbm_A.mem_space.end + 1;
			}
			if (p->pbm_B.mem_space.flags) {
				if (lowest > p->pbm_B.mem_space.start)
					lowest = p->pbm_B.mem_space.start;
				if (highest < p->pbm_B.mem_space.end + 1)
					highest = p->pbm_B.mem_space.end + 1;
			}
			space_size = highest - lowest;
		}
	} else {
		if (mmap_state == pci_mmap_io) {
			space_size = (pbm->io_space.end -
				      pbm->io_space.start) + 1;
		} else {
			space_size = (pbm->mem_space.end -
				      pbm->mem_space.start) + 1;
		}
	}

	/* Make sure the request is in range. */
	user_offset = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	if (user_offset >= space_size ||
	    (user_offset + user_size) > space_size)
		return -EINVAL;

	if (p->pbms_same_domain) {
		unsigned long lowest = ~0UL;

		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags)
				lowest = p->pbm_A.io_space.start;
			if (p->pbm_B.io_space.flags &&
			    lowest > p->pbm_B.io_space.start)
				lowest = p->pbm_B.io_space.start;
		} else {
			if (p->pbm_A.mem_space.flags)
				lowest = p->pbm_A.mem_space.start;
			if (p->pbm_B.mem_space.flags &&
			    lowest > p->pbm_B.mem_space.start)
				lowest = p->pbm_B.mem_space.start;
		}
		vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
	} else {
		if (mmap_state == pci_mmap_io) {
			vma->vm_pgoff = (pbm->io_space.start +
					 user_offset) >> PAGE_SHIFT;
		} else {
			vma->vm_pgoff = (pbm->mem_space.start +
					 user_offset) >> PAGE_SHIFT;
		}
	}

	return 0;
}

/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
 * to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long user32 = user_offset & pci_memspace_mask;
	unsigned long largest_base, this_base, addr32;
	int i;

	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
		return __pci_mmap_make_offset_bus(dev, vma, mmap_state);

	/* Figure out which base address this is for. */
	largest_base = 0UL;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];

		/* Active? */
		if (!rp->flags)
			continue;

		/* Same type? */
		if (i == PCI_ROM_RESOURCE) {
			if (mmap_state != pci_mmap_mem)
				continue;
		} else {
			if ((mmap_state == pci_mmap_io &&
			     (rp->flags & IORESOURCE_IO) == 0) ||
			    (mmap_state == pci_mmap_mem &&
			     (rp->flags & IORESOURCE_MEM) == 0))
				continue;
		}

		this_base = rp->start;

		addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;

		if (mmap_state == pci_mmap_io)
			addr32 &= 0xffffff;

		if (addr32 <= user32 && this_base > largest_base)
			largest_base = this_base;
	}

	if (largest_base == 0UL)
		return -EINVAL;

	/* Now construct the final physical address. */
	if (mmap_state == pci_mmap_io)
		vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
	else
		vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);

	return 0;
}

/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
 * mapping.
 */
static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
				 enum pci_mmap_state mmap_state)
{
	vma->vm_flags |= (VM_SHM | VM_LOCKED);
}

/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	/* Our io_remap_page_range takes care of this, do nothing. */
}

extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
			       unsigned long offset, unsigned long size,
			       pgprot_t prot, int space);

/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
 * for this architecture.  The region in the process to map is described by vm_start
 * and vm_end members of VMA, the base physical address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	int ret;

	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
	if (ret < 0)
		return ret;

	__pci_mmap_set_flags(dev, vma, mmap_state);
	__pci_mmap_set_pgprot(dev, vma, mmap_state);

	ret = io_remap_page_range(vma, vma->vm_start,
				  (vma->vm_pgoff << PAGE_SHIFT |
				   (write_combine ? 0x1UL : 0x0UL)),
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot, 0);
	if (ret)
		return ret;

	vma->vm_flags |= VM_IO;
	return 0;
}

/* Return the domain number for this pci bus */
int pci_domain_nr(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;
	int ret;

	if (pbm == NULL || pbm->parent == NULL) {
		ret = -ENXIO;
	} else {
		struct pci_controller_info *p = pbm->parent;

		ret = p->index;
		if (p->pbms_same_domain == 0)
			ret = ((ret << 1) +
			       ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
	}

	return ret;
}
EXPORT_SYMBOL(pci_domain_nr);

int pci_name_bus(char *name, struct pci_bus *bus)
{
	sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
	return 0;
}

int pcibios_prep_mwi(struct pci_dev *dev)
{
	/* We set correct PCI_CACHE_LINE_SIZE register values for every
	 * device probed on this platform.  So there is nothing to check
	 * and this always succeeds.
	 */
	return 0;
}

#endif /* !(CONFIG_PCI) */
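The comment above __pci_mmap_make_offset() describes the userspace side of this interface: read a device's 32-bit base address register from config space, optionally add a PAGE_SIZE-multiple offset, and pass the result as the mmap() offset on that device's /proc/bus/pci entry. The standalone sketch below (not part of pci.c) illustrates that flow under a few assumptions: the device path is a hypothetical example, BAR0 is assumed to be a page-aligned 32-bit memory BAR, and the standard PCIIOC_MMAP_IS_MEM ioctl from <linux/pci.h> is used to request a memory-space mapping explicitly. Error handling is kept minimal.

/* Userspace sketch: mmap a device's BAR0 through /proc/bus/pci.
 * Must be run as root; the path below is made up for illustration.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/pci.h>		/* PCIIOC_MMAP_IS_MEM, PCI_BASE_ADDRESS_* */

int main(void)
{
	const char *dev = "/proc/bus/pci/01/02.0";	/* hypothetical device */
	uint32_t bar0;
	void *p;
	int fd;

	fd = open(dev, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* BAR0 lives at config space offset 0x10; mask off the low flag bits. */
	if (pread(fd, &bar0, sizeof(bar0), 0x10) != sizeof(bar0)) {
		perror("pread");
		return 1;
	}
	bar0 &= PCI_BASE_ADDRESS_MEM_MASK;

	/* Ask for a memory-space mapping, then mmap at the BAR's bus address. */
	if (ioctl(fd, PCIIOC_MMAP_IS_MEM) < 0) {
		perror("ioctl");
		return 1;
	}
	p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, bar0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("BAR0 0x%08x mapped at %p\n", (unsigned int)bar0, p);
	munmap(p, getpagesize());
	close(fd);
	return 0;
}

In terms of the kernel code above, the offset handed to mmap() becomes vma->vm_pgoff, which __pci_mmap_make_offset() matches against the device's resources before io_remap_page_range() installs the mapping.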