⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 pci.c

📁 底层驱动开发
💻 C
📖 第 1 页 / 共 3 页
字号:
		size = GET_64BIT(ranges, 6);
		if (flags == 0 || size == 0)
			continue;
		/* A bridge gets exactly one I/O window (resource[0]);
		 * memory windows fill the remaining bridge resources.
		 */
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}
		res->start = GET_64BIT(ranges, 1);
		res->end = res->start + size - 1;
		res->flags = flags;
		fixup_resource(res, dev);
	}
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);

	/* Probe below the bridge: either walk the OF device tree or do a
	 * conventional config-space scan, as the platform requests.
	 */
	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);
	else if (mode == PCI_PROBE_NORMAL)
		pci_scan_child_bus(bus);
}
#endif /* CONFIG_PPC_MULTIPLATFORM */

/* Create the root pci_bus for one PHB (PCI host bridge), claim its I/O and
 * memory apertures from the global resource trees, then scan the devices
 * behind it — via the OF device tree or a normal config-space scan,
 * depending on the platform's pci_probe_mode.
 */
static void __devinit scan_phb(struct pci_controller *hose)
{
	struct pci_bus *bus;
	struct device_node *node = hose->arch_data;
	int i, mode;
	struct resource *res;

	bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
	if (bus == NULL) {
		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		return;
	}
	bus->secondary = hose->first_busno;
	hose->bus = bus;

	/* resource[0] is the PHB's I/O window; request failure is only
	 * logged — the scan continues regardless.
	 */
	bus->resource[0] = res = &hose->io_resource;
	if (res->flags && request_resource(&ioport_resource, res))
		printk(KERN_ERR "Failed to request PCI IO region "
		       "on PCI domain %04x\n", hose->global_number);

	/* resources[1..3]: up to three memory windows per hose */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		bus->resource[i+1] = res;
		if (res->flags && request_resource(&iomem_resource, res))
			printk(KERN_ERR "Failed to request PCI memory region "
			       "on PCI domain %04x\n", hose->global_number);
	}

	mode = PCI_PROBE_NORMAL;
#ifdef CONFIG_PPC_MULTIPLATFORM
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	if (mode == PCI_PROBE_DEVTREE) {
		/* Device-tree probing cannot discover the bus range itself */
		bus->subordinate = hose->last_busno;
		of_scan_bus(node, bus);
	}
#endif /* CONFIG_PPC_MULTIPLATFORM */
	if (mode == PCI_PROBE_NORMAL)
		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
	pci_bus_add_devices(bus);
}

/* Boot-time PCI setup: scan every recorded host bridge, assign or claim
 * resources, run the machine-dependent fixups, and cache the ISA bridge.
 * Registered below via subsys_initcall().
 */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	/* For now, override phys_mem_access_prot. If we need it,
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

#ifdef CONFIG_PPC_ISERIES
	iSeries_pcibios_init();
#endif

	printk("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers.  */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		scan_phb(hose);

#ifndef CONFIG_PPC_ISERIES
	if (pci_probe_only)
		pcibios_claim_of_setup();
	else
		/* FIXME: `else' will be removed when
		   pci_assign_unassigned_resources() is able to work
		   correctly with [partially] allocated PCI tree. */
		pci_assign_unassigned_resources();
#endif /* !CONFIG_PPC_ISERIES */

	/* Call machine dependent final fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Cache the location of the ISA bridge (if we have one) */
	ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (ppc64_isabridge_dev != NULL)
		printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));

	printk("PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);

/* "pci=" command-line hook: this arch consumes nothing, so every option
 * string is handed back unrecognized.
 */
char __init *pcibios_setup(char *str)
{
	return str;
}

/* Turn on I/O and/or memory decode in the device's PCI_COMMAND register for
 * each resource selected by @mask (bit i covers dev->resource[i]).
 * The register is only written when a bit actually changes. Always returns 0.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		/* Only set up the requested stuff */
		if (!(mask & (1<<i)))
			continue;

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register.  */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
#ifdef CONFIG_PPC_ISERIES
	return 0;
#else
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
#endif
}

EXPORT_SYMBOL(pci_domain_nr);

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
#ifdef CONFIG_PPC_ISERIES
	return 0;
#else
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->buid;
#endif
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns the matching resource on success, NULL if *offset does not fall
 * inside any of the device's resources of the requested type.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       unsigned long *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
		*offset += hose->pci_mem_offset;
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping: the mapping is always uncached (_PAGE_NO_CACHE); it is
 * also guarded unless write-combining is requested or implied.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;
	else
		prot |= _PAGE_GUARDED;

	printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
	       prot);

	return __pgprot(prot);
}

/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long offset,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	int i;

	/* RAM gets normal cacheable protections, untouched */
	if (page_is_ram(offset >> PAGE_SHIFT))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	/* Search every PCI device's memory resources for one that
	 * contains @offset; prefetchable resources drop the guard bit.
	 */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;

			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		/* drop the reference for_each_pci_dev() left held */
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}

/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

#ifdef CONFIG_PPC_MULTIPLATFORM
/* sysfs "devspec" attribute: the device's Open Firmware path, or empty if
 * the device has no OF node.
 */
static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev (dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
#endif /* CONFIG_PPC_MULTIPLATFORM */

/* Arch hook: attach the "devspec" attribute to each PCI device's sysfs dir */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_MULTIPLATFORM
	device_create_file(&pdev->dev, &dev_attr_devspec);
#endif /* CONFIG_PPC_MULTIPLATFORM */
}

#ifdef CONFIG_PPC_MULTIPLATFORM

#define ISA_SPACE_MASK 0x1
#define ISA_SPACE_IO 0x1

/* Map the ISA bridge's I/O range from its OF "ranges" property; if the
 * property is missing or malformed, fall back to mapping a flat 64k.
 */
static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
				      unsigned long phb_io_base_phys,
				      void __iomem * phb_io_base_virt)
{
	struct isa_range *range;
	unsigned long pci_addr;
	unsigned int isa_addr;
	unsigned int size;
	int rlen = 0;

	range = (struct isa_range *) get_property(isa_node, "ranges", &rlen);
	if (range == NULL || (rlen < sizeof(struct isa_range))) {
		printk(KERN_ERR "no ISA ranges or unexpected isa range size,"
		       "mapping 64k\n");
		__ioremap_explicit(phb_io_base_phys,
				   (unsigned long)phb_io_base_virt,
				   0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
		return;
	}

	/* From "ISA Binding to 1275"
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 1:	an ISA address
	 *   cells 2 - 4:	a PCI address 
	 *			(size depending on dev->n_addr_cells)
	 *   cell 5:		the size of the range
	 */
	/* NOTE(review): `&&` below looks like it should be bitwise `&` —
	 * `a_hi && ISA_SPACE_MASK` collapses to 0/1 and matches any non-zero
	 * a_hi, not just ones with the ISA_SPACE_IO bit set. Confirm against
	 * upstream before changing.
	 */
	if ((range->isa_addr.a_hi && ISA_SPACE_MASK) == ISA_SPACE_IO) {
		isa_addr = range->isa_addr.a_lo;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -