pci.c

来自「h内核」· C语言 代码 · 共 876 行 · 第 1/2 页

C
876
字号
/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/udbg.h>

#include "pci.h"

/* Debug output goes to the udbg console only when DEBUG is defined above */
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* When set (the default), resources are claimed from the firmware/OF
 * tree rather than reassigned by the generic PCI code. */
unsigned long pci_probe_only = 1;
/* When set, pcibios_assign_all_busses() tells the core to renumber buses */
unsigned long pci_assign_all_buses = 0;

/*
 * legal IO pages under MAX_ISA_PORT.  This is to ensure we don't touch
 * devices we don't have access to.
 */
unsigned long io_page_mask;

EXPORT_SYMBOL(io_page_mask);

/* Generic-PCI hook: non-zero requests a full bus renumber on scan */
unsigned int pcibios_assign_all_busses(void)
{
	return pci_assign_all_buses;
}

/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets.  If no ISA bus exists a dummy
 * page is mapped and isa_io_limit prevents access to it.
 */
unsigned long isa_io_base;	/* NULL if no ISA bus */
unsigned long pci_io_base;

void iSeries_pcibios_init(void);

/* All registered PCI host bridges (phbs); see pci_setup_pci_controller() */
LIST_HEAD(hose_list);

struct pci_dma_ops pci_dma_ops;
EXPORT_SYMBOL(pci_dma_ops);

int global_phb_number;		/* Global phb counter */

/* Cached ISA bridge dev.
*/struct pci_dev *ppc64_isabridge_dev = NULL;static void fixup_broken_pcnet32(struct pci_dev* dev){	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {		dev->vendor = PCI_VENDOR_ID_AMD;		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);		pci_name_device(dev);	}}DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);void  pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,			      struct resource *res){	unsigned long offset = 0;	struct pci_controller *hose = pci_bus_to_host(dev->bus);	if (!hose)		return;	if (res->flags & IORESOURCE_IO)	        offset = (unsigned long)hose->io_base_virt - pci_io_base;	if (res->flags & IORESOURCE_MEM)		offset = hose->pci_mem_offset;	region->start = res->start - offset;	region->end = res->end - offset;}#ifdef CONFIG_HOTPLUGEXPORT_SYMBOL(pcibios_resource_to_bus);#endif/* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. 
*/void pcibios_align_resource(void *data, struct resource *res,			    unsigned long size, unsigned long align){	struct pci_dev *dev = data;	struct pci_controller *hose = pci_bus_to_host(dev->bus);	unsigned long start = res->start;	unsigned long alignto;	if (res->flags & IORESOURCE_IO) {	        unsigned long offset = (unsigned long)hose->io_base_virt -					pci_io_base;		/* Make sure we start at our min on all hoses */		if (start - offset < PCIBIOS_MIN_IO)			start = PCIBIOS_MIN_IO + offset;		/*		 * Put everything into 0x00-0xff region modulo 0x400		 */		if (start & 0x300)			start = (start + 0x3ff) & ~0x3ff;	} else if (res->flags & IORESOURCE_MEM) {		/* Make sure we start at our min on all hoses */		if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)			start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;		/* Align to multiple of size of minimum base.  */		alignto = max(0x1000UL, align);		start = ALIGN(start, alignto);	}	res->start = start;}static DEFINE_SPINLOCK(hose_spinlock);/* * pci_controller(phb) initialized common variables. 
 */
/* Zero a freshly-allocated controller, assign it a unique global number
 * under hose_spinlock, and add it to hose_list for the later bus scan. */
void __devinit pci_setup_pci_controller(struct pci_controller *hose)
{
	memset(hose, 0, sizeof(struct pci_controller));
	spin_lock(&hose_spinlock);
	hose->global_number = global_phb_number++;
	list_add_tail(&hose->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
}

/* Claim every already-assigned resource of every device on bus b,
 * then recurse into child buses.  Resources with no start address,
 * no flags, or an existing parent are skipped. */
static void __init pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}

#ifndef CONFIG_PPC_ISERIES
/* Claim the Open Firmware assigned resources on every root bus.
 * Used when pci_probe_only is set (don't reassign anything). */
static void __init pcibios_claim_of_setup(void)
{
	struct pci_bus *b;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}
#endif

/* Main PCI bring-up: scan every registered host bridge, then either
 * claim the firmware-assigned resources (pci_probe_only) or let the
 * generic code assign them, then run platform fixups and cache the
 * ISA bridge.  Registered below via subsys_initcall(). */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;
	struct pci_bus *bus;

#ifdef CONFIG_PPC_ISERIES
	iSeries_pcibios_init();
#endif

	printk("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers.  */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		/* Allow the full bus range until the scan reports the real
		 * subordinate bus number. */
		hose->last_busno = 0xff;
		bus = pci_scan_bus(hose->first_busno, hose->ops,
				   hose->arch_data);
		hose->bus = bus;
		hose->last_busno = bus->subordinate;
	}

#ifndef CONFIG_PPC_ISERIES
	if (pci_probe_only)
		pcibios_claim_of_setup();
	else
		/* FIXME: `else' will be removed when
		   pci_assign_unassigned_resources() is able to work
		   correctly with [partially] allocated PCI tree.
 */
		pci_assign_unassigned_resources();
#endif /* !CONFIG_PPC_ISERIES */

	/* Call machine dependent final fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Cache the location of the ISA bridge (if we have one) */
	ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (ppc64_isabridge_dev != NULL)
		printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));

	printk("PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);

/* Command-line "pci=" option hook: nothing parsed here, the string is
 * handed back to the generic code unchanged. */
char __init *pcibios_setup(char *str)
{
	return str;
}

/* Enable I/O and/or memory decoding in the device's PCI command register
 * for each resource selected by mask.  The register is written only when
 * at least one new bit is needed.  Always returns 0. */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		/* Only set up the requested stuff */
		if (!(mask & (1<<i)))
			continue;

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register.  */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/*
 * Return the domain number for this bus.
 * On iSeries there is a single domain; elsewhere the domain is the
 * host bridge's global number assigned in pci_setup_pci_controller().
 */
int pci_domain_nr(struct pci_bus *bus)
{
#ifdef CONFIG_PPC_ISERIES
	return 0;
#else
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
#endif
}

EXPORT_SYMBOL(pci_domain_nr);

/* Set the name of the bus as it appears in /proc/bus/pci.
 * NOTE: the `else' deliberately pairs across the #endif — hoses with a
 * buid get "domain:bus", everything else just "bus". */
int pci_name_bus(char *name, struct pci_bus *bus)
{
#ifndef CONFIG_PPC_ISERIES
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (hose->buid)
		sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
	else
#endif
		sprintf(name, "%02x", bus->number);
	return 0;
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
*//* * Adjust vm_pgoff of VMA such that it is the physical page offset * corresponding to the 32-bit pci bus offset for DEV requested by the user. * * Basically, the user finds the base address for his device which he wishes * to mmap.  They read the 32-bit value from the config space base register, * add whatever PAGE_SIZE multiple offset they wish, and feed this into the * offset parameter of mmap on /proc/bus/pci/XXX for that device. * * Returns negative error code on failure, zero on success. */static __inline__ int __pci_mmap_make_offset(struct pci_dev *dev,					     struct vm_area_struct *vma,					     enum pci_mmap_state mmap_state){	struct pci_controller *hose = pci_bus_to_host(dev->bus);	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;	unsigned long io_offset = 0;	int i, res_bit;	if (hose == 0)		return -EINVAL;		/* should never happen */	/* If memory, add on the PCI bridge address offset */	if (mmap_state == pci_mmap_mem) {		offset += hose->pci_mem_offset;		res_bit = IORESOURCE_MEM;	} else {		io_offset = (unsigned long)hose->io_base_virt;		offset += io_offset;		res_bit = IORESOURCE_IO;	}	/*	 * Check that the offset requested corresponds to one of the	 * resources of the device.	 */	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {		struct resource *rp = &dev->resource[i];		int flags = rp->flags;		/* treat ROM as memory (should be already) */		if (i == PCI_ROM_RESOURCE)			flags |= IORESOURCE_MEM;		/* Active and same type? */		if ((flags & res_bit) == 0)			continue;		/* In the range of this resource? */		if (offset < (rp->start & PAGE_MASK) || offset > rp->end)			continue;		/* found it! construct the final physical address */		if (mmap_state == pci_mmap_io)			offset += hose->io_base_phys - io_offset;		vma->vm_pgoff = offset >> PAGE_SHIFT;		return 0;	}	return -EINVAL;}/* * Set vm_flags of VMA, as appropriate for this architecture, for a pci device * mapping. 
 */
/* Mark the VMA as a shared, locked I/O mapping so the VM core treats it
 * as device memory (no swap, no core dump of contents). */
static __inline__ void __pci_mmap_set_flags(struct pci_dev *dev,
					    struct vm_area_struct *vma,
					    enum pci_mmap_state mmap_state)
{
	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.  Always uncached; guarded as well unless the caller asked
 * for write combining.
 */
static __inline__ void __pci_mmap_set_pgprot(struct pci_dev *dev,
					     struct vm_area_struct *vma,
					     enum pci_mmap_state mmap_state,
					     int write_combine)
{
	long prot = pgprot_val(vma->vm_page_prot);

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (!write_combine)
		prot |= _PAGE_GUARDED;
	vma->vm_page_prot = __pgprot(prot);
}

/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	int ret;

	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
	if (ret < 0)
		return ret;

	__pci_mmap_set_flags(dev, vma, mmap_state);
	__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
	/* NOTE(review): this source chunk ends here (page 1 of 2); the
	 * remainder of pci_mmap_page_range continues on the next page and
	 * is not visible in this view. */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?