pci.c
From "Linux Kernel 2.6.9 for OMAP1710" · C source code · 807 lines total · page 1 of 2
C
807 lines
/* NOTE(review): the lines below are the tail of a function that begins on
 * the previous page of this file — they clear the bus-master bit in
 * PCI_COMMAND and write it back (presumably the end of pci_disable_device;
 * confirm against page 1). */
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: PCI device to operate on
 * @state: current power state of the device (bit index into the PME
 *	support field of the PM capability)
 * @enable: flag to enable or disable PME# generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events
 * from the requested @state.
 * 0 if operation is successful.
 */
int pci_enable_wake(struct pci_dev *dev, u32 state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but request is to disable
	 * wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	/* Shift the PME-support field down to bit 0 of value. */
	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;	/* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}

/*
 * Walk from @dev up through each PCI-PCI bridge to the root bus,
 * applying the standard bridge swizzle (pin rotates with slot number)
 * at every hop.  Stores the topmost bridge device in *@bridge and
 * returns the 0-based interrupt pin seen at that level, or -1 if the
 * device does not use an interrupt pin.
 */
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (!pin)
		return -1;	/* device uses no IRQ pin */
	pin--;			/* config space is 1-based; work 0-based */
	while (dev->bus->self) {
		/* standard swizzle: new pin = (pin + slot) mod 4 */
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	/* a zero-length BAR was never reserved; nothing to do */
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				   pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserved PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
{
	/* a zero-length BAR is unimplemented; treat as trivially reserved */
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
				    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk(KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
		pci_name(pdev));
	return -EBUSY;
}

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	/* 6 == number of standard PCI BARs */
	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 * pci_request_regions - Reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	/* roll back the BARs reserved before the failing one */
	while (--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_MASTER)) {
		DBG("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = 1;
	pcibios_set_master(dev);
}

#ifndef HAVE_ARCH_PCI_MWI
/* This can be overridden by arch code.
 * Default: L1 cache line size expressed in 32-bit dwords, the unit the
 * PCI_CACHE_LINE_SIZE config register uses. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;

/**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
 *
 * Helper function for generic implementation of pcibios_prep_mwi
 * function.  Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_generic_prep_mwi(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back; a device that cannot honour the size leaves the
	   register unchanged. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

	return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * and then calls @pcibios_set_mwi to do the needed arch specific
 * operations or a generic mwi-prep function.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	/* Let the arch validate/program the cache line size first. */
#ifdef HAVE_ARCH_PCI_MWI
	rc = pcibios_prep_mwi(dev);
#else
	rc = pci_generic_prep_mwi(dev);
#endif

	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
		DBG("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dac_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	/* consistent (coherent) allocations have their own mask */
	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif
static int __devinit pci_init(void){ struct pci_dev *dev = NULL; while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { pci_fixup_device(pci_fixup_final, dev); } return 0;}static int __devinit pci_setup(char *str){ while (str) { char *k = strchr(str, ','); if (k) *k++ = 0; if (*str && (str = pcibios_setup(str)) && *str) { /* PCI layer options should be handled here */ printk(KERN_ERR "PCI: Unknown option `%s'\n", str); } str = k; } return 1;}device_initcall(pci_init);__setup("pci=", pci_setup);#if defined(CONFIG_ISA) || defined(CONFIG_EISA)/* FIXME: Some boxes have multiple ISA bridges! */struct pci_dev *isa_bridge;EXPORT_SYMBOL(isa_bridge);#endifEXPORT_SYMBOL(pci_enable_device_bars);EXPORT_SYMBOL(pci_enable_device);EXPORT_SYMBOL(pci_disable_device);EXPORT_SYMBOL(pci_max_busnr);EXPORT_SYMBOL(pci_bus_max_busnr);EXPORT_SYMBOL(pci_find_capability);EXPORT_SYMBOL(pci_bus_find_capability);EXPORT_SYMBOL(pci_release_regions);EXPORT_SYMBOL(pci_request_regions);EXPORT_SYMBOL(pci_release_region);EXPORT_SYMBOL(pci_request_region);EXPORT_SYMBOL(pci_set_master);EXPORT_SYMBOL(pci_set_mwi);EXPORT_SYMBOL(pci_clear_mwi);EXPORT_SYMBOL(pci_set_dma_mask);EXPORT_SYMBOL(pci_dac_set_dma_mask);EXPORT_SYMBOL(pci_set_consistent_dma_mask);EXPORT_SYMBOL(pci_assign_resource);EXPORT_SYMBOL(pci_find_parent_resource);EXPORT_SYMBOL(pci_set_power_state);EXPORT_SYMBOL(pci_save_state);EXPORT_SYMBOL(pci_restore_state);EXPORT_SYMBOL(pci_enable_wake);/* Quirk info */EXPORT_SYMBOL(isa_dma_bridge_buggy);EXPORT_SYMBOL(pci_pci_problems);
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?