📄 rte_mb_a_pci.c
/* ... listing truncated: the beginning of the file (including the
   start of alloc_mb_sram) is not shown on the captured page ... */

                                        mb_sram_free_areas = fa->next;
                                /* Put it on the free-list-entry-free-list. */
                                fa->next = mb_sram_free_free_areas;
                                mb_sram_free_free_areas = fa;
                        } else {
                                /* FA is bigger than SIZE, so just reduce
                                   its size to account for this
                                   allocation. */
                                fa->mem += size;
                                fa->size -= size;
                        }

                        break;
                }

        spin_unlock_irqrestore (mb_sram_lock, flags);

        return mem;
}

/* Return the memory area MEM of size SIZE to the MB SRAM free pool. */
static void free_mb_sram (void *mem, size_t size)
{
        struct mb_sram_free_area *prev, *fa, *new_fa;
        int flags;
        void *end = mem + size;

        spin_lock_irqsave (mb_sram_lock, flags);

 retry:
        /* Find an adjacent free-list entry. */
        for (prev = 0, fa = mb_sram_free_areas; fa; prev = fa, fa = fa->next)
                if (fa->mem == end) {
                        /* FA is just after MEM, grow down to encompass it. */
                        fa->mem = mem;
                        fa->size += size;
                        goto done;
                } else if (fa->mem + fa->size == mem) {
                        struct mb_sram_free_area *next_fa = fa->next;

                        /* FA is just before MEM, expand to encompass it. */
                        fa->size += size;

                        /* See if FA can now be merged with its successor. */
                        if (next_fa && fa->mem + fa->size == next_fa->mem) {
                                /* Yup; merge NEXT_FA's info into FA. */
                                fa->size += next_fa->size;
                                fa->next = next_fa->next;

                                /* Free NEXT_FA. */
                                next_fa->next = mb_sram_free_free_areas;
                                mb_sram_free_free_areas = next_fa;
                        }
                        goto done;
                } else if (fa->mem > mem)
                        /* We've reached the right spot in the free-list
                           without finding an adjacent free-area, so add
                           a new free area to hold MEM. */
                        break;

        /* Make a new free-list entry. */

        /* First, get a free-list entry. */
        if (! mb_sram_free_free_areas) {
                /* There are none, so make some. */
                void *block;
                size_t block_size = sizeof (struct mb_sram_free_area) * 8;

                /* Don't hold the lock while calling kmalloc (I'm not
                   sure whether it would be a problem, since we use
                   GFP_ATOMIC, but it makes me nervous). */
                spin_unlock_irqrestore (mb_sram_lock, flags);

                block = kmalloc (block_size, GFP_ATOMIC);
                if (! block)
                        panic ("free_mb_sram: can't allocate free-list entry");

                /* Now get the lock back. */
                spin_lock_irqsave (mb_sram_lock, flags);

                /* Add the new free free-list entries. */
                while (block_size > 0) {
                        struct mb_sram_free_area *nfa = block;
                        nfa->next = mb_sram_free_free_areas;
                        mb_sram_free_free_areas = nfa;
                        block += sizeof *nfa;
                        block_size -= sizeof *nfa;
                }

                /* Since we dropped the lock to call kmalloc, the
                   free-list could have changed, so retry from the
                   beginning. */
                goto retry;
        }

        /* Remove NEW_FA from the free-list of free-list entries. */
        new_fa = mb_sram_free_free_areas;
        mb_sram_free_free_areas = new_fa->next;

        /* NEW_FA initially holds only MEM. */
        new_fa->mem = mem;
        new_fa->size = size;

        /* Insert NEW_FA in the free-list between PREV and FA. */
        new_fa->next = fa;
        if (prev)
                prev->next = new_fa;
        else
                mb_sram_free_areas = new_fa;

 done:
        spin_unlock_irqrestore (mb_sram_lock, flags);
}
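/* A minimal usage sketch of the SRAM pool above (illustration only,
   not part of the original file): alloc_mb_sram carves a chunk out of
   a free area, and free_mb_sram returns it, coalescing with adjacent
   free areas in the address-ordered free-list.  The byte count is
   arbitrary, chosen just for illustration. */
#if 0  /* illustration only */
static void mb_sram_example (void)
{
        void *buf = alloc_mb_sram (128);  /* carve 128 bytes from the pool */
        if (! buf)
                return;                   /* pool exhausted */

        /* ... use BUF ... */

        free_mb_sram (buf, 128);          /* caller must pass the same size */
}
#endif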
/* Maintenance of CPU -> Mother-A DMA mappings. */

struct dma_mapping {
        void *cpu_addr;
        void *mb_sram_addr;
        size_t size;
        struct dma_mapping *next;
};

/* A list of mappings from CPU addresses to MB SRAM addresses for
   active DMA blocks (that have been `granted' to the PCI device). */
static struct dma_mapping *active_dma_mappings = 0;

/* A list of free mapping objects. */
static struct dma_mapping *free_dma_mappings = 0;

/* Spinlock protecting the above globals. */
static spinlock_t dma_mappings_lock = SPIN_LOCK_UNLOCKED;

static struct dma_mapping *new_dma_mapping (size_t size)
{
        int flags;
        struct dma_mapping *mapping;
        void *mb_sram_block = alloc_mb_sram (size);

        if (! mb_sram_block)
                return 0;

        spin_lock_irqsave (dma_mappings_lock, flags);

        if (! free_dma_mappings) {
                /* We're out of mapping structures, make more. */
                void *mblock;
                size_t mblock_size = sizeof (struct dma_mapping) * 8;

                /* Don't hold the lock while calling kmalloc (I'm not
                   sure whether it would be a problem, since we use
                   GFP_ATOMIC, but it makes me nervous). */
                spin_unlock_irqrestore (dma_mappings_lock, flags);

                mblock = kmalloc (mblock_size, GFP_ATOMIC);
                if (! mblock) {
                        free_mb_sram (mb_sram_block, size);
                        return 0;
                }

                /* Get the lock back. */
                spin_lock_irqsave (dma_mappings_lock, flags);

                /* Add the new mapping structures to the free-list. */
                while (mblock_size > 0) {
                        struct dma_mapping *fm = mblock;
                        fm->next = free_dma_mappings;
                        free_dma_mappings = fm;
                        mblock += sizeof *fm;
                        mblock_size -= sizeof *fm;
                }
        }

        /* Get a mapping struct from the freelist. */
        mapping = free_dma_mappings;
        free_dma_mappings = mapping->next;

        /* Initialize the mapping.  Other fields should be filled in
           by the caller. */
        mapping->mb_sram_addr = mb_sram_block;
        mapping->size = size;

        /* Add it to the list of active mappings. */
        mapping->next = active_dma_mappings;
        active_dma_mappings = mapping;

        spin_unlock_irqrestore (dma_mappings_lock, flags);

        return mapping;
}

static struct dma_mapping *find_dma_mapping (void *mb_sram_addr)
{
        int flags;
        struct dma_mapping *mapping;

        spin_lock_irqsave (dma_mappings_lock, flags);

        for (mapping = active_dma_mappings; mapping; mapping = mapping->next)
                if (mapping->mb_sram_addr == mb_sram_addr) {
                        spin_unlock_irqrestore (dma_mappings_lock, flags);
                        return mapping;
                }

        panic ("find_dma_mapping: unmapped PCI DMA addr 0x%x",
               MB_SRAM_TO_PCI (mb_sram_addr));
}

static struct dma_mapping *deactivate_dma_mapping (void *mb_sram_addr)
{
        int flags;
        struct dma_mapping *mapping, *prev;

        spin_lock_irqsave (dma_mappings_lock, flags);

        for (prev = 0, mapping = active_dma_mappings;
             mapping;
             prev = mapping, mapping = mapping->next)
        {
                if (mapping->mb_sram_addr == mb_sram_addr) {
                        /* This is the MAPPING; deactivate it. */
                        if (prev)
                                prev->next = mapping->next;
                        else
                                active_dma_mappings = mapping->next;

                        spin_unlock_irqrestore (dma_mappings_lock, flags);

                        return mapping;
                }
        }

        panic ("deactivate_dma_mapping: unmapped PCI DMA addr 0x%x",
               MB_SRAM_TO_PCI (mb_sram_addr));
}

/* Return MAPPING to the freelist. */
static inline void
free_dma_mapping (struct dma_mapping *mapping)
{
        int flags;

        free_mb_sram (mapping->mb_sram_addr, mapping->size);

        spin_lock_irqsave (dma_mappings_lock, flags);

        mapping->next = free_dma_mappings;
        free_dma_mappings = mapping;

        spin_unlock_irqrestore (dma_mappings_lock, flags);
}

/* Single PCI DMA mappings. */

/* `Grant' to PDEV the memory block at CPU_ADDR, for doing DMA.  The
   32-bit PCI bus mastering address to use is returned.  The device
   owns this memory until either pci_unmap_single or
   pci_dma_sync_single is performed. */
dma_addr_t
pci_map_single (struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
        struct dma_mapping *mapping = new_dma_mapping (size);

        if (! mapping)
                return 0;

        mapping->cpu_addr = cpu_addr;

        if (dir == PCI_DMA_BIDIRECTIONAL || dir == PCI_DMA_TODEVICE)
                memcpy (mapping->mb_sram_addr, cpu_addr, size);

        return MB_SRAM_TO_PCI (mapping->mb_sram_addr);
}
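/* A hedged sketch of how a driver might drive the streaming API above
   (illustration only, not part of the original file: DEV, DATA and
   LEN are hypothetical, and the device-programming step is elided).
   It follows the ownership rules in the comments above: the device
   owns the buffer from pci_map_single until a sync or unmap. */
#if 0  /* illustration only */
static void streaming_dma_example (struct pci_dev *dev, void *data,
                                   size_t len)
{
        /* Grant DATA to the device; the returned bus address is what
           the device DMAs into. */
        dma_addr_t bus_addr = pci_map_single (dev, data, len,
                                              PCI_DMA_FROMDEVICE);

        /* ... program the device to DMA into BUS_ADDR and wait ... */

        /* Inspect the buffer with the CPU without tearing down the
           mapping; afterwards the device may own it again. */
        pci_dma_sync_single (dev, bus_addr, len, PCI_DMA_FROMDEVICE);

        /* Done: copy back (for FROMDEVICE) and release the mapping. */
        pci_unmap_single (dev, bus_addr, len, PCI_DMA_FROMDEVICE);
}
#endif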
/* Return to the CPU the PCI DMA memory block previously `granted' to
   PDEV, at DMA_ADDR. */
void
pci_unmap_single (struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
                  int dir)
{
        void *mb_sram_addr = PCI_TO_MB_SRAM (dma_addr);
        struct dma_mapping *mapping = deactivate_dma_mapping (mb_sram_addr);

        if (size != mapping->size)
                panic ("pci_unmap_single: size (%d) doesn't match"
                       " size of mapping at PCI DMA addr 0x%x (%d)\n",
                       size, dma_addr, mapping->size);

        /* Copy back the DMA'd contents if necessary. */
        if (dir == PCI_DMA_BIDIRECTIONAL || dir == PCI_DMA_FROMDEVICE)
                memcpy (mapping->cpu_addr, mb_sram_addr, size);

        /* Return the mapping to the freelist. */
        free_dma_mapping (mapping);
}

/* Make physical memory consistent for a single streaming mode DMA
   translation after a transfer.

   If you perform a pci_map_single() but wish to interrogate the
   buffer using the cpu, yet do not wish to tear down the PCI dma
   mapping, you must call this function before doing so.  At the next
   point you give the PCI dma address back to the card, the device
   again owns the buffer. */
void
pci_dma_sync_single (struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
                     int dir)
{
        void *mb_sram_addr = PCI_TO_MB_SRAM (dma_addr);
        struct dma_mapping *mapping = find_dma_mapping (mb_sram_addr);

        /* Synchronize the DMA buffer with the CPU buffer if necessary. */
        if (dir == PCI_DMA_FROMDEVICE)
                memcpy (mapping->cpu_addr, mb_sram_addr, size);
        else if (dir == PCI_DMA_TODEVICE)
                memcpy (mb_sram_addr, mapping->cpu_addr, size);
        else
                panic ("pci_dma_sync_single: unsupported sync dir: %d", dir);
}

/* Scatter-gather PCI DMA mappings. */

/* Do multiple DMA mappings at once. */
int
pci_map_sg (struct pci_dev *pdev, struct scatterlist *sg, int sg_len, int dir)
{
        BUG ();
        return 0;
}

/* Unmap multiple DMA mappings at once. */
void
pci_unmap_sg (struct pci_dev *pdev, struct scatterlist *sg, int sg_len,
              int dir)
{
        BUG ();
}

/* Make physical memory consistent for a set of streaming mode DMA
   translations after a transfer.  The same as pci_dma_sync_single but
   for a scatter-gather list; same rules and usage. */
void
pci_dma_sync_sg (struct pci_dev *dev, struct scatterlist *sg, int sg_len,
                 int dir)
{
        BUG ();
}

/* PCI mem mapping. */

/* Allocate and map a kernel buffer using consistent mode DMA for a
   PCI device.  Returns a non-NULL cpu-view pointer to the buffer if
   successful, and sets *DMA_ADDR to the pci-side dma address as well;
   otherwise *DMA_ADDR is undefined. */
void *
pci_alloc_consistent (struct pci_dev *pdev, size_t size, dma_addr_t *dma_addr)
{
        void *mb_sram_mem = alloc_mb_sram (size);
        if (mb_sram_mem)
                *dma_addr = MB_SRAM_TO_PCI (mb_sram_mem);
        return mb_sram_mem;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal. */
void
pci_free_consistent (struct pci_dev *pdev, size_t size, void *cpu_addr,
                     dma_addr_t dma_addr)
{
        void *mb_sram_mem = PCI_TO_MB_SRAM (dma_addr);
        free_mb_sram (mb_sram_mem, size);
}

/* symbol exports (for modules) */
EXPORT_SYMBOL (pci_map_single);
EXPORT_SYMBOL (pci_unmap_single);
EXPORT_SYMBOL (pci_alloc_consistent);
EXPORT_SYMBOL (pci_free_consistent);
EXPORT_SYMBOL (pci_dma_sync_single);
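/* A hedged sketch of consistent-mode usage (illustration only, not
   part of the original file; the 256-byte size and the device
   programming are hypothetical).  Unlike the streaming API, no
   sync/copy step is needed: here the cpu-view pointer and the bus
   address refer to the same MB SRAM memory. */
#if 0  /* illustration only */
static void consistent_dma_example (struct pci_dev *dev)
{
        dma_addr_t bus_addr;
        void *ring = pci_alloc_consistent (dev, 256, &bus_addr);
        if (! ring)
                return;

        /* ... hand BUS_ADDR to the device; both CPU and device may
           touch RING without further sync calls ... */

        pci_free_consistent (dev, 256, ring, bus_addr);
}
#endif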