📄 sym53c8xx_comm.h
字号:
/* NOTE(review): this opening brace/body is the tail of a function whose
 * header lies above this chunk (presumably the PCI memory unmapping
 * helper paired with the iounmap below) — confirm against the full file. */
{	if (vaddr)
		iounmap((void *) (vaddr & PAGE_MASK));
}
#endif /* not def SCSI_NCR_PCI_MEM_NOT_SUPPORTED */

/*==========================================================
**
**	Insert a delay in micro-seconds and milli-seconds.
**
**	Under Linux, udelay() is restricted to delay <
**	1 milli-second. In fact, it generally works for up
**	to 1 second delay. Since 2.1.105, the mdelay() function
**	is provided for delays in milli-seconds.
**	Under 2.0 kernels, udelay() is an inline function
**	that is very inaccurate on Pentium processors.
**
**==========================================================
*/
#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,105)
#define UDELAY udelay
#define MDELAY mdelay
#else
/* Pre-2.1.105 fallback: emulate mdelay() with repeated 1 ms udelay()s. */
static void UDELAY(long us) { udelay(us); }
static void MDELAY(long ms) { while (ms--) UDELAY(1000); }
#endif

/*==========================================================
**
**	Simple power of two buddy-like allocator.
**
**	This simple code is not intended to be fast, but to
**	provide power of 2 aligned memory allocations.
**	Since the SCRIPTS processor only supplies 8 bit
**	arithmetic, this allocator allows simple and fast
**	address calculations from the SCRIPTS code.
**	In addition, cache line alignment is guaranteed for
**	power of 2 cache line size.
**	Enhanced in linux-2.3.44 to provide a memory pool
**	per pcidev to support dynamic dma mapping. (I would
**	have preferred a real bus abstraction, btw).
**
**==========================================================
*/

/* 2.0 kernels took a third "dma" argument to __get_free_pages(). */
#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
#define __GetFreePages(flags, order) __get_free_pages(flags, order)
#else
#define __GetFreePages(flags, order) __get_free_pages(flags, order, 0)
#endif

#define MEMO_SHIFT	4	/* 16 bytes minimum memory chunk */
#if PAGE_SIZE >= 8192
#define MEMO_PAGE_ORDER	0	/* 1 PAGE maximum */
#else
#define MEMO_PAGE_ORDER	1	/* 2 PAGES maximum */
#endif
#define MEMO_FREE_UNUSED	/* Free unused pages immediately */
#define MEMO_WARN	1	/* uflags bit: printk on allocation failure */
#define MEMO_GFP_FLAGS	GFP_ATOMIC
#define MEMO_CLUSTER_SHIFT	(PAGE_SHIFT+MEMO_PAGE_ORDER)
#define MEMO_CLUSTER_SIZE	(1UL << MEMO_CLUSTER_SHIFT)
#define MEMO_CLUSTER_MASK	(MEMO_CLUSTER_SIZE-1)

typedef u_long m_addr_t;	/* Enough bits to bit-hack addresses */
typedef pcidev_t m_bush_t;	/* Something that addresses DMAable */

typedef struct m_link {		/* Link between free memory chunks */
	struct m_link *next;
} m_link_s;

#ifdef	SCSI_NCR_DYNAMIC_DMA_MAPPING
typedef struct m_vtob {		/* Virtual to Bus address translation */
	struct m_vtob *next;
	m_addr_t vaddr;		/* CPU virtual address of the cluster */
	m_addr_t baddr;		/* matching bus (DMA) address */
} m_vtob_s;
/* Hash a virtual address by its cluster number into the vtob table. */
#define VTOB_HASH_SHIFT		5
#define VTOB_HASH_SIZE		(1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK		(VTOB_HASH_SIZE-1)
#define VTOB_HASH_CODE(m)	\
	((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
#endif

typedef struct m_pool {		/* Memory pool of a given kind */
#ifdef	SCSI_NCR_DYNAMIC_DMA_MAPPING
	m_bush_t bush;		/* pcidev this pool maps memory for */
	m_addr_t (*getp)(struct m_pool *);	/* get one cluster */
	void (*freep)(struct m_pool *, m_addr_t); /* release one cluster */
#define	M_GETP()	mp->getp(mp)
#define	M_FREEP(p)	mp->freep(mp, p)
#define GetPages()	__GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
#define FreePages(p)	free_pages(p, MEMO_PAGE_ORDER)
	int nump;		/* #clusters outstanding; 0 => pool deletable */
	m_vtob_s *(vtob[VTOB_HASH_SIZE]); /* virtual->bus reverse table */
	struct m_pool *next;	/* chained off mp0.next */
#else
#define	M_GETP()	__GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
#define	M_FREEP(p)	free_pages(p, MEMO_PAGE_ORDER)
#endif	/* SCSI_NCR_DYNAMIC_DMA_MAPPING */
	/* One free list per power-of-2 size, from 1<<MEMO_SHIFT up to
	 * a full cluster (PAGE_SIZE << MEMO_PAGE_ORDER). */
	struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
} m_pool_s;

/*
 *  Allocate a power-of-2 sized, power-of-2 aligned chunk of at least
 *  `size' bytes from pool `mp'.  Finds the smallest free list that can
 *  satisfy the request, pulling in and splitting a larger buddy (or a
 *  fresh cluster from M_GETP()) as needed.  Returns 0 on failure or if
 *  size exceeds one cluster.
 */
static void *___m_alloc(m_pool_s *mp, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);
	int j;
	m_addr_t a;
	m_link_s *h = mp->h;

	if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
		return 0;

	/* Round the request up to the next power of 2; i indexes h[]. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	/* Find the first non-empty list at or above i, getting a new
	 * cluster from the backend when even the top list is empty. */
	j = i;
	while (!h[j].next) {
		if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
			h[j].next = (m_link_s *) M_GETP();
			if (h[j].next)
				h[j].next->next = 0;
			break;
		}
		++j;
		s <<= 1;
	}
	a = (m_addr_t) h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		/* Split the chunk down to the requested size, pushing the
		 * upper half (the buddy at a+s) onto each smaller list. */
		while (j > i) {
			j -= 1;
			s >>= 1;
			h[j].next = (m_link_s *) (a+s);
			h[j].next->next = 0;
		}
	}
#ifdef DEBUG
	printk("___m_alloc(%d) = %p\n", size, (void *) a);
#endif
	return (void *) a;
}

/*
 *  Return a chunk previously obtained from ___m_alloc() to pool `mp'.
 *  Coalesces with its buddy (address a ^ s) at each level when that
 *  buddy is also free; a fully reassembled cluster is handed back to
 *  the backend when MEMO_FREE_UNUSED is defined.
 */
static void ___m_free(m_pool_s *mp, void *ptr, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);
	m_link_s *q;
	m_addr_t a, b;
	m_link_s *h = mp->h;

#ifdef DEBUG
	printk("___m_free(%p, %d)\n", ptr, size);
#endif

	if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
		return;

	/* Recompute the power-of-2 size class the chunk came from. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	a = (m_addr_t) ptr;

	while (1) {
#ifdef MEMO_FREE_UNUSED
		/* A whole cluster coalesced: give it back immediately. */
		if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
			M_FREEP(a);
			break;
		}
#endif
		b = a ^ s;	/* buddy address at this size class */
		q = &h[i];
		while (q->next && q->next != (m_link_s *) b) {
			q = q->next;
		}
		if (!q->next) {
			/* Buddy not free: just push the chunk on list i. */
			((m_link_s *) a)->next = h[i].next;
			h[i].next = (m_link_s *) a;
			break;
		}
		/* Buddy found: unlink it and retry one size class up with
		 * the merged chunk (a & b is the lower of the two). */
		q->next = q->next->next;
		a = a & b;
		s <<= 1;
		++i;
	}
}

/*
 *  Allocate and zero-fill `size' bytes from `mp'.  `name' is only used
 *  for debug/error messages; uflags & MEMO_WARN controls whether an
 *  allocation failure is logged.
 */
static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
{
	void *p;

	p = ___m_alloc(mp, size);

	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printk ("new %-10s[%4d] @%p.\n", name, size, p);

	if (p)
		bzero(p, size);
	else if (uflags & MEMO_WARN)
		printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);

	return p;
}

#define __m_calloc(mp, s, n)	__m_calloc2(mp, s, n, MEMO_WARN)

/* Free with optional DEBUG_ALLOC tracing; `name' is for messages only. */
static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
{
	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);

	___m_free(mp, ptr, size);
}

/*
 * With pci bus iommu support, we use a default pool of unmapped memory
 * for memory we do not need to DMA from/to and one pool per pcidev for
 * memory accessed by the PCI chip. `mp0' is the default not DMAable pool.
 */
#ifndef	SCSI_NCR_DYNAMIC_DMA_MAPPING
static m_pool_s mp0;
#else
/* mp0 backend: plain page allocation, tracking the outstanding count. */
static m_addr_t ___mp0_getp(m_pool_s *mp)
{
	m_addr_t m = GetPages();
	if (m)
		++mp->nump;
	return m;
}

static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
{
	FreePages(m);
	--mp->nump;
}

static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */

/* Driver-lock-protected zeroing allocation from the default pool. */
static void *m_calloc(int size, char *name)
{
	u_long flags;
	void *m;
	NCR_LOCK_DRIVER(flags);
	m = __m_calloc(&mp0, size, name);
	NCR_UNLOCK_DRIVER(flags);
	return m;
}

/* Driver-lock-protected free back to the default pool. */
static void m_free(void *ptr, int size, char *name)
{
	u_long flags;
	NCR_LOCK_DRIVER(flags);
	__m_free(&mp0, ptr, size, name);
	NCR_UNLOCK_DRIVER(flags);
}

/*
 * DMAable pools.
 */
#ifndef	SCSI_NCR_DYNAMIC_DMA_MAPPING
/* Without pci bus iommu support, all the memory is assumed DMAable */
#define __m_calloc_dma(b, s, n)		m_calloc(s, n)
#define __m_free_dma(b, p, s, n)	m_free(p, s, n)
#define __vtobus(b, p)			virt_to_bus(p)
#else
/*
 * With pci bus iommu support, we maintain one pool per pcidev and a
 * hashed reverse table for virtual to bus physical address translations.
 */

/*
 *  DMA-pool backend: get one cluster of consistent (coherent) memory
 *  from the PCI layer, recording its virtual/bus address pair in the
 *  pool's hash so __vtobus() can translate later.  The m_vtob bookkeeping
 *  node itself comes from the plain mp0 pool.  Returns 0 on failure.
 */
static m_addr_t ___dma_getp(m_pool_s *mp)
{
	m_addr_t vp;
	m_vtob_s *vbp;

	vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
	if (vbp) {
		dma_addr_t daddr;
		vp = (m_addr_t) pci_alloc_consistent(mp->bush,
						PAGE_SIZE<<MEMO_PAGE_ORDER,
						&daddr);
		if (vp) {
			int hc = VTOB_HASH_CODE(vp);
			vbp->vaddr = vp;
			vbp->baddr = daddr;
			vbp->next = mp->vtob[hc];
			mp->vtob[hc] = vbp;
			++mp->nump;
			return vp;
		}
	}
	/* pci_alloc_consistent failed: drop the bookkeeping node too. */
	if (vbp)
		__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
	return 0;
}

/*
 *  DMA-pool backend: release the cluster whose virtual address is `m',
 *  unhashing its m_vtob entry and returning the consistent memory to
 *  the PCI layer.
 */
static void ___dma_freep(m_pool_s *mp, m_addr_t m)
{
	m_vtob_s **vbpp, *vbp;
	int hc = VTOB_HASH_CODE(m);

	vbpp = &mp->vtob[hc];
	while (*vbpp && (*vbpp)->vaddr != m)
		vbpp = &(*vbpp)->next;
	if (*vbpp) {
		vbp = *vbpp;
		*vbpp = (*vbpp)->next;
		pci_free_consistent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
				    (void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
		__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
		--mp->nump;
	}
}

/* Look up the pool attached to `bush' in the mp0.next chain (or NULL). */
static __inline__ m_pool_s *___get_dma_pool(m_bush_t bush)
{
	m_pool_s *mp;
	for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
	return mp;
}

/* Create a fresh per-pcidev pool and link it at the head of mp0.next. */
static m_pool_s *___cre_dma_pool(m_bush_t bush)
{
	m_pool_s *mp;
	mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
	if (mp) {
		bzero(mp, sizeof(*mp));
		mp->bush = bush;
		mp->getp = ___dma_getp;
		mp->freep = ___dma_freep;
		mp->next = mp0.next;
		mp0.next = mp;
	}
	return mp;
}

/* Unlink pool `p' from the mp0.next chain and free its descriptor. */
static void ___del_dma_pool(m_pool_s *p)
{
	struct m_pool **pp = &mp0.next;

	while (*pp && *pp != p)
		pp = &(*pp)->next;
	if (*pp) {
		*pp = (*pp)->next;
		__m_free(&mp0, p, sizeof(*p), "MPOOL");
	}
}

/*
 *  Allocate zeroed DMAable memory for pcidev `bush', creating its pool
 *  on first use and deleting it again if the allocation left it empty.
 *  Serialized by the driver lock.
 */
static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
{
	u_long flags;
	struct m_pool *mp;
	void *m = 0;

	NCR_LOCK_DRIVER(flags);
	mp = ___get_dma_pool(bush);
	if (!mp)
		mp = ___cre_dma_pool(bush);
	if (mp)
		m = __m_calloc(mp, size, name);
	if (mp && !mp->nump)
		___del_dma_pool(mp);
	NCR_UNLOCK_DRIVER(flags);

	return m;
}

/*
 *  Free DMAable memory back to the pool of pcidev `bush', deleting the
 *  pool once its last cluster is gone.  Serialized by the driver lock.
 */
static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
{
	u_long flags;
	struct m_pool *mp;

	NCR_LOCK_DRIVER(flags);
	mp = ___get_dma_pool(bush);
	if (mp)
		__m_free(mp, m, size, name);
	if (mp && !mp->nump)
		___del_dma_pool(mp);
	NCR_UNLOCK_DRIVER(flags);
}

/*
 *  Translate virtual address `m' (inside a cluster allocated from the
 *  pool of `bush') to its bus address: find the cluster's vtob entry by
 *  hashing the cluster base and add the offset within the cluster.
 *  Returns 0 when the address is not found.
 */
static m_addr_t __vtobus(m_bush_t bush, void *m)
{
	u_long flags;
	m_pool_s *mp;
	int hc = VTOB_HASH_CODE(m);
	m_vtob_s *vp = 0;
	m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;

	NCR_LOCK_DRIVER(flags);
	mp = ___get_dma_pool(bush);
	if (mp) {
		vp = mp->vtob[hc];
		while (vp && (m_addr_t) vp->vaddr != a)
			vp = vp->next;
	}
	NCR_UNLOCK_DRIVER(flags);
	return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
}

#endif	/* SCSI_NCR_DYNAMIC_DMA_MAPPING */

/* Convenience wrappers: `np' is the per-HBA softc holding the pdev. */
#define _m_calloc_dma(np, s, n)		__m_calloc_dma(np->pdev, s, n)
#define _m_free_dma(np, p, s, n)	__m_free_dma(np->pdev, p, s, n)
#define m_calloc_dma(s, n)		_m_calloc_dma(np, s, n)
#define m_free_dma(p, s, n)		_m_free_dma(np, p, s, n)
#define _vtobus(np, p)			__vtobus(np->pdev, p)
#define vtobus(p)			_vtobus(np, p)

/*
 * Deal with DMA mapping/unmapping.
 */
#ifndef	SCSI_NCR_DYNAMIC_DMA_MAPPING
/* Linux versions prior to pci bus iommu kernel interface */
#define __unmap_scsi_data(pdev, cmd)	do {; } while (0)
#define __map_scsi_single_data(pdev, cmd) (__vtobus(pdev,(cmd)->request_buffer))
#define __map_scsi_sg_data(pdev, cmd)	((cmd)->use_sg)
#define __sync_scsi_data(pdev, cmd)	do {; } while (0)

#define scsi_sg_dma_address(sc)		vtobus((sc)->address)
#define scsi_sg_dma_len(sc)		((sc)->length)
#else
/* Linux version with pci bus iommu kernel interface */

/* To keep track of the dma mapping (sg/single) that has been set:
 * reuse two otherwise-unused Scsi_Cmnd scratch fields.
 * __data_mapped: 0 = none, 1 = single, 2 = scatter/gather. */
#define __data_mapped	SCp.phase
#define __data_mapping	SCp.have_data_in

/* Undo whichever mapping (sg or single) was set up for `cmd'. */
static void __unmap_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
{
	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);

	switch(cmd->__data_mapped) {
	case 2:
		pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
		break;
	case 1:
		pci_unmap_single(pdev, cmd->__data_mapping,
				 cmd->request_bufflen, dma_dir);
		break;
	}
	cmd->__data_mapped = 0;
}

/* Map the command's single data buffer for DMA; returns the bus address
 * (0 when there is no data to transfer). */
static u_long __map_scsi_single_data(pcidev_t pdev, Scsi_Cmnd *cmd)
{
	dma_addr_t mapping;
	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);

	if (cmd->request_bufflen == 0)
		return 0;

	mapping = pci_map_single(pdev, cmd->request_buffer,
				 cmd->request_bufflen, dma_dir);
	cmd->__data_mapped = 1;
	cmd->__data_mapping = mapping;

	return mapping;
}

/* Map the command's scatter/gather list for DMA; returns the number of
 * mapped entries (0 when the command does not use sg). */
static int __map_scsi_sg_data(pcidev_t pdev, Scsi_Cmnd *cmd)
{
	int use_sg;
	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);

	if (cmd->use_sg == 0)
		return 0;

	use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
	cmd->__data_mapped = 2;
	cmd->__data_mapping = use_sg;

	return use_sg;
}

/* Synchronize the CPU's and the device's view of the mapped data. */
static void __sync_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
{
	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);

	switch(cmd->__data_mapped) {
	case 2:
		pci_dma_sync_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
		break;
	case 1:
		pci_dma_sync_single(pdev, cmd->__data_mapping,
				    cmd->request_bufflen, dma_dir);
		break;
	}
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -