/*
 * pcibr_dvr.c — SGI SN (Altix) PCI bridge (Bridge/XBridge/PIC) driver,
 * from Linux kernel 2.6.9.
 *
 * Excerpt: page 1 of 5 of a 2,108-line file; this page covers the DMA
 * map allocation tail, map free/addr/done paths, and the start of the
 * dmatrans (direct-mapped DMA) helpers.
 */
	    /* Device is capable of A64 operations,
	     * and the attributes of the DMA are
	     * consistent with any previous DMA
	     * mappings using shared resources.
	     */

	    /* Build the 64-bit direct-mapped PCI address attributes from the
	     * caller's flags, and record them in the map. */
	    pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);

	    pcibr_dmamap->bd_flags = flags;
	    pcibr_dmamap->bd_xio_addr = 0;
	    pcibr_dmamap->bd_pci_addr = pci_addr;

	    /* If in PCI mode, make sure we have an RRB (or two).
	     * A prefetchable mapping (PCI64_ATTR_PREF) wants at least two
	     * RRBs; otherwise one is enough.  Slots with a fixed RRB
	     * assignment (bs_rrb_fixed) are left alone.
	     */
	    if (IS_PCI(pcibr_soft) &&
		!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
		if (flags & PCIBR_VCHAN1)
		    vchan = VCHAN1;
		have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
		if (have_rrbs < 2) {
		    if (pci_addr & PCI64_ATTR_PREF)
			min_rrbs = 2;
		    else
			min_rrbs = 1;
		    if (have_rrbs < min_rrbs)
			pcibr_rrb_alloc_more(pcibr_soft, slot, vchan,
					     min_rrbs - have_rrbs);
		}
	    }
	    PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
			"pcibr_dmamap_alloc: using direct64, map=0x%lx\n",
			pcibr_dmamap));
	    return pcibr_dmamap;
	}
	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
		    "pcibr_dmamap_alloc: unable to use direct64\n"));

	/* PIC in PCI-X mode only supports 64-bit direct mapping so
	 * don't fall thru and try 32-bit direct mapping or 32-bit
	 * page mapping
	 */
	if (IS_PCIX(pcibr_soft)) {
	    /* NOTE(review): this failure path frees the map with kfree()
	     * rather than free_pciio_dmamap(), and does not call
	     * xtalk_dmamap_free(xtalk_dmamap) as the other failure paths
	     * below do — verify against the allocation code (not visible
	     * in this excerpt) that neither resource leaks here. */
	    kfree(pcibr_dmamap);
	    return 0;
	}

	/* Fall back to 32-bit strategies below. */
	flags &= ~PCIIO_DMA_A64;
    }
    if (flags & PCIIO_FIXED) {
	/* warning: mappings may fail later,
	 * if direct32 can't get to the address.
	 */
	if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) {
	    /* User desires DIRECT A32 operations,
	     * and the attributes of the DMA are
	     * consistent with any previous DMA
	     * mappings using shared resources.
	     * Mapping calls may fail if target
	     * is outside the direct32 range.
	     */
	    PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
			"pcibr_dmamap_alloc: using direct32, map=0x%lx\n",
			pcibr_dmamap));

	    pcibr_dmamap->bd_flags = flags;
	    pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
	    pcibr_dmamap->bd_pci_addr = PCI32_DIRECT_BASE;

	    return pcibr_dmamap;
	}
	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
		    "pcibr_dmamap_alloc: unable to use direct32\n"));

	/* If the user demands FIXED and we can't
	 * give it to him, fail.
	 */
	xtalk_dmamap_free(xtalk_dmamap);
	free_pciio_dmamap(pcibr_dmamap);
	return 0;
    }

    /*
     * Allocate Address Translation Entries from the mapping RAM.
     * Unless the PCIBR_NO_ATE_ROUNDUP flag is specified,
     * the maximum number of ATEs is based on the worst-case
     * scenario, where the requested target is in the
     * last byte of an ATE; thus, mapping IOPGSIZE+2
     * does end up requiring three ATEs.
     */
    if (!(flags & PCIBR_NO_ATE_ROUNDUP)) {
	ate_count = IOPG((IOPGSIZE - 1)	/* worst case start offset */
		     +req_size_max	/* max mapping bytes */
		     - 1) + 1;		/* round UP */
    } else {	/* assume requested target is page aligned */
	ate_count = IOPG(req_size_max   /* max mapping bytes */
		     - 1) + 1;		/* round UP */
    }

    ate_index = pcibr_ate_alloc(pcibr_soft, ate_count, &pcibr_dmamap->resource);

    if (ate_index != -1) {
	if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
	    bridge_ate_t            ate_proto;
	    int                     have_rrbs;
	    int                     min_rrbs;

	    PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
			"pcibr_dmamap_alloc: using PMU, ate_index=%d, "
			"pcibr_dmamap=0x%lx\n", ate_index, pcibr_dmamap));

	    /* Prototype ATE: attribute bits shared by every ATE this map
	     * will later write (the XIO target address is OR'ed in at
	     * pcibr_dmamap_addr() time). */
	    ate_proto = pcibr_flags_to_ate(pcibr_soft, flags);

	    pcibr_dmamap->bd_flags = flags;
	    pcibr_dmamap->bd_pci_addr =
		PCI32_MAPPED_BASE + IOPGSIZE * ate_index;

	    /* Byte-stream transfers flip the swap address bit on. */
	    if (flags & PCIIO_BYTE_STREAM)
		    ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
	    /*
	     * If swap was set in bss_device in pcibr_endian_set()
	     * we need to change the address bit.
	     */
	    if (pcibr_soft->bs_slot[slot].bss_device &
						BRIDGE_DEV_SWAP_PMU)
		    ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
	    /* Word-value transfers force the swap address bit back off. */
	    if (flags & PCIIO_WORD_VALUES)
		    ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);

	    pcibr_dmamap->bd_xio_addr = 0;
	    pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
	    pcibr_dmamap->bd_ate_index = ate_index;
	    pcibr_dmamap->bd_ate_count = ate_count;
	    pcibr_dmamap->bd_ate_proto = ate_proto;

	    /* Make sure we have an RRB (or two).
	     * Same policy as the direct64 path above: prefetchable
	     * (ATE_PREF) mappings want two RRBs, others one.
	     */
	    if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
		have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
		if (have_rrbs < 2) {
		    if (ate_proto & ATE_PREF)
			min_rrbs = 2;
		    else
			min_rrbs = 1;
		    if (have_rrbs < min_rrbs)
			pcibr_rrb_alloc_more(pcibr_soft, slot, vchan,
					     min_rrbs - have_rrbs);
		}
	    }
	    return pcibr_dmamap;
	}
	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
		    "pcibr_dmamap_alloc: PMU use failed, ate_index=%d\n",
		    ate_index));

	/* Device-register reservation failed: give the ATEs back. */
	pcibr_ate_free(pcibr_soft, ate_index, ate_count, &pcibr_dmamap->resource);
    }

    /* total failure: sorry, you just can't
     * get from here to there that way.
     */
    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
		"pcibr_dmamap_alloc: complete failure.\n"));
    xtalk_dmamap_free(xtalk_dmamap);
    free_pciio_dmamap(pcibr_dmamap);
    return 0;
}

/*
 * pcibr_dmamap_free: release every resource held by a DMA map —
 * the underlying xtalk map, the Device(x) register reservation
 * (D64 bits for A64 maps, PMU bits for ATE-backed maps), any
 * allocated ATEs, and finally the map structure itself.
 */
/*ARGSUSED */
void
pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
{
    pcibr_soft_t            pcibr_soft = pcibr_dmamap->bd_soft;
    pciio_slot_t            slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
							pcibr_dmamap->bd_slot);

    xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);

    if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
	pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_D64_BITS);
    }
    /* bd_ate_count != 0 means this map went through the PMU/ATE path. */
    if (pcibr_dmamap->bd_ate_count) {
	pcibr_ate_free(pcibr_dmamap->bd_soft,
		       pcibr_dmamap->bd_ate_index,
		       pcibr_dmamap->bd_ate_count,
		       &pcibr_dmamap->resource);
	pcibr_release_device(pcibr_soft, slot, XBRIDGE_DEV_PMU_BITS);
    }

    PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
		"pcibr_dmamap_free: pcibr_dmamap=0x%lx\n", pcibr_dmamap));

    free_pciio_dmamap(pcibr_dmamap);
}

/*
 *    pcibr_addr_xio_to_pci: given a PIO range, hand
 *      back the corresponding base PCI MEM address;
 *      this is used to short-circuit DMA requests that
 *      loop back onto this PCI bus.
 */
static iopaddr_t
pcibr_addr_xio_to_pci(pcibr_soft_t soft,
		      iopaddr_t xio_addr,
		      size_t req_size)
{
    iopaddr_t               xio_lim = xio_addr + req_size - 1;
    iopaddr_t               pci_addr;
    pciio_slot_t            slot;

    /* First check the bridge's own large PCI MEM32/MEM64 windows; a hit
     * translates by simple offset from the window base.  The whole
     * [xio_addr .. xio_lim] range must fit inside one window. */
    if (IS_PIC_BUSNUM_SOFT(soft, 0)) {
    	if ((xio_addr >= PICBRIDGE0_PCI_MEM32_BASE) &&
	    (xio_lim <= PICBRIDGE0_PCI_MEM32_LIMIT)) {
	    pci_addr = xio_addr - PICBRIDGE0_PCI_MEM32_BASE;
	    return pci_addr;
    	}
    	if ((xio_addr >= PICBRIDGE0_PCI_MEM64_BASE) &&
	    (xio_lim <= PICBRIDGE0_PCI_MEM64_LIMIT)) {
	    pci_addr = xio_addr - PICBRIDGE0_PCI_MEM64_BASE;
	    return pci_addr;
    	}
    } else if (IS_PIC_BUSNUM_SOFT(soft, 1)) {
    	if ((xio_addr >= PICBRIDGE1_PCI_MEM32_BASE) &&
	    (xio_lim <= PICBRIDGE1_PCI_MEM32_LIMIT)) {
	    pci_addr = xio_addr - PICBRIDGE1_PCI_MEM32_BASE;
	    return pci_addr;
    	}
    	if ((xio_addr >= PICBRIDGE1_PCI_MEM64_BASE) &&
	    (xio_lim <= PICBRIDGE1_PCI_MEM64_LIMIT)) {
	    pci_addr = xio_addr - PICBRIDGE1_PCI_MEM64_BASE;
	    return pci_addr;
    	}
    } else {
	/* NOTE(review): message has no KERN_* level and no trailing
	 * newline; harmless, but inconsistent with kernel printk style. */
	printk("pcibr_addr_xio_to_pci(): unknown bridge type");
	return (iopaddr_t)0;
    }

    /* Otherwise scan the per-slot DEVIO windows; a hit translates via
     * the slot's Device(x) register offset field. */
    for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
	if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
	    (xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
	    uint64_t		dev;

	    dev = soft->bs_slot[slot].bss_device;
	    pci_addr = dev & BRIDGE_DEV_OFF_MASK;
	    pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
	    pci_addr += xio_addr - PCIBR_BRIDGE_DEVIO(soft, slot);
	    /* DEVIO windows aimed at PCI I/O space (DEV_IO_MEM clear)
	     * cannot be a DMA target: report PCI_NOWHERE. */
	    return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
	}

    /* NOTE(review): 0 is returned both for "no window matched" and as a
     * legal translation of a window whose base offset is 0 — callers
     * apparently treat 0 as failure; verify no window maps PCI addr 0. */
    return 0;
}

/*
 * pcibr_dmamap_addr: establish the mapping recorded in pcibr_dmamap
 * for the physical range [paddr .. paddr+req_size-1] and return the
 * PCI bus address a device should use to reach it.  Returns 0 when
 * the target cannot be reached through this map.
 */
/*ARGSUSED */
iopaddr_t
pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
		  paddr_t paddr,
		  size_t req_size)
{
    pcibr_soft_t            pcibr_soft;
    iopaddr_t               xio_addr;
    xwidgetnum_t            xio_port;
    iopaddr_t               pci_addr;
    unsigned                flags;

    ASSERT(pcibr_dmamap != NULL);
    ASSERT(req_size > 0);
    ASSERT(req_size <= pcibr_dmamap->bd_max_size);

    pcibr_soft = pcibr_dmamap->bd_soft;
    flags = pcibr_dmamap->bd_flags;

    /* Translate the physical address to an XIO (crosstalk) address;
     * a "packed" result also encodes the target widget port. */
    xio_addr = xtalk_dmamap_addr(pcibr_dmamap->bd_xtalk, paddr, req_size);
    if (XIO_PACKED(xio_addr)) {
	xio_port = XIO_PORT(xio_addr);
	xio_addr = XIO_ADDR(xio_addr);
    } else
	xio_port = pcibr_dmamap->bd_xio_port;

    /* If this DMA is to an address that
     * refers back to this Bridge chip,
     * reduce it back to the correct
     * PCI MEM address.
     */
    if (xio_port == pcibr_soft->bs_xid) {
	pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
    } else if (flags & PCIIO_DMA_A64) {
	/* A64 DMA:
	 * always use 64-bit direct mapping,
	 * which always works.
	 * Device(x) was set up during
	 * dmamap allocation.
	 */

	/* attributes are already bundled up into bd_pci_addr.
	 */
	pci_addr = pcibr_dmamap->bd_pci_addr
	    | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT)
	    | xio_addr;

	/* Bridge Hardware WAR #482836:
	 * If the transfer is not cache aligned
	 * and the Bridge Rev is <= B, force
	 * prefetch to be off.
	 */
	if (flags & PCIBR_NOPREFETCH)
	    pci_addr &= ~PCI64_ATTR_PREF;

	PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
		    pcibr_dmamap->bd_dev,
		    "pcibr_dmamap_addr: (direct64): wanted paddr [0x%lx..0x%lx] "
		    "XIO port 0x%x offset 0x%lx, returning PCI 0x%lx\n",
		    paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));

    } else if (flags & PCIIO_FIXED) {
	/* A32 direct DMA:
	 * always use 32-bit direct mapping,
	 * which may fail.
	 * Device(x) was set up during
	 * dmamap allocation.
	 */

	/* Validate the target against the direct32 window: correct XIO
	 * destination port, and the whole range inside
	 * [bd_xio_addr .. bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE). */
	if (xio_port != pcibr_soft->bs_dir_xport)
	    pci_addr = 0;		/* wrong DIDN */
	else if (xio_addr < pcibr_dmamap->bd_xio_addr)
	    pci_addr = 0;		/* out of range */
	else if ((xio_addr + req_size) >
		 (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
	    pci_addr = 0;		/* out of range */
	else
	    pci_addr = pcibr_dmamap->bd_pci_addr +
		xio_addr - pcibr_dmamap->bd_xio_addr;

	PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
		    pcibr_dmamap->bd_dev,
		    "pcibr_dmamap_addr (direct32): wanted paddr [0x%lx..0x%lx] "
		    "XIO port 0x%x offset 0x%lx, returning PCI 0x%lx\n",
		    paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));

    } else {
	/* Page-mapped (PMU/ATE) DMA: program the ATEs allocated at
	 * pcibr_dmamap_alloc() time with this target's XIO address. */
	iopaddr_t               offset = IOPGOFF(xio_addr);
	bridge_ate_t            ate_proto = pcibr_dmamap->bd_ate_proto;
	int                     ate_count = IOPG(offset + req_size - 1) + 1;
	int                     ate_index = pcibr_dmamap->bd_ate_index;
	bridge_ate_t            ate;

	ate = ate_proto | (xio_addr - offset);
	ate |= (xio_port << ATE_TIDSHIFT);
	pci_addr = pcibr_dmamap->bd_pci_addr + offset;

	/* Fill in our mapping registers
	 * with the appropriate xtalk data,
	 * and hand back the PCI address.
	 */
	ASSERT(ate_count > 0);
	if (ate_count <= pcibr_dmamap->bd_ate_count) {
		ate_write(pcibr_soft, ate_index, ate_count, ate);

		PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
			    "pcibr_dmamap_addr (PMU) : wanted paddr "
			    "[0x%lx..0x%lx] returning PCI 0x%lx\n",
			    paddr, paddr + req_size - 1, pci_addr));

	} else {
		/* The number of ATE's required is greater than the number
		 * allocated for this map. One way this can happen is if
		 * pcibr_dmamap_alloc() was called with the PCIBR_NO_ATE_ROUNDUP
		 * flag, and then when that map is used (right now), the
		 * target address tells us we really did need to roundup.
		 * The other possibility is that the map is just plain too
		 * small to handle the requested target area.
		 */
		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
		            "pcibr_dmamap_addr (PMU) : wanted paddr "
			    "[0x%lx..0x%lx] ate_count 0x%x bd_ate_count 0x%x "
			    "ATE's required > number allocated\n",
			     paddr, paddr + req_size - 1,
			     ate_count, pcibr_dmamap->bd_ate_count));
		pci_addr = 0;
	}
    }
    return pci_addr;
}

/*
 * pcibr_dmamap_done: tear down the per-transfer state — only the
 * underlying xtalk map needs notification; the map itself stays
 * allocated for reuse until pcibr_dmamap_free().
 */
/*ARGSUSED */
void
pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
{
    xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
    PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
		"pcibr_dmamap_done: pcibr_dmamap=0x%lx\n", pcibr_dmamap));
}

/*
 * For each bridge, the DIR_OFF value in the Direct Mapping Register
 * determines the PCI to Crosstalk memory mapping to be used for all
 * 32-bit Direct Mapping memory accesses. This mapping can be to any
 * node in the system. This function will return that compact node id.
 */

/*ARGSUSED */
cnodeid_t
pcibr_get_dmatrans_node(vertex_hdl_t pconn_vhdl)
{
	pciio_info_t	pciio_info = pciio_info_get(pconn_vhdl);
	pcibr_soft_t	pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);

	/* The NASID embedded in the direct-map XIO base identifies the
	 * node that direct32 DMA lands on. */
	return nasid_to_cnodeid(NASID_GET(pcibr_soft->bs_dir_xbase));
}

/* NOTE(review): pcibr_dmatrans_addr() below is truncated at the page
 * boundary of this excerpt (continues on page 2 of 5). */
/*ARGSUSED */
iopaddr_t
pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
		    device_desc_t dev_desc,
		    paddr_t paddr,
		    size_t req_size,
		    unsigned flags)
{
    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
    vertex_hdl_t            xconn_vhdl = pcibr

/* [end of excerpt — pages 2-5 of pcibr_dvr.c continue elsewhere] */