⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 tioce_provider.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
	u64 dma_mask;

	/*
	 * NOTE(review): this is the tail of tioce_do_dma_map(); the function
	 * signature and the declarations of mapaddr, barrier, ct_addr,
	 * ce_kern, port, flags and map appear above this excerpt.
	 */

	/* consistent (barrier) maps are bounded by the coherent mask */
	dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask;

	/* cards must be able to address at least 31 bits */
	if (dma_mask < 0x7fffffffUL)
		return 0;

	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
		ct_addr = PHYS_TO_TIODMA(paddr);
	else
		ct_addr = paddr;

	/*
	 * If the device can generate 64 bit addresses, create a D64 map.
	 */
	if (dma_mask == ~0UL) {
		mapaddr = tioce_dma_d64(ct_addr, dma_flags);
		if (mapaddr)
			goto dma_map_done;
	}

	pcidev_to_tioce(pdev, NULL, &ce_kern, &port);

	spin_lock_irqsave(&ce_kern->ce_lock, flags);

	/*
	 * D64 didn't work ... See if we have an existing map that covers
	 * this address range.  Must account for devices dma_mask here since
	 * an existing map might have been done in a mode using more pci
	 * address bits than this device can support.
	 */
	list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) {
		u64 last;

		last = map->ct_start + map->nbytes - 1;
		if (ct_addr >= map->ct_start &&
		    ct_addr + byte_count - 1 <= last &&
		    map->pci_start <= dma_mask) {
			map->refcnt++;
			mapaddr = map->pci_start + (ct_addr - map->ct_start);
			break;
		}
	}

	/*
	 * If we don't have a map yet, and the card can generate 40
	 * bit addresses, try the M40/M40S modes.  Note these modes do not
	 * support a barrier bit, so if we need a consistent map these
	 * won't work.
	 */
	if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
		/*
		 * We have two options for 40-bit mappings:  16GB "super" ATEs
		 * and 64MB "regular" ATEs.  We'll try both if needed for a
		 * given mapping but which one we try first depends on the
		 * size.  For requests >64MB, prefer to use a super page with
		 * regular as the fallback. Otherwise, try in the reverse order.
		 */
		if (byte_count > MB(64)) {
			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
						  port, ct_addr, byte_count,
						  dma_flags);
			if (!mapaddr)
				mapaddr =
				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
						    ct_addr, byte_count,
						    dma_flags);
		} else {
			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
						  ct_addr, byte_count,
						  dma_flags);
			if (!mapaddr)
				mapaddr =
				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
						    port, ct_addr, byte_count,
						    dma_flags);
		}
	}

	/*
	 * 32-bit direct is the next mode to try
	 */
	if (!mapaddr && dma_mask >= 0xffffffffUL)
		mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);

	/*
	 * Last resort, try 32-bit ATE-based map.
	 */
	if (!mapaddr)
		mapaddr =
		    tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
				    byte_count, dma_flags);

	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);

dma_map_done:
	/* set the barrier bit in the returned bus address if requested */
	if (mapaddr && barrier)
		mapaddr = tioce_dma_barrier(mapaddr, 1);

	return mapaddr;
}

/**
 * tioce_dma - standard pci dma map interface
 * @pdev: pci device requesting the map
 * @paddr: system physical address to map into pci space
 * @byte_count: # bytes to map
 *
 * Simply call tioce_do_dma_map() to create a map with the barrier bit clear
 * in the address.
 */
static u64
tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{
	return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
}

/**
 * tioce_dma_consistent - consistent pci dma map interface
 * @pdev: pci device requesting the map
 * @paddr: system physical address to map into pci space
 * @byte_count: # bytes to map
 *
 * Simply call tioce_do_dma_map() to create a map with the barrier bit set
 * in the address.
 */
static u64
tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count,
		     int dma_flags)
{
	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
}

/**
 * tioce_error_intr_handler - SGI TIO CE error interrupt handler
 * @irq: unused
 * @arg: pointer to tioce_common struct for the given CE
 *
 * Handle a CE error interrupt.  Simply a wrapper around a SAL call which
 * defers processing to the SGI prom.
 */
static irqreturn_t
tioce_error_intr_handler(int irq, void *arg)
{
	struct tioce_common *soft = arg;
	struct ia64_sal_retval ret_stuff;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	/* let the prom process/log the error; a nonzero v0 is unrecoverable */
	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
			soft->ce_pcibus.bs_persist_segment,
			soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0);

	if (ret_stuff.v0)
		panic("tioce_error_intr_handler:  Fatal TIOCE error");

	return IRQ_HANDLED;
}

/**
 * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range
 * @tioce_kernel: TIOCE context to reserve ATEs for
 * @base: starting bus address to reserve
 * @limit: last bus address to reserve
 *
 * If base/limit falls within the range of bus space mapped through the
 * M32 space, reserve the resources corresponding to the range.
 */
static void
tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
{
	int ate_index, last_ate, ps;
	struct tioce __iomem *ce_mmr;

	ce_mmr = (struct tioce __iomem *)ce_kern->ce_common->ce_pcibus.bs_base;
	ps = ce_kern->ce_ate3240_pagesize;
	ate_index = ATE_PAGE(base, ps);
	last_ate = ate_index + ATE_NPAGES(base, limit-base+1, ps) - 1;

	/*
	 * ATEs below index 64 are never reserved here — presumably set
	 * aside for another use; TODO(review): confirm against the TIOCE
	 * ATE layout.
	 */
	if (ate_index < 64)
		ate_index = 64;

	/* clamp to the last valid M3240 ATE */
	if (last_ate >= TIOCE_NUM_M3240_ATES)
		last_ate = TIOCE_NUM_M3240_ATES - 1;

	while (ate_index <= last_ate) {
		u64 ate;

		/* 0xdeadbeef target marks the ATE as reserved, not usable */
		ate = ATE_MAKE(0xdeadbeef, ps, 0);
		ce_kern->ce_ate3240_shadow[ate_index] = ate;
		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
				 ate);
		ate_index++;
	}
}

/**
 * tioce_kern_init - init kernel structures related to a given TIOCE
 * @tioce_common: ptr to a cached tioce_common struct that originated in prom
 *
 * Allocates and initializes the kernel-private tioce_kernel context for a
 * CE: DMA-map list and lock, the port2 secondary bus number, PMU pagesize,
 * ATE shadow arrays, and reservation of ATEs covering PPB pio ranges.
 * Returns NULL on allocation failure.
 */
static struct tioce_kernel *
tioce_kern_init(struct tioce_common *tioce_common)
{
	int i;
	int ps;
	int dev;
	u32 tmp;
	unsigned int seg, bus;
	struct tioce __iomem *tioce_mmr;
	struct tioce_kernel *tioce_kern;

	tioce_kern = kzalloc(sizeof(struct tioce_kernel), GFP_KERNEL);
	if (!tioce_kern) {
		return NULL;
	}

	tioce_kern->ce_common = tioce_common;
	spin_lock_init(&tioce_kern->ce_lock);
	INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list);
	tioce_common->ce_kernel_private = (u64) tioce_kern;

	/*
	 * Determine the secondary bus number of the port2 logical PPB.
	 * This is used to decide whether a given pci device resides on
	 * port1 or port2.  Note:  We don't have enough plumbing set up
	 * here to use pci_read_config_xxx() so use the raw_pci_ops vector.
	 */
	seg = tioce_common->ce_pcibus.bs_persist_segment;
	bus = tioce_common->ce_pcibus.bs_persist_busnum;

	raw_pci_ops->read(seg, bus, PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1,&tmp);
	tioce_kern->ce_port1_secondary = (u8) tmp;

	/*
	 * Set PMU pagesize to the largest size available, and zero out
	 * the ATEs.
	 */
	tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
	tioce_mmr_clri(tioce_kern, &tioce_mmr->ce_ure_page_map,
		       CE_URE_PAGESIZE_MASK);
	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_ure_page_map,
		       CE_URE_256K_PAGESIZE);
	ps = tioce_kern->ce_ate3240_pagesize = KB(256);

	for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
		tioce_kern->ce_ate40_shadow[i] = 0;
		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate40[i], 0);
	}

	for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
		tioce_kern->ce_ate3240_shadow[i] = 0;
		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate3240[i], 0);
	}

	/*
	 * Reserve ATEs corresponding to reserved address ranges.  These
	 * include:
	 *
	 *	Memory space covered by each PPB mem base/limit register
	 * 	Memory space covered by each PPB prefetch base/limit register
	 *
	 * These bus ranges are for pio (downstream) traffic only, and so
	 * cannot be used for DMA.
	 */
	for (dev = 1; dev <= 2; dev++) {
		u64 base, limit;

		/* mem base/limit */
		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_MEMORY_BASE, 2, &tmp);
		base = (u64)tmp << 16;

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_MEMORY_LIMIT, 2, &tmp);
		limit = (u64)tmp << 16;
		limit |= 0xfffffUL;

		if (base < limit)
			tioce_reserve_m32(tioce_kern, base, limit);

		/*
		 * prefetch mem base/limit.  The tioce ppb's have 64-bit
		 * decoders, so read the upper portions w/o checking the
		 * attributes.
		 */
		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_PREF_MEMORY_BASE, 2, &tmp);
		base = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_PREF_BASE_UPPER32, 4, &tmp);
		base |= (u64)tmp << 32;

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_PREF_MEMORY_LIMIT, 2, &tmp);
		limit = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
		limit |= 0xfffffUL;

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_PREF_LIMIT_UPPER32, 4, &tmp);
		limit |= (u64)tmp << 32;

		/* only reserve if the range actually decodes into M32 space */
		if ((base < limit) && TIOCE_M32_ADDR(base))
			tioce_reserve_m32(tioce_kern, base, limit);
	}

	return tioce_kern;
}

/**
 * tioce_force_interrupt - implement altix force_interrupt() backend for CE
 * @sn_irq_info: sn asic irq that we need an interrupt generated for
 *
 * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to
 * force a secondary interrupt to be generated.  This is to work around an
 * asic issue where there is a small window of opportunity for a legacy device
 * interrupt to be lost.
 */
static void
tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
{
	struct pcidev_info *pcidev_info;
	struct tioce_common *ce_common;
	struct tioce_kernel *ce_kern;
	struct tioce __iomem *ce_mmr;
	u64 force_int_val;

	/* only applicable to irqs bridged through a TIOCE */
	if (!sn_irq_info->irq_bridge)
		return;

	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE)
		return;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
	ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;

	/*
	 * TIOCE Rev A workaround (PV 945826), force an interrupt by writing
	 * the TIO_INTx register directly (1/26/2006)
	 */
	if (ce_common->ce_rev == TIOCE_REV_A) {
		u64 int_bit_mask = (1ULL << sn_irq_info->irq_int_bit);
		u64 status;

		tioce_mmr_load(ce_kern, &ce_mmr->ce_adm_int_status, &status);
		/* only force if the interrupt is currently pending */
		if (status & int_bit_mask) {
			u64 force_irq = (1 << 8) | sn_irq_info->irq_irq;
			u64 ctalk = sn_irq_info->irq_xtalkaddr;
			u64 nasid, offset;

			nasid = (ctalk & CTALK_NASID_MASK) >> CTALK_NASID_SHFT;
			offset = (ctalk & CTALK_NODE_OFFSET);
			HUB_S(TIO_IOSPACE_ADDR(nasid, offset), force_irq);
		}
		return;
	}

	/*
	 * irq_int_bit is originally set up by prom, and holds the interrupt
	 * bit shift (not mask) as defined by the bit definitions in the
	 * ce_adm_int mmr.  These shifts are not the same for the
	 * ce_adm_force_int register, so do an explicit mapping here to make
	 * things clearer.
	 */
	switch (sn_irq_info->irq_int_bit) {
	case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT;
		break;
	default:
		/* not a per-device line interrupt; nothing to force */
		return;
	}
	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_force_int, force_int_val);
}

/**
 * tioce_target_interrupt - implement set_irq_affinity for tioce resident
 * functions.  Note:  only applies to line interrupts, not MSI's.
 *
 * @sn_irq_info: SN IRQ context
 *
 * Given an sn_irq_info, set the associated CE device's interrupt destination
 * register.  Since the interrupt destination registers are on a per-ce-slot
 * basis, this will retarget line interrupts for all functions downstream of
 * the slot.
 */
static void
tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
{
	struct pcidev_info *pcidev_info;
	struct tioce_common *ce_common;
	struct tioce_kernel *ce_kern;
	struct tioce __iomem *ce_mmr;
	int bit;
	u64 vector;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
	ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;

	bit = sn_irq_info->irq_int_bit;

	/* mask the line while the destination register is being rewritten */
	tioce_mmr_seti(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
	vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
	vector |= sn_irq_info->irq_xtalkaddr;
	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_int_dest[bit], vector);
	tioce_mmr_clri(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));

	/* re-force in case an interrupt fired while the line was masked */
	tioce_force_interrupt(sn_irq_info);
}

/**
 * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus
 * @prom_bussoft: Common prom/kernel struct representing the bus
 *
 * Replicates the tioce_common pointed to by @prom_bussoft in kernel
 * space.  Allocates and initializes a kernel-only area for a given CE,
 * and sets up an irq for handling CE error interrupts.
 *
 * On successful setup, returns the kernel version of tioce_common back to
 * the caller.
 */
static void *
tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	struct tioce_common *tioce_common;
	struct tioce_kernel *tioce_kern;
	struct tioce __iomem *tioce_mmr;

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */
	tioce_common = kzalloc(sizeof(struct tioce_common), GFP_KERNEL);
	if (!tioce_common)
		return NULL;

	memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));

	/*
	 * NOTE(review): only sizeof(struct tioce_common) bytes are mapped
	 * here, yet bs_base is later cast to struct tioce __iomem * and its
	 * MMRs dereferenced — confirm the mapping covers the full register
	 * space on this platform.
	 */
	tioce_common->ce_pcibus.bs_base = (unsigned long)
		ioremap(REGION_OFFSET(tioce_common->ce_pcibus.bs_base),
			sizeof(struct tioce_common));

	tioce_kern = tioce_kern_init(tioce_common);
	if (tioce_kern == NULL) {
		kfree(tioce_common);
		return NULL;
	}

	/*
	 * Clear out any transient errors before registering the error
	 * interrupt handler.
	 */
	tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL);
	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias,
		       ~0ULL);
	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, 0ULL);

	/* failure to route error irqs is logged but not fatal */
	if (request_irq(SGI_PCIASIC_ERROR,
			tioce_error_intr_handler,
			IRQF_SHARED, "TIOCE error", (void *)tioce_common))
		printk(KERN_WARNING
		       "%s:  Unable to get irq %d.  "
		       "Error interrupts won't be routed for "
		       "TIOCE bus %04x:%02x\n",
		       __FUNCTION__, SGI_PCIASIC_ERROR,
		       tioce_common->ce_pcibus.bs_persist_segment,
		       tioce_common->ce_pcibus.bs_persist_busnum);

	sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);

	return tioce_common;
}

/* sn pci provider ops vector for TIO CE bridges */
static struct sn_pcibus_provider tioce_pci_interfaces = {
	.dma_map = tioce_dma,
	.dma_map_consistent = tioce_dma_consistent,
	.dma_unmap = tioce_dma_unmap,
	.bus_fixup = tioce_bus_fixup,
	.force_interrupt = tioce_force_interrupt,
	.target_interrupt = tioce_target_interrupt
};

/**
 * tioce_init_provider - init SN PCI provider ops for TIO CE
 */
int
tioce_init_provider(void)
{
	sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces;
	return 0;
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -