/*
 * tioce_provider.c - SN PCI provider routines for the SGI TIO CE ASIC
 * (arch/ia64/sn/pci).  Extraction note: web-viewer chrome removed.
 */
static u64tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int barrier){ unsigned long flags; u64 ct_addr; u64 mapaddr = 0; struct tioce_kernel *ce_kern; struct tioce_dmamap *map; int port; u64 dma_mask; dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask; /* cards must be able to address at least 31 bits */ if (dma_mask < 0x7fffffffUL) return 0; ct_addr = PHYS_TO_TIODMA(paddr); /* * If the device can generate 64 bit addresses, create a D64 map. * Since this should never fail, bypass the rest of the checks. */ if (dma_mask == ~0UL) { mapaddr = tioce_dma_d64(ct_addr); goto dma_map_done; } pcidev_to_tioce(pdev, NULL, &ce_kern, &port); spin_lock_irqsave(&ce_kern->ce_lock, flags); /* * D64 didn't work ... See if we have an existing map that covers * this address range. Must account for devices dma_mask here since * an existing map might have been done in a mode using more pci * address bits than this device can support. */ list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) { u64 last; last = map->ct_start + map->nbytes - 1; if (ct_addr >= map->ct_start && ct_addr + byte_count - 1 <= last && map->pci_start <= dma_mask) { map->refcnt++; mapaddr = map->pci_start + (ct_addr - map->ct_start); break; } } /* * If we don't have a map yet, and the card can generate 40 * bit addresses, try the M40/M40S modes. Note these modes do not * support a barrier bit, so if we need a consistent map these * won't work. */ if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) { /* * We have two options for 40-bit mappings: 16GB "super" ATE's * and 64MB "regular" ATE's. We'll try both if needed for a * given mapping but which one we try first depends on the * size. For requests >64MB, prefer to use a super page with * regular as the fallback. Otherwise, try in the reverse order. 
*/ if (byte_count > MB(64)) { mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, port, ct_addr, byte_count); if (!mapaddr) mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, ct_addr, byte_count); } else { mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, ct_addr, byte_count); if (!mapaddr) mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, port, ct_addr, byte_count); } } /* * 32-bit direct is the next mode to try */ if (!mapaddr && dma_mask >= 0xffffffffUL) mapaddr = tioce_dma_d32(pdev, ct_addr); /* * Last resort, try 32-bit ATE-based map. */ if (!mapaddr) mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr, byte_count); spin_unlock_irqrestore(&ce_kern->ce_lock, flags);dma_map_done: if (mapaddr & barrier) mapaddr = tioce_dma_barrier(mapaddr, 1); return mapaddr;}/** * tioce_dma - standard pci dma map interface * @pdev: pci device requesting the map * @paddr: system physical address to map into pci space * @byte_count: # bytes to map * * Simply call tioce_do_dma_map() to create a map with the barrier bit clear * in the address. */static u64tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count){ return tioce_do_dma_map(pdev, paddr, byte_count, 0);}/** * tioce_dma_consistent - consistent pci dma map interface * @pdev: pci device requesting the map * @paddr: system physical address to map into pci space * @byte_count: # bytes to map * * Simply call tioce_do_dma_map() to create a map with the barrier bit set * in the address. */ static u64tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count){ return tioce_do_dma_map(pdev, paddr, byte_count, 1);}/** * tioce_error_intr_handler - SGI TIO CE error interrupt handler * @irq: unused * @arg: pointer to tioce_common struct for the given CE * @pt: unused * * Handle a CE error interrupt. Simply a wrapper around a SAL call which * defers processing to the SGI prom. 
*/ static irqreturn_ttioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt){ struct tioce_common *soft = arg; struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT, soft->ce_pcibus.bs_persist_segment, soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0); return IRQ_HANDLED;}/** * tioce_kern_init - init kernel structures related to a given TIOCE * @tioce_common: ptr to a cached tioce_common struct that originated in prom */ static struct tioce_kernel *tioce_kern_init(struct tioce_common *tioce_common){ int i; u32 tmp; struct tioce *tioce_mmr; struct tioce_kernel *tioce_kern; tioce_kern = kzalloc(sizeof(struct tioce_kernel), GFP_KERNEL); if (!tioce_kern) { return NULL; } tioce_kern->ce_common = tioce_common; spin_lock_init(&tioce_kern->ce_lock); INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list); tioce_common->ce_kernel_private = (u64) tioce_kern; /* * Determine the secondary bus number of the port2 logical PPB. * This is used to decide whether a given pci device resides on * port1 or port2. Note: We don't have enough plumbing set up * here to use pci_read_config_xxx() so use the raw_pci_ops vector. */ raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment, tioce_common->ce_pcibus.bs_persist_busnum, PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp); tioce_kern->ce_port1_secondary = (u8) tmp; /* * Set PMU pagesize to the largest size available, and zero out * the ate's. 
*/ tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base; __sn_clrq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_PAGESIZE_MASK); __sn_setq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_256K_PAGESIZE); tioce_kern->ce_ate3240_pagesize = KB(256); for (i = 0; i < TIOCE_NUM_M40_ATES; i++) { tioce_kern->ce_ate40_shadow[i] = 0; writeq(0, &tioce_mmr->ce_ure_ate40[i]); } for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) { tioce_kern->ce_ate3240_shadow[i] = 0; writeq(0, &tioce_mmr->ce_ure_ate3240[i]); } return tioce_kern;}/** * tioce_force_interrupt - implement altix force_interrupt() backend for CE * @sn_irq_info: sn asic irq that we need an interrupt generated for * * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to * force a secondary interrupt to be generated. This is to work around an * asic issue where there is a small window of opportunity for a legacy device * interrupt to be lost. */static voidtioce_force_interrupt(struct sn_irq_info *sn_irq_info){ struct pcidev_info *pcidev_info; struct tioce_common *ce_common; struct tioce *ce_mmr; u64 force_int_val; if (!sn_irq_info->irq_bridge) return; if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE) return; pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; if (!pcidev_info) return; ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base; /* * irq_int_bit is originally set up by prom, and holds the interrupt * bit shift (not mask) as defined by the bit definitions in the * ce_adm_int mmr. These shifts are not the same for the * ce_adm_force_int register, so do an explicit mapping here to make * things clearer. 
*/ switch (sn_irq_info->irq_int_bit) { case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT: force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT; break; case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT: force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT; break; case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT: force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT; break; case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT: force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT; break; case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT: force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT; break; case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT: force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT; break; case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT: force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT; break; case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT: force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT; break; default: return; } writeq(force_int_val, &ce_mmr->ce_adm_force_int);}/** * tioce_target_interrupt - implement set_irq_affinity for tioce resident * functions. Note: only applies to line interrupts, not MSI's. * * @sn_irq_info: SN IRQ context * * Given an sn_irq_info, set the associated CE device's interrupt destination * register. Since the interrupt destination registers are on a per-ce-slot * basis, this will retarget line interrupts for all functions downstream of * the slot. 
*/static voidtioce_target_interrupt(struct sn_irq_info *sn_irq_info){ struct pcidev_info *pcidev_info; struct tioce_common *ce_common; struct tioce *ce_mmr; int bit; u64 vector; pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; if (!pcidev_info) return; ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base; bit = sn_irq_info->irq_int_bit; __sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT; vector |= sn_irq_info->irq_xtalkaddr; writeq(vector, &ce_mmr->ce_adm_int_dest[bit]); __sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); tioce_force_interrupt(sn_irq_info);}/** * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus * @prom_bussoft: Common prom/kernel struct representing the bus * * Replicates the tioce_common pointed to by @prom_bussoft in kernel * space. Allocates and initializes a kernel-only area for a given CE, * and sets up an irq for handling CE error interrupts. * * On successful setup, returns the kernel version of tioce_common back to * the caller. */static void *tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller){ struct tioce_common *tioce_common; /* * Allocate kernel bus soft and copy from prom. */ tioce_common = kzalloc(sizeof(struct tioce_common), GFP_KERNEL); if (!tioce_common) return NULL; memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common)); tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET; if (tioce_kern_init(tioce_common) == NULL) { kfree(tioce_common); return NULL; } if (request_irq(SGI_PCIASIC_ERROR, tioce_error_intr_handler, SA_SHIRQ, "TIOCE error", (void *)tioce_common)) printk(KERN_WARNING "%s: Unable to get irq %d. 
" "Error interrupts won't be routed for " "TIOCE bus %04x:%02x\n", __FUNCTION__, SGI_PCIASIC_ERROR, tioce_common->ce_pcibus.bs_persist_segment, tioce_common->ce_pcibus.bs_persist_busnum); return tioce_common;}static struct sn_pcibus_provider tioce_pci_interfaces = { .dma_map = tioce_dma, .dma_map_consistent = tioce_dma_consistent, .dma_unmap = tioce_dma_unmap, .bus_fixup = tioce_bus_fixup, .force_interrupt = tioce_force_interrupt, .target_interrupt = tioce_target_interrupt};/** * tioce_init_provider - init SN PCI provider ops for TIO CE */inttioce_init_provider(void){ sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces; return 0;}