lba_pci.c

From the "Linux kernel source tree" · C code · 1,593 lines total · page 1 of 4

C
1,593
字号
/*
 * NOTE(review): the "}" below closes a brace opened earlier in the file;
 * these first lines are the tail of elroy_cfg_read(), whose head is not
 * visible in this chunk.
 */
	}
	DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __FUNCTION__, tok, pos, *data);
	return 0;
}

/*
 * Write one PCI config-space register through an Elroy LBA.
 *
 * @d:    the LBA (Lower Bus Adapter) instance being accessed
 * @tok:  config token (bus/devfn) selecting the target device
 * @reg:  register offset within the device's config space
 * @data: value to store
 * @size: access width in bytes (1, 2 or 4)
 *
 * NOTE(review): the seemingly-unused locals (error, arb_mask,
 * error_config, status_control) are presumably referenced by name from
 * inside the LBA_CFG_* macros -- verify against the macro definitions
 * before removing them.
 */
static void
lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
{
	int error = 0;
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_ADDR_SETUP(d, tok | reg);
	switch (size) {
	/* Sub-word writes are steered onto the correct byte lane of the
	 * 32-bit CFG_DATA register via the low offset bits. */
	case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
	case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
	case 4: WRITE_REG32(data, data_reg);             break;
	}
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	LBA_CFG_RESTORE(d, d->hba.base_addr);
}

/*
 * LBA 4.0 config write code implements non-postable semantics
 * by doing a read of CONFIG ADDR after the write.
 *
 * Returns -EINVAL for an out-of-range offset or devfn, 1 when the probe
 * workaround decides the device is absent, 0 otherwise.
 */
static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	/* Reject out-of-range config offsets / device-function numbers. */
	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	if (!LBA_SKIP_PROBE(d)) {
		/* Original Workaround */
		lba_wr_cfg(d, tok, pos, (u32) data, size);
		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __FUNCTION__, tok, pos,data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) {
		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __FUNCTION__, tok, pos,data);
		return 1; /* New Workaround */
	}

	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data);

	/* Basic Algorithm */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
		   break;
	case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
		   break;
	case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
		   break;
	}
	/* flush posted write */
	lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}

/* Config-space accessors for pre-TR4.0 Elroy LBAs. */
static struct pci_ops elroy_cfg_ops = {
	.read =		elroy_cfg_read,
	.write =
elroy_cfg_write,
};

/*
 * The mercury_cfg_ops are slightly misnamed; they're also used for Elroy
 * TR4.0 as no additional bugs were found in this area between Elroy and
 * Mercury.
 *
 * Read @size bytes (1, 2 or 4) of config space for @devfn at offset @pos
 * into *@data.  Returns -EINVAL for an out-of-range offset or devfn,
 * 0 otherwise.
 */
static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1:
		/* steer sub-word reads onto the right byte lane of CFG_DATA */
		*data = READ_REG8(data_reg + (pos & 3));
		break;
	case 2:
		*data = READ_REG16(data_reg + (pos & 2));
		break;
	case 4:
		*data = READ_REG32(data_reg);
		break;	/* FIX: removed a duplicated, unreachable "break;" here */
	}
	DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
	return 0;
}

/*
 * LBA 4.0 config write code implements non-postable semantics
 * by doing a read of CONFIG ADDR after the write.
 *
 * Write @size bytes (1, 2 or 4) of @data into config space for @devfn at
 * offset @pos.  Returns -EINVAL for an out-of-range offset or devfn,
 * 0 otherwise.
 */
static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
	u32 local_bus = (bus->parent == NULL) ?
/* NOTE(review): tail of mercury_cfg_write(); the function head is on the
 * previous chunk line. */
			0 : bus->secondary;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	/* Reject out-of-range config offsets / device-function numbers. */
	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __FUNCTION__, tok, pos, data);

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1:
		/* steer sub-word writes onto the right byte lane of CFG_DATA */
		WRITE_REG8 (data, data_reg + (pos & 3));
		break;
	case 2:
		WRITE_REG16(data, data_reg + (pos & 2));
		break;
	case 4:
		WRITE_REG32(data, data_reg);
		break;
	}
	/* flush posted write */
	lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}

/* Config-space accessors for Mercury and Elroy TR4.0 LBAs. */
static struct pci_ops mercury_cfg_ops = {
	.read =		mercury_cfg_read,
	.write =	mercury_cfg_write,
};

/* BIOS-init hook for the HBA driver core; emits a trace and nothing else. */
static void
lba_bios_init(void)
{
	DBG(MODULE_NAME ": lba_bios_init\n");
}

#ifdef CONFIG_64BIT

/*
 * truncate_pat_collision:  Deal with overlaps or outright collisions
 *			between PAT PDC reported ranges.
 *
 *   Broken PA8800 firmware will report lmmio range that
 *   overlaps with CPU HPA. Just truncate the lmmio range.
 *
 *   BEWARE: conflicts with this lmmio range may be an
 *   elmmio range which is pointing down another rope.
 *
 *  FIXME: only deals with one collision per range...theoretically we
 *  could have several. Supporting more than one collision will get messy.
 *
 * Returns nonzero when @new is entirely covered by an existing child of
 * @root (caller should drop the range), 0 when @new was left alone or
 * merely truncated in place.
 */
static unsigned long
truncate_pat_collision(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp = root->child;

	/* degenerate range, range below the root, or empty resource tree */
	if (end <= start || start < root->start || !tmp)
		return 0;

	/* find first overlap */
	while (tmp && tmp->end < start)
		tmp = tmp->sibling;

	/* no entries overlap */
	if (!tmp)  return 0;

	/* found one that starts behind the new one
	** Don't need to do anything.
	*/
	if (tmp->start >= end) return 0;

	if (tmp->start <= start) {
		/* "front" of new one overlaps */
		new->start = tmp->end + 1;
		if (tmp->end >= end) {
			/* AACCKK! totally overlaps! drop this range.
			*/
			return 1;
		}
	}

	if (tmp->end < end ) {
		/* "end" of new one overlaps */
		new->end = tmp->start - 1;
	}

	printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
					"to [%lx,%lx]\n",
			start, end,
			(long)new->start, (long)new->end );

	return 0;	/* truncation successful */
}
#else
/* Non-64-bit builds: no PAT PDC ranges, so collisions cannot occur. */
#define truncate_pat_collision(r,n)  (0)
#endif

/*
** The algorithm is generic code.
** But it needs to access local data structures to get the IRQ base.
** Could make this a "pci_fixup_irq(bus, region)" but not sure
** it's worth it.
**
** Called by do_pci_scan_bus() immediately after each PCI bus is walked.
** Resources aren't allocated until recursive buswalk below HBA is completed.
*/
static void
lba_fixup_bus(struct pci_bus *bus)
{
	struct list_head *ln;
#ifdef FBB_SUPPORT
	u16 status;
#endif
	struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));
	int lba_portbase = HBA_PORT_BASE(ldev->hba.hba_num);

	DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
		bus, bus->secondary, bus->bridge->platform_data);

	/*
	** Properly Setup MMIO resources for this bus.
	** pci_alloc_primary_bus() mangles this.
	*/
	if (bus->self) {
		int i;

		/* PCI-PCI Bridge */
		pci_read_bridge_bases(bus);
		/* Claim the bridge's forwarding windows so the resource tree
		 * matches what the buswalk discovered. */
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			pci_claim_resource(bus->self, i);
		}
	} else {
		/* Host-PCI Bridge */
		int err, i;

		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.io_space.name,
			ldev->hba.io_space.start, ldev->hba.io_space.end,
			ldev->hba.io_space.flags);
		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.lmmio_space.name,
			ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
			ldev->hba.lmmio_space.flags);

		/* I/O port space is mandatory; failure to claim it is fatal. */
		err = request_resource(&ioport_resource, &(ldev->hba.io_space));
		if (err < 0) {
			lba_dump_res(&ioport_resource, 2);
			BUG();
		}

		/* advertize Host bridge resources to PCI bus */
		bus->resource[0] = &(ldev->hba.io_space);
		i = 1;

		/* elmmio is optional; a claim failure is only logged. */
		if (ldev->hba.elmmio_space.start) {
			err = request_resource(&iomem_resource,
					&(ldev->hba.elmmio_space));
			if (err < 0) {
				printk("FAILED: lba_fixup_bus() request for "
						"elmmio_space [%lx/%lx]\n",
						(long)ldev->hba.elmmio_space.start,
						(long)ldev->hba.elmmio_space.end);
				/* lba_dump_res(&iomem_resource, 2); */
				/* BUG(); */
			} else
				bus->resource[i++] = &(ldev->hba.elmmio_space);
		}

		/*   Overlaps with elmmio can (and should) fail here.
		 *   We will prune (or ignore) the distributed range.
		 *
		 *   FIXME: SBA code should register all elmmio ranges first.
		 *      that would take care of elmmio ranges routed
		 *	to a different rope (already discovered) from
		 *	getting registered *after* LBA code has already
		 *	registered its distributed lmmio range.
		 */
		if (truncate_pat_collision(&iomem_resource,
				       	&(ldev->hba.lmmio_space))) {
			printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
					(long)ldev->hba.lmmio_space.start,
					(long)ldev->hba.lmmio_space.end);
		} else {
			err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
			if (err < 0) {
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"lmmio_space [%lx/%lx]\n",
					(long)ldev->hba.lmmio_space.start,
					(long)ldev->hba.lmmio_space.end);
			} else
				bus->resource[i++] = &(ldev->hba.lmmio_space);
		}

#ifdef CONFIG_64BIT
		/* GMMIO is a distributed range. Every LBA/Rope gets part of it. */
		if (ldev->hba.gmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
			if (err < 0) {
				printk("FAILED: lba_fixup_bus() request for "
					"gmmio_space [%lx/%lx]\n",
					(long)ldev->hba.gmmio_space.start,
					(long)ldev->hba.gmmio_space.end);
				lba_dump_res(&iomem_resource, 2);
				BUG();
			}
			bus->resource[i++] = &(ldev->hba.gmmio_space);
		}
#endif

	}

	list_for_each(ln, &bus->devices) {
		int i;
		struct pci_dev *dev = pci_dev_b(ln);

		DBG("lba_fixup_bus() %s\n", pci_name(dev));

		/* Virtualize Device/Bridge Resources. */
		for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
			struct resource *res = &dev->resource[i];

			/* If resource not allocated - skip it */
			if (!res->start)
				continue;

			if (res->flags & IORESOURCE_IO) {
				/* Fold PCI port numbers into this HBA's slice
				 * of the global port space. */
				DBG("lba_fixup_bus() I/O Ports [%lx/%lx] -> ",
					res->start, res->end);
				res->start |= lba_portbase;
				res->end   |= lba_portbase;
				DBG("[%lx/%lx]\n", res->start, res->end);
			} else if (res->flags & IORESOURCE_MEM) {
				/*
				** Convert PCI (IO_VIEW) addresses to
				** processor (PA_VIEW) addresses
				*/
				DBG("lba_fixup_bus() MMIO [%lx/%lx] -> ",
					res->start, res->end);
				res->start = PCI_HOST_ADDR(HBA_DATA(ldev), res->start);
				res->end   = PCI_HOST_ADDR(HBA_DATA(ldev), res->end);
				DBG("[%lx/%lx]\n", res->start, res->end);
			} else {
				DBG("lba_fixup_bus() WTF? 0x%lx [%lx/%lx] XXX",
					res->flags, res->start, res->end);
			}

			/*
			** FIXME: this will result in whinging for devices
			** that share expansion ROMs (think quad tulip), but
			** isn't harmful.
			*/
			pci_claim_resource(dev, i);
		}

#ifdef FBB_SUPPORT
		/*
		** If one device does not support FBB transfers,
		** No one on the bus can be allowed to use them.
		*/
		(void) pci_read_config_word(dev, PCI_STATUS, &status);
		bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif

		/*
		** P2PB's have no IRQs. ignore them.
		*/
		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
			continue;

		/* Adjust INTERRUPT_LINE for this dev */
		iosapic_fixup_irq(ldev->iosapic_obj, dev);
	}
	/* NOTE(review): lba_fixup_bus() continues beyond this chunk (page 1/4
	 * of the file); its closing brace is not visible here. */

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?