⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 acenic.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
	/* NOTE(review): this span is the middle of the driver init routine
	 * (presumably ace_init()); its head and tail lie outside this chunk.
	 * Formatting below restores the original line structure only. */
	writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
	       &regs->HostCtrl);
#endif
	readl(&regs->HostCtrl);		/* PCI write posting */

	/*
	 * Stop the NIC CPU and clear pending interrupts
	 */
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	readl(&regs->CpuCtrl);		/* PCI write posting */
	writel(0, &regs->Mb0Lo);

	/* Chip revision is kept in the top nibble of HostCtrl:
	 * 4/5 = Tigon I, 6 = Tigon II (per the switch below). */
	tig_ver = readl(&regs->HostCtrl) >> 28;

	switch(tig_ver){
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
	case 4:
	case 5:
		printk(KERN_INFO "  Tigon I  (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, tigonFwReleaseMajor, tigonFwReleaseMinor,
		       tigonFwReleaseFix);
		writel(0, &regs->LocalCtrl);
		ap->version = 1;
		ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
		break;
#endif
	case 6:
		printk(KERN_INFO "  Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, tigon2FwReleaseMajor, tigon2FwReleaseMinor,
		       tigon2FwReleaseFix);
		/* Tigon II has a second CPU; halt it as well. */
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
		readl(&regs->CpuBCtrl);		/* PCI write posting */
		/*
		 * The SRAM bank size does _not_ indicate the amount
		 * of memory on the card, it controls the _bank_ size!
		 * Ie. a 1MB AceNIC will have two banks of 512KB.
		 */
		writel(SRAM_BANK_512K, &regs->LocalCtrl);
		writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
		ap->version = 2;
		ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
		break;
	default:
		printk(KERN_WARNING "  Unsupported Tigon version detected "
		       "(%i)\n", tig_ver);
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * ModeStat _must_ be set after the SRAM settings as this change
	 * seems to corrupt the ModeStat and possible other registers.
	 * The SRAM settings survive resets and setting it to the same
	 * value a second time works as well. This is what caused the
	 * `Firmware not running' problem on the Tigon II.
	 */
#ifdef __BIG_ENDIAN
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
	readl(&regs->ModeStat);		/* PCI write posting */

	/* Read the 6-byte MAC address from EEPROM offset 0x8c, one byte at
	 * a time: the first 4 bytes accumulate into mac1 (only the low 16
	 * bits end up used below), the last 4 into mac2. read_eeprom_byte()
	 * returns negative on failure. */
	mac1 = 0;
	for(i = 0; i < 4; i++) {
		int t;

		mac1 = mac1 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac1 |= (t & 0xff);
	}
	mac2 = 0;
	for(i = 4; i < 8; i++) {
		int t;

		mac2 = mac2 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac2 |= (t & 0xff);
	}

	writel(mac1, &regs->MacAddrHi);
	writel(mac2, &regs->MacAddrLo);

	/* Unpack the two registers' worth of bytes into the netdev's
	 * canonical dev_addr[0..5] representation. */
	dev->dev_addr[0] = (mac1 >> 8) & 0xff;
	dev->dev_addr[1] = mac1 & 0xff;
	dev->dev_addr[2] = (mac2 >> 24) & 0xff;
	dev->dev_addr[3] = (mac2 >> 16) & 0xff;
	dev->dev_addr[4] = (mac2 >> 8) & 0xff;
	dev->dev_addr[5] = mac2 & 0xff;

	printk("MAC: %s\n", print_mac(mac, dev->dev_addr));

	/*
	 * Looks like this is necessary to deal with on all architectures,
	 * even this %$#%$# N440BX Intel based thing doesn't get it right.
	 * Ie. having two NICs in the machine, one will have the cache
	 * line set at boot time, the other will not.
	 */

	pdev = ap->pdev;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; convert to
	 * bytes for comparison with SMP_CACHE_BYTES. */
	cache_size <<= 2;
	if (cache_size != SMP_CACHE_BYTES) {
		printk(KERN_INFO "  PCI cache line size set incorrectly "
		       "(%i bytes) by BIOS/FW, ", cache_size);
		/* Only fix it up when the BIOS value is too small; a larger
		 * value is reported but left alone. */
		if (cache_size > SMP_CACHE_BYTES)
			printk("expecting %i\n", SMP_CACHE_BYTES);
		else {
			printk("correcting to %i\n", SMP_CACHE_BYTES);
			pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
					      SMP_CACHE_BYTES >> 2);
		}
	}

	pci_state = readl(&regs->PciState);
	printk(KERN_INFO "  PCI bus width: %i bits, speed: %iMHz, "
	       "latency: %i clks\n",
	       (pci_state & PCI_32BIT) ? 32 : 64,
	       (pci_state & PCI_66MHZ) ? 66 : 33,
	       ap->pci_latency);

	/*
	 * Set the max DMA transfer size. Seems that for most systems
	 * the performance is better when no MAX parameter is
	 * set. However for systems enabling PCI write and invalidate,
	 * DMA writes must be set to the L1 cache line size to get
	 * optimal performance.
	 *
	 * The default is now to turn the PCI write and invalidate off
	 * - that is what Alteon does for NT.
	 */
	tmp = READ_CMD_MEM | WRITE_CMD_MEM;

	if (ap->version >= 2) {
		tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
		/*
		 * Tuning parameters only supported for 8 cards
		 */
		if (board_idx == BOARD_IDX_OVERFLOW ||
		    dis_pci_mem_inval[board_idx]) {
			/* Default / requested policy: turn MWI off. */
			if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
				printk(KERN_INFO "  Disabling PCI memory "
				       "write and invalidate\n");
			}
		} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
			/* MWI left enabled: clamp DMA write bursts to the
			 * cache line size, or disable MWI for unsupported
			 * line sizes. */
			printk(KERN_INFO "  PCI memory write & invalidate "
			       "enabled by BIOS, enabling counter measures\n");

			switch(SMP_CACHE_BYTES) {
			case 16:
				tmp |= DMA_WRITE_MAX_16;
				break;
			case 32:
				tmp |= DMA_WRITE_MAX_32;
				break;
			case 64:
				tmp |= DMA_WRITE_MAX_64;
				break;
			case 128:
				tmp |= DMA_WRITE_MAX_128;
				break;
			default:
				printk(KERN_INFO "  Cache line size %i not "
				       "supported, PCI write and invalidate "
				       "disabled\n", SMP_CACHE_BYTES);
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
			}
		}
	}

#ifdef __sparc__
	/*
	 * On this platform, we know what the best dma settings
	 * are.  We use 64-byte maximum bursts, because if we
	 * burst larger than the cache line size (or even cross
	 * a 64byte boundary in a single burst) the UltraSparc
	 * PCI controller will disconnect at 64-byte multiples.
	 *
	 * Read-multiple will be properly enabled above, and when
	 * set will give the PCI controller proper hints about
	 * prefetching.
	 */
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_64;
	tmp |= DMA_WRITE_MAX_64;
#endif

#ifdef __alpha__
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_128;
	/*
	 * All the docs say MUST NOT. Well, I did.
	 * Nothing terrible happens, if we load wrong size.
	 * Bit w&i still works better!
	 */
	tmp |= DMA_WRITE_MAX_128;
#endif
	writel(tmp, &regs->PciState);

#if 0
	/*
	 * The Host PCI bus controller driver has to set FBB.
	 * If all devices on that PCI bus support FBB, then the controller
	 * can enable FBB support in the Host PCI Bus controller (or on
	 * the PCI-PCI bridge if that applies).
	 * -ggg
	 */
	/*
	 * I have received reports from people having problems when this
	 * bit is enabled.
	 */
	if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
		printk(KERN_INFO "  Enabling PCI Fast Back to Back\n");
		ap->pci_command |= PCI_COMMAND_FAST_BACK;
		pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
	}
#endif

	/*
	 * Configure DMA attributes.
	 */
	/* Prefer a 64-bit DMA mask (DAC addressing); fall back to 32-bit,
	 * and bail out entirely if neither is usable. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		ap->pci_using_dac = 1;
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		ap->pci_using_dac = 0;
	} else {
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * Initialize the generic info block and the command+event rings
	 * and the control blocks for the transmit and receive rings
	 * as they need to be setup once and for all.
	 */
	if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
					  &ap->info_dma))) {
		ecode = -EAGAIN;
		goto init_error;
	}
	ap->info = info;

	/*
	 * Get the memory for the skb rings.
	 */
	if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
		ecode = -EAGAIN;
		goto init_error;
	}

	ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
			    DRV_NAME, dev);
	if (ecode) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       DRV_NAME, pdev->irq);
		goto init_error;
	} else
		dev->irq = pdev->irq;

#ifdef INDEX_DEBUG
	spin_lock_init(&ap->debug_lock);
	ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
	ap->last_std_rx = 0;
	ap->last_mini_rx = 0;
#endif

	memset(ap->info, 0, sizeof(struct ace_info));
	memset(ap->skb, 0, sizeof(struct ace_skb));

	ace_load_firmware(dev);
	ap->fw_running = 0;

	/* Tell the NIC where the shared info block lives (split into
	 * high/low 32-bit register halves). */
	tmp_ptr = ap->info_dma;
	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
	writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);

	memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));

	set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
	info->evt_ctrl.flags = 0;

	/* Publish the event producer index before handing its DMA address
	 * to the NIC; wmb() orders the zeroing ahead of that. */
	*(ap->evt_prd) = 0;
	wmb();
	set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
	writel(0, &regs->EvtCsm);

	/* Command ring lives in NIC-local memory at fixed offset 0x100. */
	set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
	info->cmd_ctrl.flags = 0;
	info->cmd_ctrl.max_len = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRng[i]);

	writel(0, &regs->CmdPrd);
	writel(0, &regs->CmdCsm);

	/* Point stats2_ptr at the stats area inside the info block
	 * (hand-rolled offsetof via a null-pointer cast). */
	tmp_ptr = ap->info_dma;
	tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
	set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);

	/* Standard RX ring: first sub-ring of the shared rx ring area. */
	set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
	info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
	info->rx_std_ctrl.flags =
	  RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	memset(ap->rx_std_ring, 0,
	       RX_STD_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_STD_RING_ENTRIES; i++)
		ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;

	ap->rx_std_skbprd = 0;
	atomic_set(&ap->cur_rx_bufs, 0);

	/* Jumbo RX ring follows the standard ring in the same DMA area. */
	set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
	info->rx_jumbo_ctrl.max_len = 0;
	info->rx_jumbo_ctrl.flags =
	  RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	memset(ap->rx_jumbo_ring, 0,
	       RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
		ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;

	ap->rx_jumbo_skbprd = 0;
	atomic_set(&ap->cur_jumbo_bufs, 0);

	memset(ap->rx_mini_ring, 0,
	       RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));

	/* Mini RX ring is only supported on Tigon II; disable the RCB
	 * on Tigon I. */
	if (ap->version >= 2) {
		set_aceaddr(&info->rx_mini_ctrl.rngptr,
			    (ap->rx_ring_base_dma +
			     (sizeof(struct rx_desc) *
			      (RX_STD_RING_ENTRIES +
			       RX_JUMBO_RING_ENTRIES))));
		info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
		info->rx_mini_ctrl.flags =
		  RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|ACE_RCB_VLAN_FLAG;

		for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
			ap->rx_mini_ring[i].flags =
				BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
	} else {
		set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
		info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
		info->rx_mini_ctrl.max_len = 0;
	}

	ap->rx_mini_skbprd = 0;
	atomic_set(&ap->cur_mini_bufs, 0);

	/* RX return ring sits after std + jumbo + mini rings. */
	set_aceaddr(&info->rx_return_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) *
		      (RX_STD_RING_ENTRIES +
		       RX_JUMBO_RING_ENTRIES +
		       RX_MINI_RING_ENTRIES))));
	info->rx_return_ctrl.flags = 0;
	info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;

	memset(ap->rx_return_ring, 0,
	       RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));

	set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
	*(ap->rx_ret_prd) = 0;

	writel(TX_RING_BASE, &regs->WinBase);

	if (ACE_IS_TIGON_I(ap)) {
		/* Tigon I: TX ring lives in NIC-local SRAM, accessed
		 * through the register window; clear it word by word. */
		ap->tx_ring = (__force struct tx_desc *) regs->Window;
		for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
				 * sizeof(struct tx_desc)) / sizeof(u32); i++)
			writel(0, (__force void __iomem *)ap->tx_ring  + i * 4);

		set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
	} else {
		/* Tigon II: TX ring is in host memory. */
		memset(ap->tx_ring, 0,
		       MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
		set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
	}

	info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
	tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	/*
	 * The Tigon I does not like having the TX ring in host memory ;-(
	 */
	if (!ACE_IS_TIGON_I(ap))
		tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
	tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
	info->tx_ctrl.flags = tmp;

	set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);

	/*
	 * Potential item for tuning parameter
	 */
#if 0 /* NO */
	writel(DMA_THRESH_16W, &regs->DmaReadCfg);
	writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
	writel(DMA_THRESH_8W, &regs->DmaReadCfg);
	writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif

	writel(0, &regs->MaskInt);
	writel(1, &regs->IfIdx);
#if 0
	/*
	 * McKinley boxes do not like us fiddling with AssistState
	 * this early
	 */
	writel(1, &regs->AssistState);
#endif

	writel(DEF_STAT, &regs->TuneStatTicks);
	writel(DEF_TRACE, &regs->TuneTrace);

	ace_set_rxtx_parms(dev, 0);

	/* Apply per-board module parameters; with more boards than
	 * parameter slots (BOARD_IDX_OVERFLOW) they are ignored. */
	if (board_idx == BOARD_IDX_OVERFLOW) {
		printk(KERN_WARNING "%s: more than %i NICs detected, "
		       "ignoring module parameters!\n",
		       ap->name, ACE_MAX_MOD_PARMS);
	} else if (board_idx >= 0) {
		if (tx_coal_tick[board_idx])
			writel(tx_coal_tick[board_idx],
			       &regs->TuneTxCoalTicks);
		if (max_tx_desc[board_idx])
			writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);
		if (rx_coal_tick[board_idx])
			writel(rx_coal_tick[board_idx],
			       &regs->TuneRxCoalTicks);
		if (max_rx_desc[board_idx])
			writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);
		if (trace[board_idx])
			writel(trace[board_idx], &regs->TuneTrace);
		if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
			writel(tx_ratio[board_idx], &regs->TxBufRat);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -