cassini.c

From "Linux kernel source code" · C code · 2,335 lines total · page 1/5

C
2,335
Font size
		/* NOTE(review): this excerpt begins mid-function; the lines down
		 * to the first pair of closing braces are the tail of a
		 * PCS/SERDES bring-up sequence whose start is above this chunk.
		 */
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		/* Poll (bounded) for the self-clearing PCS reset bit. */
		limit = STOP_TRIES;
		while (limit-- > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			printk(KERN_WARNING "%s: PCS reset bit would not "
			       "clear [%08x].\n", cp->dev->name,
			       readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}

/* Re-read the PCS link status and bring the driver's software link
 * state (cp->lstate) in sync with it, raising/dropping the net-device
 * carrier as needed.
 *
 * Returns 1 when the caller should perform a chip reset (workaround
 * for the link-failure problem described below), 0 otherwise.
 */
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
		if (netif_msg_link(cp))
			printk(KERN_INFO "%s: PCS RemoteFault\n",
			       cp->dev->name);
	}

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		/* Link is up: only act on a down->up transition, and only
		 * once the device has been opened.
		 */
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		/* Link just went down. */
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.)  Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened && netif_msg_link(cp)) {
			printk(KERN_INFO "%s: PCS link down.\n",
			       cp->dev->name);
		}

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		/* Link was already down and still is. */
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem.  May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

/* PCS interrupt handler: only reacts to link-change events, deferring
 * the actual state update to cas_pcs_link_check().  Returns its value
 * (1 => caller should reset the chip).
 */
static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}

/* TX MAC interrupt handler: log error conditions and fold the chip's
 * expiring 16-bit TX event counters into the software statistics.
 * Always returns 0 (no reset requested).
 */
static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			cp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}

/* Download header-parser (HP) instruction-RAM firmware into the chip.
 * 'firmware' is a table terminated by an entry whose ->note is
 * NULL/zero; each entry is packed into the HI/MID/LOW instruction
 * RAM data words at successive RAM addresses.
 */
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}

/* Program the RX DMA engine: descriptor and completion ring bases,
 * kick registers, pause thresholds, interrupt mitigation, receive
 * page-size layout, and (optionally) the header parser.
 */
static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1,
				RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	/* Ring base = DMA base of the init block + offset of the ring
	 * within it.
	 */
	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* rx completion registers */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx comp 2-4 */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read selective clear regs to prevent spurious interrupts
	 * on reset because complete == kick.
	 * selective clear set up to prevent interrupts on resets
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		/* 2 is different from 3 and 4 */
		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	/* set up pause thresholds */
	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out dma reassembly buffers */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* make sure address register is 0 for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation */
#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* interrupt generation as a function of low water marks for
	 * free desc and completion entries. these are used to trigger
	 * housekeeping for rx descs. we don't use the free interrupt
	 * as it's not very useful
	 */
	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* Random early detect registers. useful for congestion avoidance.
	 * this should be tunable.
	 */
	writel(0x0, cp->regs + REG_RX_RED);

	/* receive page sizes. default == 2K (0x800) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* round mtu + offset. constrain to page size. */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	/* Pick the smallest power-of-two stride (1K..8K) that holds
	 * one MTU-sized buffer.
	 */
	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;
	cp->mtu_stride = 1 << (i + 10);
	val  = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	/* enable the header parser if desired */
	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}

/* Reset an RX completion-ring entry to its empty/initial state. */
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}

/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
	cas_page_t *page = cp->rx_pages[1][index];
	cas_page_t *new;

	/* page_count == 1 presumably means only the driver's own
	 * reference remains, so the spare can be reused as-is.
	 */
	if (page_count(page->buffer) == 1)
		return page;

	/* Otherwise dequeue a fresh page; on success, park the busy
	 * one on the in-use list (under rx_inuse_lock) for later
	 * recovery.  Returns NULL if no replacement was available.
	 */
	new = cas_page_dequeue(cp);
	if (new) {
		spin_lock(&cp->rx_inuse_lock);
		list_add(&page->list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}
	return new;
}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
				 const int index)
{
	cas_page_t **page0 = cp->rx_pages[0];
	cas_page_t **page1 = cp->rx_pages[1];

	/* swap if buffer is in use */
	if (page_count(page0[index]->buffer) > 1) {
		cas_page_t *new = cas_page_spare(cp, index);
		if (new) {
			page1[index] = page0[index];
			page0[index] = new;
		}
	}
	RX_USED_SET(page0[index], 0);
	return page0[index];
}

/* Release every queued RX-flow skb, then re-arm each descriptor of
 * ring 0 with a (possibly swapped-in) page.
 * NOTE(review): this function is cut off at the end of this chunk;
 * its remainder continues on the next page of the listing.
 */
static void cas_clean_rxds(struct cas *cp)
{
	/* only clean ring 0 as ring 1 is used for spare buffers */
	struct cas_rx_desc *rxd = cp->init_rxds[0];
	int i, size;

	/* release all rx flows */
	for (i = 0; i < N_RX_FLOWS; i++) {
		struct sk_buff *skb;
		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
			cas_skb_release(skb);
		}
	}

	/* initialize descriptors */
	size = RX_DESC_RINGN_SIZE(0);
	for (i = 0; i < size; i++) {
		cas_page_t *page = cas_page_swap(cp, 0, i);
		rxd[i].buffer = cpu_to_le64(page->dma_addr);
		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?