
📄 via-velocity.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
 *	Replace the current skb that is scheduled for Rx processing by a
 *	shorter, immediately allocated skb, if the received packet is small
 *	enough. This function returns a negative value if the received
 *	packet is too big or if memory is exhausted.
 */
static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
				   struct velocity_info *vptr)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *new_skb;

		new_skb = dev_alloc_skb(pkt_size + 2);
		if (new_skb) {
			new_skb->dev = vptr->dev;
			new_skb->ip_summed = rx_skb[0]->ip_summed;

			if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
				skb_reserve(new_skb, 2);

			skb_copy_from_linear_data(rx_skb[0], new_skb->data,
						  pkt_size);
			*rx_skb = new_skb;
			ret = 0;
		}
	}
	return ret;
}

/**
 *	velocity_iph_realign	-	IP header alignment
 *	@vptr: velocity we are handling
 *	@skb: network layer packet buffer
 *	@pkt_size: received data size
 *
 *	Align IP header on a 2 bytes boundary. This behavior can be
 *	configured by the user.
 */
static inline void velocity_iph_realign(struct velocity_info *vptr,
					struct sk_buff *skb, int pkt_size)
{
	/* FIXME - memmove ? */
	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
		int i;

		for (i = pkt_size; i >= 0; i--)
			*(skb->data + i + 2) = *(skb->data + i);
		skb_reserve(skb, 2);
	}
}
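The FIXME above asks whether the descending byte-copy loop could simply be a memmove(); since source and destination overlap, memmove() (unlike memcpy()) is the safe single-call equivalent. Below is a minimal userspace sketch of that equivalence only: the buffer, sizes and the realign_by_two() name are illustrative and not part of the driver, and an in-driver variant would still need the trailing skb_reserve(skb, 2).

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the loop in velocity_iph_realign():
 * shift the received frame up by two bytes. The original loop copies
 * indices 0..pkt_size, i.e. pkt_size + 1 bytes, hence the "+ 1" here. */
static void realign_by_two(unsigned char *data, int pkt_size)
{
	memmove(data + 2, data, pkt_size + 1);
}

int main(void)
{
	unsigned char buf[16] = "abcdefgh";	/* pretend frame with spare tail room */

	realign_by_two(buf, 8);
	printf("%.8s\n", buf + 2);		/* prints "abcdefgh" */
	return 0;
}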
/**
 *	velocity_receive_frame	-	received packet processor
 *	@vptr: velocity we are handling
 *	@idx: ring index
 *
 *	A packet has arrived. We process the packet and if appropriate
 *	pass the frame up the network stack.
 */
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
	struct net_device_stats *stats = &vptr->stats;
	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
	struct rx_desc *rd = &(vptr->rd_ring[idx]);
	int pkt_len = rd->rdesc0.len;
	struct sk_buff *skb;

	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
		stats->rx_length_errors++;
		return -EINVAL;
	}

	if (rd->rdesc0.RSR & RSR_MAR)
		vptr->stats.multicast++;

	skb = rd_info->skb;

	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
				    vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);

	/*
	 *	Drop frame not meeting IEEE 802.3
	 */
	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
		if (rd->rdesc0.RSR & RSR_RL) {
			stats->rx_length_errors++;
			return -EINVAL;
		}
	}

	pci_action = pci_dma_sync_single_for_device;

	velocity_rx_csum(rd, skb);

	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
		velocity_iph_realign(vptr, skb, pkt_len);
		pci_action = pci_unmap_single;
		rd_info->skb = NULL;
	}

	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
		   PCI_DMA_FROMDEVICE);

	skb_put(skb, pkt_len - 4);
	skb->protocol = eth_type_trans(skb, vptr->dev);

	stats->rx_bytes += pkt_len;
	netif_rx(skb);

	return 0;
}

/**
 *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
 *	@vptr: velocity
 *	@idx: ring index
 *
 *	Allocate a new full sized buffer for the reception of a frame and
 *	map it into PCI space for the hardware to use. The hardware
 *	requires *64* byte alignment of the buffer which makes life
 *	less fun than would be ideal.
 */
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
	struct rx_desc *rd = &(vptr->rd_ring[idx]);
	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);

	rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
	if (rd_info->skb == NULL)
		return -ENOMEM;

	/*
	 *	Do the gymnastics to get the buffer head for data at
	 *	64byte alignment.
	 */
	skb_reserve(rd_info->skb,
		    64 - ((unsigned long) rd_info->skb->data & 63));
	rd_info->skb->dev = vptr->dev;
	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
					  vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);

	/*
	 *	Fill in the descriptor to match
	 */
	*((u32 *) &(rd->rdesc0)) = 0;
	rd->len = cpu_to_le32(vptr->rx_buf_sz);
	rd->inten = 1;
	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
	rd->pa_high = 0;
	return 0;
}
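The 64 extra bytes from dev_alloc_skb() and the skb_reserve() arithmetic above exist only to land skb->data on a 64-byte boundary for the hardware. A small self-contained sketch of the same calculation follows; the pad_to_64() name, the buffer size and the printout are illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Bytes to skip so a pointer lands on the next 64-byte boundary;
 * mirrors the skb_reserve() argument used in velocity_alloc_rx_buf(). */
static size_t pad_to_64(const void *p)
{
	return 64 - ((uintptr_t) p & 63);
}

int main(void)
{
	unsigned char *buf = malloc(2048 + 64);	/* over-allocate, like dev_alloc_skb(rx_buf_sz + 64) */
	unsigned char *data;

	if (!buf)
		return 1;
	data = buf + pad_to_64(buf);
	printf("raw %p -> aligned %p (mod 64 = %lu)\n",
	       (void *) buf, (void *) data,
	       (unsigned long) ((uintptr_t) data & 63));	/* always prints 0 */
	free(buf);
	return 0;
}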
/**
 *	velocity_tx_srv		-	transmit interrupt service
 *	@vptr: velocity
 *	@status:
 *
 *	Scan the queues looking for transmitted packets that
 *	we can complete and clean up. Update any statistics as
 *	necessary.
 */
static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
{
	struct tx_desc *td;
	int qnum;
	int full = 0;
	int idx;
	int works = 0;
	struct velocity_td_info *tdinfo;
	struct net_device_stats *stats = &vptr->stats;

	for (qnum = 0; qnum < vptr->num_txq; qnum++) {
		for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
			idx = (idx + 1) % vptr->options.numtx) {

			/*
			 *	Get Tx Descriptor
			 */
			td = &(vptr->td_rings[qnum][idx]);
			tdinfo = &(vptr->td_infos[qnum][idx]);

			if (td->tdesc0.owner == OWNED_BY_NIC)
				break;

			if ((works++ > 15))
				break;

			if (td->tdesc0.TSR & TSR0_TERR) {
				stats->tx_errors++;
				stats->tx_dropped++;
				if (td->tdesc0.TSR & TSR0_CDH)
					stats->tx_heartbeat_errors++;
				if (td->tdesc0.TSR & TSR0_CRS)
					stats->tx_carrier_errors++;
				if (td->tdesc0.TSR & TSR0_ABT)
					stats->tx_aborted_errors++;
				if (td->tdesc0.TSR & TSR0_OWC)
					stats->tx_window_errors++;
			} else {
				stats->tx_packets++;
				stats->tx_bytes += tdinfo->skb->len;
			}
			velocity_free_tx_buf(vptr, tdinfo);
			vptr->td_used[qnum]--;
		}
		vptr->td_tail[qnum] = idx;

		if (AVAIL_TD(vptr, qnum) < 1) {
			full = 1;
		}
	}
	/*
	 *	Look to see if we should kick the transmit network
	 *	layer for more work.
	 */
	if (netif_queue_stopped(vptr->dev) && (full == 0)
	    && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
		netif_wake_queue(vptr->dev);
	}
	return works;
}
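As the kernel-doc above says, the cleanup loop walks each transmit ring from td_tail, stops at the first descriptor still owned by the NIC (or after 16 completions), and advances the tail with wrap-around while decrementing td_used. A toy model of just that tail/used bookkeeping is below; NUM_TX, reclaim() and the counts are made up for illustration and are not driver code.

#include <stdio.h>

#define NUM_TX	64	/* illustrative ring size; the driver uses vptr->options.numtx */

/* Reclaim 'done' completed descriptors, advancing the tail with wrap-around,
 * the way velocity_tx_srv() advances td_tail and decrements td_used. */
static void reclaim(unsigned int *tail, unsigned int *used, unsigned int done)
{
	while (done-- && *used > 0) {
		*tail = (*tail + 1) % NUM_TX;
		(*used)--;
	}
}

int main(void)
{
	unsigned int tail = 60, used = 10;

	reclaim(&tail, &used, 6);
	printf("tail=%u used=%u\n", tail, used);	/* tail wraps past the ring end: tail=2 used=4 */
	return 0;
}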
/**
 *	velocity_print_link_status	-	link status reporting
 *	@vptr: velocity to report on
 *
 *	Turn the link status of the velocity card into a kernel log
 *	description of the new link state, detailing speed and duplex
 *	status.
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{

	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);

		if (vptr->mii_status & VELOCITY_SPEED_1000)
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
		else if (vptr->mii_status & VELOCITY_SPEED_100)
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
		else
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
		else
			VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
	} else {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
		switch (vptr->options.spd_dpx) {
		case SPD_DPX_100_HALF:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
			break;
		case SPD_DPX_100_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
			break;
		case SPD_DPX_10_HALF:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
			break;
		case SPD_DPX_10_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
			break;
		default:
			break;
		}
	}
}

/**
 *	velocity_error	-	handle error from controller
 *	@vptr: velocity
 *	@status: card status
 *
 *	Process an error report from the hardware and attempt to recover
 *	the card itself. At the moment we cannot recover from some
 *	theoretically impossible errors but this could be fixed using
 *	the pci_device_failed logic to bounce the hardware.
 *
 */
static void velocity_error(struct velocity_info *vptr, int status)
{

	if (status & ISR_TXSTLI) {
		struct mac_regs __iomem *regs = vptr->mac_regs;

		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
		writew(TRDCSR_RUN, &regs->TDCSRClr);
		netif_stop_queue(vptr->dev);

		/* FIXME: port over the pci_device_failed code and use it
		   here */
	}

	if (status & ISR_SRCI) {
		struct mac_regs __iomem *regs = vptr->mac_regs;
		int linked;

		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
			vptr->mii_status = check_connection_type(regs);

			/*
			 *	If it is a 3119, disable frame bursting in
			 *	halfduplex mode and enable it in fullduplex
			 *	mode
			 */
			if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
			else
				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);

			/*
			 *	Only enable CD heart beat counter in 10HD mode
			 */
			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) {
				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
			} else {
				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
			}
		}
		/*
		 *	Get link status from PHYSR0
		 */
		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;

		if (linked) {
			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
			netif_carrier_on(vptr->dev);
		} else {
			vptr->mii_status |= VELOCITY_LINK_FAIL;
			netif_carrier_off(vptr->dev);
		}

		velocity_print_link_status(vptr);
		enable_flow_control_ability(vptr);

		/*
		 *	Re-enable auto-polling because SRCI will disable
		 *	auto-polling
		 */
		enable_mii_autopoll(regs);

		if (vptr->mii_status & VELOCITY_LINK_FAIL)
			netif_stop_queue(vptr->dev);
		else
			netif_wake_queue(vptr->dev);
	}

	if (status & ISR_MIBFI)
		velocity_update_hw_mibs(vptr);
	if (status & ISR_LSTEI)
		mac_rx_queue_wake(vptr->mac_regs);
}

/**
 *	velocity_free_tx_buf	-	free transmit buffer
 *	@vptr: velocity
 *	@tdinfo: buffer
 *
 *	Release a transmit buffer. If the buffer was preallocated then
 *	recycle it, if not then unmap the buffer.
 */
static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
{
	struct sk_buff *skb = tdinfo->skb;
	int i;

	/*
	 *	Don't unmap the pre-allocated tx_bufs
	 */
	if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) {

		for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
#else
			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
#endif
			tdinfo->skb_dma[i] = 0;
		}
	}
	dev_kfree_skb_irq(skb);
	tdinfo->skb = NULL;
}
/**
 *	velocity_open		-	interface activation callback
 *	@dev: network layer device to open
 *
 *	Called when the network layer brings the interface up. Returns
 *	a negative posix error code on failure, or zero on success.
 *
 *	All the ring allocation and set up is done on open for this
 *	adapter to minimise memory usage when inactive.
 */
static int velocity_open(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret;

	ret = velocity_init_rings(vptr);
	if (ret < 0)
		goto out;

	ret = velocity_init_rd_ring(vptr);
	if (ret < 0)
		goto err_free_desc_rings;

	ret = velocity_init_td_ring(vptr);
	if (ret < 0)
		goto err_free_rd_ring;

	/* Ensure chip is running */
	pci_set_power_state(vptr->pdev, PCI_D0);

	velocity_init_registers(vptr, VELOCITY_INIT_COLD);

	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
			  dev->name, dev);
	if (ret < 0) {
		/* Power down the chip */
		pci_set_power_state(vptr->pdev, PCI_D3hot);
		goto err_free_td_ring;
	}

	mac_enable_int(vptr->mac_regs);
	netif_start_queue(dev);
	vptr->flags |= VELOCITY_FLAGS_OPENED;
out:
	return ret;

err_free_td_ring:
	velocity_free_td_ring(vptr);
err_free_rd_ring:
	velocity_free_rd_ring(vptr);
err_free_desc_rings:
	velocity_free_rings(vptr);
	goto out;
}

/**
 *	velocity_change_mtu	-	MTU change callback
 *	@dev: network device
 *	@new_mtu: desired MTU
 *
 *	Handle requests from the networking layer for MTU change on
 *	this interface. It gets called on a change by the network layer.
 *	Return zero for success or negative posix error code.
 */
static int velocity_change_mtu(struct net_device *dev, int new_mtu)
{
	struct velocity_info *vptr = netdev_priv(dev);
	unsigned long flags;
	int oldmtu = dev->mtu;
	int ret = 0;

	if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
				vptr->dev->name);
		return -EINVAL;
	}

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu != oldmtu) {
		spin_lock_irqsave(&vptr->lock, flags);

		netif_stop_queue(dev);
		velocity_shutdown(vptr);

		velocity_free_td_ring(vptr);
		velocity_free_rd_ring(vptr);

		dev->mtu = new_mtu;

		ret = velocity_init_rd_ring(vptr);
		if (ret < 0)
