
vlsi_ir.c

All the program code from the book 《linux驱动程序设计从入门到精通》 (Linux Driver Programming from Beginner to Mastery), including the drivers and the corresponding application programs.
Language: C
Page 1 of 4
	return 0;
}

static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
{
	char			*ringarea;
	struct ring_descr_hw	*hwmap;

	idev->virtaddr = NULL;
	idev->busaddr = 0;

	ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
	if (!ringarea) {
		IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
			   __FUNCTION__);
		goto out;
	}
	memset(ringarea, 0, HW_RING_AREA_SIZE);

	hwmap = (struct ring_descr_hw *)ringarea;
	idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
					XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (idev->rx_ring == NULL)
		goto out_unmap;

	hwmap += MAX_RING_DESCR;
	idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
					XFER_BUF_SIZE, PCI_DMA_TODEVICE);
	if (idev->tx_ring == NULL)
		goto out_free_rx;

	idev->virtaddr = ringarea;
	return 0;

out_free_rx:
	vlsi_free_ring(idev->rx_ring);
out_unmap:
	idev->rx_ring = idev->tx_ring = NULL;
	pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
	idev->busaddr = 0;
out:
	return -ENOMEM;
}

static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
{
	vlsi_free_ring(idev->rx_ring);
	vlsi_free_ring(idev->tx_ring);
	idev->rx_ring = idev->tx_ring = NULL;

	if (idev->busaddr)
		pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, idev->virtaddr, idev->busaddr);

	idev->virtaddr = NULL;
	idev->busaddr = 0;

	return 0;
}

/********************************************************/

static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
{
	u16		status;
	int		crclen, len = 0;
	struct sk_buff	*skb;
	int		ret = 0;
	struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
	vlsi_irda_dev_t *idev = ndev->priv;

	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
	/* dma buffer now owned by the CPU */
	status = rd_get_status(rd);
	if (status & RD_RX_ERROR) {
		if (status & RD_RX_OVER)
			ret |= VLSI_RX_OVER;
		if (status & RD_RX_LENGTH)
			ret |= VLSI_RX_LENGTH;
		if (status & RD_RX_PHYERR)
			ret |= VLSI_RX_FRAME;
		if (status & RD_RX_CRCERR)
			ret |= VLSI_RX_CRC;
		goto done;
	}

	len = rd_get_count(rd);
	crclen = (idev->mode == IFF_FIR) ? sizeof(u32) : sizeof(u16);
	len -= crclen;		/* remove trailing CRC */
	if (len <= 0) {
		IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __FUNCTION__, len);
		ret |= VLSI_RX_DROP;
		goto done;
	}

	if (idev->mode == IFF_SIR) {	/* hw checks CRC in MIR, FIR mode */

		/* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the
		 * endian-adjustment there just in place will dirty a cache line
		 * which belongs to the map and thus we must be sure it will
		 * get flushed before giving the buffer back to hardware.
		 * vlsi_fill_rx() will do this anyway - but here we rely on it.
		 */

		le16_to_cpus(rd->buf+len);
		if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
			IRDA_DEBUG(0, "%s: crc error\n", __FUNCTION__);
			ret |= VLSI_RX_CRC;
			goto done;
		}
	}

	if (!rd->skb) {
		IRDA_WARNING("%s: rx packet lost\n", __FUNCTION__);
		ret |= VLSI_RX_DROP;
		goto done;
	}

	skb = rd->skb;
	rd->skb = NULL;
	skb->dev = ndev;
	memcpy(skb_put(skb,len), rd->buf, len);
	skb->mac.raw = skb->data;
	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);
	ndev->last_rx = jiffies;

done:
	rd_set_status(rd, 0);
	rd_set_count(rd, 0);
	/* buffer still owned by CPU */

	return (ret) ? -ret : len;
}
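Note the return convention: vlsi_process_rx reports success as a positive payload length and failure as the negated OR of the VLSI_RX_* flag bits, so a single return value can carry several error causes at once. Below is a minimal standalone sketch of the same encode/decode idiom; the flag names and values are hypothetical stand-ins, not the driver's real constants.

#include <stdio.h>

/* hypothetical stand-ins for the driver's VLSI_RX_* flag values */
#define RX_DROP	0x01
#define RX_OVER	0x02
#define RX_CRC	0x04

/* returns payload length on success, or the negated OR of error flags */
static int process(int fail)
{
	int ret = 0;

	if (fail) {
		ret |= RX_OVER;
		ret |= RX_CRC;
		return -ret;	/* several causes reported in one value */
	}
	return 42;		/* payload length */
}

int main(void)
{
	int ret = process(1);

	if (ret < 0) {
		ret = -ret;	/* recover the flag mask, as vlsi_rx_interrupt does */
		if (ret & RX_OVER)
			printf("overrun\n");
		if (ret & RX_CRC)
			printf("crc error\n");
	} else {
		printf("received %d bytes\n", ret);
	}
	return 0;
}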
static void vlsi_fill_rx(struct vlsi_ring *r)
{
	struct ring_descr *rd;

	for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
		if (rd_is_active(rd)) {
			IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
				     __FUNCTION__);
			vlsi_ring_debug(r);
			break;
		}
		if (!rd->skb) {
			rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
			if (rd->skb) {
				skb_reserve(rd->skb,1);
				rd->skb->protocol = htons(ETH_P_IRDA);
			}
			else
				break;	/* probably not worth logging? */
		}
		/* give dma buffer back to busmaster */
		pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
		rd_activate(rd);
	}
}

static void vlsi_rx_interrupt(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	struct vlsi_ring *r = idev->rx_ring;
	struct ring_descr *rd;
	int ret;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		if (rd_is_active(rd))
			break;

		ret = vlsi_process_rx(r, rd);

		if (ret < 0) {
			ret = -ret;
			idev->stats.rx_errors++;
			if (ret & VLSI_RX_DROP)
				idev->stats.rx_dropped++;
			if (ret & VLSI_RX_OVER)
				idev->stats.rx_over_errors++;
			if (ret & VLSI_RX_LENGTH)
				idev->stats.rx_length_errors++;
			if (ret & VLSI_RX_FRAME)
				idev->stats.rx_frame_errors++;
			if (ret & VLSI_RX_CRC)
				idev->stats.rx_crc_errors++;
		}
		else if (ret > 0) {
			idev->stats.rx_packets++;
			idev->stats.rx_bytes += ret;
		}
	}

	do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */

	vlsi_fill_rx(r);

	if (ring_first(r) == NULL) {
		/* we are in big trouble, if this should ever happen */
		IRDA_ERROR("%s: rx ring exhausted!\n", __FUNCTION__);
		vlsi_ring_debug(r);
	}
	else
		outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
}

/* caller must have stopped the controller from busmastering */

static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
{
	struct vlsi_ring *r = idev->rx_ring;
	struct ring_descr *rd;
	int ret;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		ret = 0;
		if (rd_is_active(rd)) {
			rd_set_status(rd, 0);
			if (rd_get_count(rd)) {
				IRDA_DEBUG(0, "%s - dropping rx packet\n", __FUNCTION__);
				ret = -VLSI_RX_DROP;
			}
			rd_set_count(rd, 0);
			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
			if (rd->skb) {
				dev_kfree_skb_any(rd->skb);
				rd->skb = NULL;
			}
		}
		else
			ret = vlsi_process_rx(r, rd);

		if (ret < 0) {
			ret = -ret;
			idev->stats.rx_errors++;
			if (ret & VLSI_RX_DROP)
				idev->stats.rx_dropped++;
			if (ret & VLSI_RX_OVER)
				idev->stats.rx_over_errors++;
			if (ret & VLSI_RX_LENGTH)
				idev->stats.rx_length_errors++;
			if (ret & VLSI_RX_FRAME)
				idev->stats.rx_frame_errors++;
			if (ret & VLSI_RX_CRC)
				idev->stats.rx_crc_errors++;
		}
		else if (ret > 0) {
			idev->stats.rx_packets++;
			idev->stats.rx_bytes += ret;
		}
	}
}

/********************************************************/

static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
{
	u16		status;
	int		len;
	int		ret;

	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
	/* dma buffer now owned by the CPU */
	status = rd_get_status(rd);
	if (status & RD_TX_UNDRN)
		ret = VLSI_TX_FIFO;
	else
		ret = 0;
	rd_set_status(rd, 0);

	if (rd->skb) {
		len = rd->skb->len;
		dev_kfree_skb_any(rd->skb);
		rd->skb = NULL;
	}
	else	/* tx-skb already freed? - should never happen */
		len = rd_get_count(rd);	/* incorrect for SIR! (due to wrapping) */

	rd_set_count(rd, 0);
	/* dma buffer still owned by the CPU */

	return (ret) ? -ret : len;
}
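Both rx loops above walk the descriptor rings through the ring_first/ring_get and ring_last/ring_put helpers, which are defined elsewhere in the driver. Judging from their use, they behave like a head/tail circular buffer: first/get consume completed entries from the head, while last/put hand free slots back at the tail. The following self-contained sketch is written under that assumption; the demo_* names and RING_SIZE are hypothetical, not the driver's API.

#include <stdio.h>

#define RING_SIZE 8	/* hypothetical; the driver sizes its rings via ringsize[] */

struct demo_ring {
	int buf[RING_SIZE];
	unsigned head, tail;	/* free-running; indices taken mod RING_SIZE */
};

/* oldest committed entry, or NULL when the ring is empty */
static int *demo_first(struct demo_ring *r)
{
	return (r->head == r->tail) ? NULL : &r->buf[r->head % RING_SIZE];
}

/* consume the oldest entry, return the next one (NULL once drained) */
static int *demo_get(struct demo_ring *r)
{
	if (r->head == r->tail)
		return NULL;
	r->head++;
	return demo_first(r);
}

/* next free slot at the tail, or NULL when the ring is full */
static int *demo_last(struct demo_ring *r)
{
	return (r->tail - r->head == RING_SIZE) ? NULL : &r->buf[r->tail % RING_SIZE];
}

/* commit the tail slot, return the following free one (NULL once full) */
static int *demo_put(struct demo_ring *r)
{
	if (r->tail - r->head == RING_SIZE)
		return NULL;
	r->tail++;
	return demo_last(r);
}

int main(void)
{
	struct demo_ring r = { {0}, 0, 0 };
	int *slot;

	/* fill every free slot, as vlsi_fill_rx does with rx descriptors */
	for (slot = demo_last(&r); slot != NULL; slot = demo_put(&r))
		*slot = (int)r.tail;

	/* drain completed entries, as vlsi_rx_interrupt does */
	for (slot = demo_first(&r); slot != NULL; slot = demo_get(&r))
		printf("%d\n", *slot);

	return 0;
}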
static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
{
	u16 nphyctl;
	u16 config;
	unsigned mode;
	int	ret;
	int	baudrate;
	int	fifocnt;

	baudrate = idev->new_baud;
	IRDA_DEBUG(2, "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud);
	if (baudrate == 4000000) {
		mode = IFF_FIR;
		config = IRCFG_FIR;
		nphyctl = PHYCTL_FIR;
	}
	else if (baudrate == 1152000) {
		mode = IFF_MIR;
		config = IRCFG_MIR | IRCFG_CRC16;
		nphyctl = PHYCTL_MIR(clksrc==3);
	}
	else {
		mode = IFF_SIR;
		config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
		switch(baudrate) {
			default:
				IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
					     __FUNCTION__, baudrate);
				baudrate = 9600;
				/* fallthru */
			case 2400:
			case 9600:
			case 19200:
			case 38400:
			case 57600:
			case 115200:
				nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
				break;
		}
	}
	config |= IRCFG_MSTR | IRCFG_ENRX;

	fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
	if (fifocnt != 0) {
		IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
	}

	outw(0, iobase+VLSI_PIO_IRENABLE);
	outw(config, iobase+VLSI_PIO_IRCFG);
	outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
	wmb();
	outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE);
	mb();

	udelay(1);	/* chip applies IRCFG on next rising edge of its 8MHz clock */

	/* read back settings for validation */

	config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;

	if (mode == IFF_FIR)
		config ^= IRENABLE_FIR_ON;
	else if (mode == IFF_MIR)
		config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON);
	else
		config ^= IRENABLE_SIR_ON;

	if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
		IRDA_WARNING("%s: failed to set %s mode!\n", __FUNCTION__,
			(mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
		ret = -1;
	}
	else {
		if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
			IRDA_WARNING("%s: failed to apply baudrate %d\n",
				     __FUNCTION__, baudrate);
			ret = -1;
		}
		else {
			idev->mode = mode;
			idev->baud = baudrate;
			idev->new_baud = 0;
			ret = 0;
		}
	}

	if (ret)
		vlsi_reg_debug(iobase,__FUNCTION__);

	return ret;
}

static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	struct vlsi_ring	*r = idev->tx_ring;
	struct ring_descr *rd;
	unsigned long flags;
	unsigned iobase = ndev->base_addr;
	u8 status;
	u16 config;
	int mtt;
	int len, speed;
	struct timeval  now, ready;
	char *msg = NULL;

	speed = irda_get_next_speed(skb);
	spin_lock_irqsave(&idev->lock, flags);
	if (speed != -1  &&  speed != idev->baud) {
		netif_stop_queue(ndev);
		idev->new_baud = speed;
		status = RD_TX_CLRENTX;  /* stop tx-ring after this frame */
	}
	else
		status = 0;

	if (skb->len == 0) {
		/* handle zero packets - should be speed change */
		if (status == 0) {
			msg = "bogus zero-length packet";
			goto drop_unlock;
		}

		/* due to the completely asynch tx operation we might have
		 * IrLAP racing with the hardware here, f.e. if the controller
		 * is just sending the last packet with current speed while
		 * the LAP is already switching the speed using synchronous
		 * len=0 packet. Immediate execution would lead to hw lockup
		 * requiring a powercycle to reset. Good candidate to trigger
		 * this is the final UA:RSP packet after receiving a DISC:CMD
		 * when getting the LAP down.
		 * Note that we are not protected by the queue_stop approach
		 * because the final UA:RSP arrives _without_ request to apply
		 * new-speed-after-this-packet - hence the driver doesn't know
		 * this was the last packet and doesn't stop the queue. So the
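/* Aside: vlsi_set_baud above validates its register writes with a small XOR
 * trick. After reading IRENABLE back, it XORs out the one mode bit that should
 * be set for the selected mode, so a correct configuration always reduces to
 * the same expected value (IRENABLE_PHYANDCLOCK | IRENABLE_ENRXST) no matter
 * which of SIR/MIR/FIR was chosen. A standalone illustration follows; the EN_*
 * constants are made-up values, not the chip's real register layout. */

#include <stdio.h>

/* hypothetical stand-ins for the IRENABLE_* register bits */
#define EN_PHYANDCLOCK	0x8000
#define EN_ENRXST	0x1000
#define EN_SIR_ON	0x0200
#define EN_MIR_ON	0x0400
#define EN_FIR_ON	0x0800

/* returns 0 if the register read-back matches the requested mode */
static int validate(unsigned readback, unsigned expected_mode_bits)
{
	/* XOR clears the mode bit(s) we expect to be set; any surviving
	 * difference (wrong mode, missing clock/rx-enable) shows up here */
	readback ^= expected_mode_bits;
	return (readback == (EN_PHYANDCLOCK | EN_ENRXST)) ? 0 : -1;
}

int main(void)
{
	unsigned ok  = EN_PHYANDCLOCK | EN_ENRXST | EN_FIR_ON;
	unsigned bad = EN_PHYANDCLOCK | EN_ENRXST | EN_SIR_ON;

	printf("FIR ok:  %d\n", validate(ok,  EN_FIR_ON));	/* prints 0  */
	printf("FIR bad: %d\n", validate(bad, EN_FIR_ON));	/* prints -1 */
	return 0;
}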
		 * forced switch to default speed from LAP gets through as fast
		 * as only some 10 usec later while the UA:RSP is still processed
		 * by the hardware and we would get screwed.
		 */

		if (ring_first(idev->tx_ring) == NULL) {
			/* no race - tx-ring already empty */
			vlsi_set_baud(idev, iobase);
			netif_wake_queue(ndev);
		}
		else
			;
			/* keep the speed change pending like it would
			 * for any len>0 packet. tx completion interrupt
			 * will apply it when the tx ring becomes empty.
			 */
		spin_unlock_irqrestore(&idev->lock, flags);
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* sanity checks - simply drop the packet */

	rd = ring_last(r);
	if (!rd) {
		msg = "ring full, but queue wasn't stopped";
		goto drop_unlock;
	}
	if (rd_is_active(rd)) {
		msg = "entry still owned by hw";
		goto drop_unlock;
	}
	if (!rd->buf) {
		msg = "tx ring entry without pci buffer";
		goto drop_unlock;
	}
	if (rd->skb) {
		msg = "ring entry with old skb still attached";
		goto drop_unlock;
	}

	/* no need for serialization or interrupt disable during mtt */
	spin_unlock_irqrestore(&idev->lock, flags);

	if ((mtt = irda_get_mtt(skb)) > 0) {
		ready.tv_usec = idev->last_rx.tv_usec + mtt;
		ready.tv_sec = idev->last_rx.tv_sec;
		if (ready.tv_usec >= 1000000) {
			ready.tv_usec -= 1000000;
			ready.tv_sec++;		/* IrLAP 1.1: mtt always < 1 sec */
		}
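The listing breaks off here (page 1 of 4) inside the minimum-turnaround-time (mtt) handling, but the fragment already shows the complete timestamp arithmetic: ready = last_rx + mtt microseconds, normalized with a single conditional carry because IrLAP 1.1 guarantees mtt is always below one second. A standalone sketch of that computation; struct tv and ready_after are hypothetical stand-ins for struct timeval and the inline code above.

#include <stdio.h>

/* minimal stand-in for struct timeval as used above */
struct tv { long sec; long usec; };

/* earliest allowed transmit time: last_rx + mtt microseconds.
 * One conditional carry suffices because mtt < 1000000 (IrLAP 1.1). */
static struct tv ready_after(struct tv last_rx, long mtt_usec)
{
	struct tv ready;

	ready.usec = last_rx.usec + mtt_usec;
	ready.sec  = last_rx.sec;
	if (ready.usec >= 1000000) {
		ready.usec -= 1000000;
		ready.sec++;
	}
	return ready;
}

int main(void)
{
	struct tv last = { 100, 990000 };
	struct tv r = ready_after(last, 15000);	/* 15 ms turnaround */

	printf("%ld.%06ld\n", r.sec, r.usec);	/* prints 101.005000 */
	return 0;
}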
