
📄 vlsi_ir.c

📁 All of the program code from 《linux驱动程序设计从入门到精通》 (Linux Driver Development from Beginner to Master), including the drivers and their corresponding application programs
💻 C
📖 Page 1 of 4
		for(;;) {
			do_gettimeofday(&now);
			if (now.tv_sec > ready.tv_sec
			    ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
			    	break;
			udelay(100);
			/* must not sleep here - called under netif_tx_lock! */
		}
	}

	/* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
	 * after subsequent tx-completion
	 */

	if (idev->mode == IFF_SIR) {
		status |= RD_TX_DISCRC;		/* no hw-crc creation */
		len = async_wrap_skb(skb, rd->buf, r->len);

		/* Some rare worst case situation in SIR mode might lead to
		 * potential buffer overflow. The wrapper detects this, returns
		 * with a shortened frame (without FCS/EOF) but doesn't provide
		 * any error indication about the invalid packet which we are
		 * going to transmit.
		 * Therefore we log if the buffer got filled to the point, where the
		 * wrapper would abort, i.e. when there are less than 5 bytes left to
		 * allow appending the FCS/EOF.
		 */

		if (len >= r->len-5)
			 IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
				      __FUNCTION__);
	}
	else {
		/* hw deals with MIR/FIR mode wrapping */
		status |= RD_TX_PULSE;		/* send 2 us highspeed indication pulse */
		len = skb->len;
		if (len > r->len) {
			msg = "frame exceeds tx buffer length";
			goto drop;
		}
		else
			memcpy(rd->buf, skb->data, len);
	}

	rd->skb = skb;			/* remember skb for tx-complete stats */
	rd_set_count(rd, len);
	rd_set_status(rd, status);	/* not yet active! */

	/* give dma buffer back to busmaster-hw (flush caches to make
	 * CPU-driven changes visible from the pci bus).
	 */

	pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);

/*	Switching to TX mode here races with the controller
 *	which may stop TX at any time when fetching an inactive descriptor
 *	or one with CLR_ENTX set. So we switch on TX only, if TX was not running
 *	_after_ the new descriptor was activated on the ring. This ensures
 *	we will either find TX already stopped or we can be sure, there
 *	will be a TX-complete interrupt even if the chip stopped doing
 *	TX just after we found it still running. The ISR will then find
 *	the non-empty ring and restart TX processing. The enclosing
 *	spinlock provides the correct serialization to prevent race with isr.
 */

	spin_lock_irqsave(&idev->lock,flags);

	rd_activate(rd);

	if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
		int fifocnt;

		fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
		if (fifocnt != 0) {
			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
		}

		config = inw(iobase+VLSI_PIO_IRCFG);
		mb();
		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
		wmb();
		outw(0, iobase+VLSI_PIO_PROMPT);
	}
	ndev->trans_start = jiffies;

	if (ring_put(r) == NULL) {
		netif_stop_queue(ndev);
		IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __FUNCTION__);
	}
	spin_unlock_irqrestore(&idev->lock, flags);

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&idev->lock, flags);
drop:
	IRDA_WARNING("%s: dropping packet - %s\n", __FUNCTION__, msg);
	dev_kfree_skb_any(skb);
	idev->stats.tx_errors++;
	idev->stats.tx_dropped++;
	/* Don't even think about returning NET_XMIT_DROP (=1) here!
	 * In fact any retval!=0 causes the packet scheduler to requeue the
	 * packet for later retry of transmission - which isn't exactly
	 * what we want after we've just called dev_kfree_skb_any ;-)
	 */
	return 0;
}
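/* tx-complete handling: reclaim every descriptor the hardware has
 * released, account errors/packets/bytes, apply a pending speed change
 * once the tx ring has drained, then either re-enable RX (ring empty)
 * or restart a stopped TX engine (ring still busy), and wake the queue
 * if vlsi_hard_start_xmit() had stopped it.
 */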
static void vlsi_tx_interrupt(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	struct vlsi_ring	*r = idev->tx_ring;
	struct ring_descr	*rd;
	unsigned	iobase;
	int	ret;
	u16	config;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		if (rd_is_active(rd))
			break;

		ret = vlsi_process_tx(r, rd);

		if (ret < 0) {
			ret = -ret;
			idev->stats.tx_errors++;
			if (ret & VLSI_TX_DROP)
				idev->stats.tx_dropped++;
			if (ret & VLSI_TX_FIFO)
				idev->stats.tx_fifo_errors++;
		}
		else if (ret > 0){
			idev->stats.tx_packets++;
			idev->stats.tx_bytes += ret;
		}
	}

	iobase = ndev->base_addr;

	if (idev->new_baud  &&  rd == NULL)	/* tx ring empty and speed change pending */
		vlsi_set_baud(idev, iobase);

	config = inw(iobase+VLSI_PIO_IRCFG);
	if (rd == NULL)			/* tx ring empty: re-enable rx */
		outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);

	else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
		int fifocnt;

		fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
		if (fifocnt != 0) {
			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
				__FUNCTION__, fifocnt);
		}
		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
	}

	outw(0, iobase+VLSI_PIO_PROMPT);

	if (netif_queue_stopped(ndev)  &&  !idev->new_baud) {
		netif_wake_queue(ndev);
		IRDA_DEBUG(3, "%s: queue awoken\n", __FUNCTION__);
	}
}

/* caller must have stopped the controller from busmastering */

static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
{
	struct vlsi_ring *r = idev->tx_ring;
	struct ring_descr *rd;
	int ret;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		ret = 0;
		if (rd_is_active(rd)) {
			rd_set_status(rd, 0);
			rd_set_count(rd, 0);
			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
			if (rd->skb) {
				dev_kfree_skb_any(rd->skb);
				rd->skb = NULL;
			}
			IRDA_DEBUG(0, "%s - dropping tx packet\n", __FUNCTION__);
			ret = -VLSI_TX_DROP;
		}
		else
			ret = vlsi_process_tx(r, rd);

		if (ret < 0) {
			ret = -ret;
			idev->stats.tx_errors++;
			if (ret & VLSI_TX_DROP)
				idev->stats.tx_dropped++;
			if (ret & VLSI_TX_FIFO)
				idev->stats.tx_fifo_errors++;
		}
		else if (ret > 0){
			idev->stats.tx_packets++;
			idev->stats.tx_bytes += ret;
		}
	}

}

/********************************************************/
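/* clock source selection via the clksrc option (set elsewhere in the
 * driver): 0 = auto-detect (try the internal PLL, fall back to 40MHz
 * XCLK), 1 = internal PLL only (fail hard if it doesn't lock),
 * 2/3 = external clock input, where CLKCTL_XCKSEL selects the 40MHz
 * variant for clksrc==3.
 */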
static int vlsi_start_clock(struct pci_dev *pdev)
{
	u8	clkctl, lock;
	int	i, count;

	if (clksrc < 2) { /* auto or PLL: try PLL */
		clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

		/* procedure to detect PLL lock synchronisation:
		 * after 0.5 msec initial delay we expect to find 3 PLL lock
		 * indications within 10 msec for successful PLL detection.
		 */
		udelay(500);
		count = 0;
		for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
			pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
			if (lock&CLKCTL_LOCK) {
				if (++count >= 3)
					break;
			}
			udelay(50);
		}
		if (count < 3) {
			if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
				IRDA_ERROR("%s: no PLL or failed to lock!\n",
					   __FUNCTION__);
				clkctl = CLKCTL_CLKSTP;
				pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
				return -1;
			}
			else			/* was: clksrc=0(auto) */
				clksrc = 3;	/* fallback to 40MHz XCLK (OB800) */

			IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
				__FUNCTION__, clksrc);
		}
		else
			clksrc = 1;	/* got successful PLL lock */
	}

	if (clksrc != 1) {
		/* we get here if either no PLL detected in auto-mode or
		   an external clock source was explicitly specified */

		clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
		if (clksrc == 3)
			clkctl |= CLKCTL_XCKSEL;
		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

		/* no way to test for working XCLK */
	}
	else
		pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);

	/* ok, now going to connect the chip with the clock source */

	clkctl &= ~CLKCTL_CLKSTP;
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

	return 0;
}

static void vlsi_stop_clock(struct pci_dev *pdev)
{
	u8	clkctl;

	/* disconnect chip from clock source */
	pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
	clkctl |= CLKCTL_CLKSTP;
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

	/* disable all clock sources */
	clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
}

/********************************************************/
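/* chip setup section: vlsi_clear_regs() below works around occasional
 * hardware lockups, vlsi_init_chip() programs the rings, baudrate and
 * irq mask, and vlsi_start_hw()/vlsi_stop_hw() bracket all of that
 * with PCI busmaster and clock management.
 */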
/* writing all-zero to the VLSI PCI IO register area seems to prevent
 * some occasional situations where the hardware fails (symptoms are
 * what appears as stalled tx/rx state machines, i.e. everything ok for
 * receive or transmit but hw makes no progress or is unable to access
 * the bus memory locations).
 * Best place to call this is immediately after/before the internal clock
 * gets started/stopped.
 */

static inline void vlsi_clear_regs(unsigned iobase)
{
	unsigned	i;
	const unsigned	chip_io_extent = 32;

	for (i = 0; i < chip_io_extent; i += sizeof(u16))
		outw(0, iobase + i);
}

static int vlsi_init_chip(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	vlsi_irda_dev_t *idev = ndev->priv;
	unsigned	iobase;
	u16 ptr;

	/* start the clock and clean the registers */

	if (vlsi_start_clock(pdev)) {
		IRDA_ERROR("%s: no valid clock source\n", __FUNCTION__);
		return -1;
	}
	iobase = ndev->base_addr;
	vlsi_clear_regs(iobase);

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */

	outw(0, iobase+VLSI_PIO_IRENABLE);	/* disable IrPHY-interface */

	/* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */

	outw(0, iobase+VLSI_PIO_IRCFG);
	wmb();

	outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT);  /* max possible value=0x0fff */

	outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);

	outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
		iobase+VLSI_PIO_RINGSIZE);

	ptr = inw(iobase+VLSI_PIO_RINGPTR);
	atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
	atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
	atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
	atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));

	vlsi_set_baud(idev, iobase);	/* idev->new_baud used as provided by caller */

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);	/* just in case - w/c pending IRQ's */
	wmb();

	/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
	 * basically every received pulse fires an ACTIVITY-INT
	 * leading to >>1000 INT's per second instead of few 10
	 */

	outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);

	return 0;
}

static int vlsi_start_hw(vlsi_irda_dev_t *idev)
{
	struct pci_dev *pdev = idev->pdev;
	struct net_device *ndev = pci_get_drvdata(pdev);
	unsigned iobase = ndev->base_addr;
	u8 byte;

	/* we don't use the legacy UART, disable its address decoding */

	pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
	byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
	pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);

	/* enable PCI busmaster access to our 16MB page */
	pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
	pci_set_master(pdev);

	if (vlsi_init_chip(pdev) < 0) {
		pci_disable_device(pdev);
		return -1;
	}

	vlsi_fill_rx(idev->rx_ring);

	do_gettimeofday(&idev->last_rx);	/* first mtt may start from now on */

	outw(0, iobase+VLSI_PIO_PROMPT);	/* kick hw state machine */

	return 0;
}

static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
{
	struct pci_dev *pdev = idev->pdev;
	struct net_device *ndev = pci_get_drvdata(pdev);
	unsigned iobase = ndev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&idev->lock,flags);
	outw(0, iobase+VLSI_PIO_IRENABLE);
	outw(0, iobase+VLSI_PIO_IRCFG);			/* disable everything */

	/* disable and w/c irqs */
	outb(0, iobase+VLSI_PIO_IRINTR);
	wmb();
	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
	spin_unlock_irqrestore(&idev->lock,flags);

	vlsi_unarm_tx(idev);
	vlsi_unarm_rx(idev);

	vlsi_clear_regs(iobase);
	vlsi_stop_clock(pdev);

	pci_disable_device(pdev);

	return 0;
}

/**************************************************************/

static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;

	return &idev->stats;
}

static void vlsi_tx_timeout(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = ndev->priv;

	vlsi_reg_debug(ndev->base_addr, __FUNCTION__);
	vlsi_ring_debug(idev->tx_ring);

	if (netif_running(ndev))
		netif_stop_queue(ndev);

	vlsi_stop_hw(idev);

	/* now simply restart the whole thing */

	if (!idev->new_baud)
		idev->new_baud = idev->baud;	/* keep current baudrate */

	if (vlsi_start_hw(idev))
		IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
			   __FUNCTION__, pci_name(idev->pdev), ndev->name);
	else
		netif_start_queue(ndev);
}
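/* device-private ioctls; this excerpt shows only the SIOCSBANDWIDTH
 * case, which forces a fixed link speed: it requires CAP_NET_ADMIN,
 * stores the requested baudrate in idev->new_baud and applies it
 * immediately under idev->lock.
 */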
static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	vlsi_irda_dev_t *idev = ndev->priv;
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	unsigned long flags;
	u16 fifocnt;
	int ret = 0;

	switch (cmd) {
		case SIOCSBANDWIDTH:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			spin_lock_irqsave(&idev->lock, flags);
			idev->new_baud = irq->ifr_baudrate;
			/* when called from userland there might be a minor race window here
			 * if the stack tries to change speed concurrently - which would be
			 * pretty strange anyway with the userland having full control...
			 */
			vlsi_set_baud(idev, ndev->base_addr);
			spin_unlock_irqrestore(&idev->lock, flags);
			break;
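The SIOCSBANDWIDTH case above is reachable from userland through the kernel's device-ioctl path. What follows is a minimal sketch, not part of the book's code, assuming a 2.6-era <linux/irda.h> (which defines SIOCSBANDWIDTH in the SIOCDEVPRIVATE range, struct if_irda_req and IRNAMSIZ) and a hypothetical interface name "irda0":

/* minimal userspace sketch - assumptions: 2.6-era IrDA headers,
 * interface name "irda0"; run with CAP_NET_ADMIN, since the handler
 * above rejects unprivileged callers with -EPERM.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/irda.h>		/* SIOCSBANDWIDTH, struct if_irda_req */

int main(void)
{
	struct if_irda_req req;
	/* device-private ioctls can be issued on any socket fd */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	strncpy(req.ifr_ifrn.ifrn_name, "irda0", IRNAMSIZ - 1);
	req.ifr_baudrate = 115200;	/* request a fixed 115.2 kbaud SIR link */

	if (ioctl(fd, SIOCSBANDWIDTH, &req) < 0)
		perror("SIOCSBANDWIDTH");

	close(fd);
	return 0;
}

The driver then stores the value in idev->new_baud and switches speed via vlsi_set_baud(), exactly as in the SIOCSBANDWIDTH branch shown above.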
