
📄 dscc4.c

📁 Linux 2.4.20 kernel source; apply the RTLinux 3.2 patch to form a real-time Linux system, then compile the kernel
💻 C
📖 Page 1 of 4
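
This page shows dscc4.c, the Linux network driver for Siemens DSCC4 (PEB 20534) based serial/HDLC cards. The excerpt below picks up mid-way through the receive path, then covers the PCI probe (dscc4_init_one, dscc4_found1), the TX watchdog timer (dscc4_timer), and the open/transmit/close entry points; it breaks off inside dscc4_close where page 1 ends.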
		try_get_rx_skb(dpriv, cur, dev);
	} else {
		if (skb->data[pkt_len] & FrameRdo)
			dpriv->stats.rx_fifo_errors++;
		else if (!(skb->data[pkt_len] | ~FrameCrc))
			dpriv->stats.rx_crc_errors++;
		else if (!(skb->data[pkt_len] | ~FrameVfr))
			dpriv->stats.rx_length_errors++;
		else
			dpriv->stats.rx_errors++;
	}
	rx_fd->state1 |= Hold;
	rx_fd->state2 = 0x00000000;
	rx_fd->end = 0xbabeface;
	if (!rx_fd->data)
		return;
	rx_fd--;
	if (!cur)
		rx_fd += RX_RING_SIZE;
	rx_fd->state1 &= ~Hold;
}

static int __init dscc4_init_one(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct dscc4_pci_priv *priv;
	struct dscc4_dev_priv *dpriv;
	int i;
	static int cards_found = 0;
	unsigned long ioaddr;

	printk(KERN_DEBUG "%s", version);

	if (pci_enable_device(pdev))
		goto err_out;
	if (!request_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0), "registers")) {
		printk(KERN_ERR "dscc4: can't reserve MMIO region (regs)\n");
		goto err_out;
	}
	if (!request_mem_region(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1), "LBI interface")) {
		printk(KERN_ERR "dscc4: can't reserve MMIO region (lbi)\n");
		goto err_out_free_mmio_region0;
	}
	ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
					pci_resource_len(pdev, 0));
	if (!ioaddr) {
		printk(KERN_ERR "dscc4: cannot remap MMIO region %lx @ %lx\n",
		       pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
		goto err_out_free_mmio_region;
	}
	printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d.\n",
	       pci_resource_start(pdev, 0),
	       pci_resource_start(pdev, 1), pdev->irq);

	/* High PCI latency useless. Cf app. note. */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x10);
	pci_set_master(pdev);

	if (dscc4_found1(pdev, ioaddr))
		goto err_out_iounmap;

	priv = (struct dscc4_pci_priv *)pci_get_drvdata(pdev);

	if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, "dscc4", priv->root)) {
		printk(KERN_WARNING "dscc4: IRQ %d is busy\n", pdev->irq);
		goto err_out_iounmap;
	}
	priv->pdev = pdev;

	/* power up/little endian/dma core controlled via hold bit */
	writel(0x00000000, ioaddr + GMODE);
	/* Shared interrupt queue */
	{
		u32 bits;

		bits = (IRQ_RING_SIZE >> 5) - 1;
		bits |= bits << 4;
		bits |= bits << 8;
		bits |= bits << 16;
		writel(bits, ioaddr + IQLENR0);
	}
	/* Global interrupt queue */
	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
	priv->iqcfg = (u32 *)pci_alloc_consistent(pdev,
		IRQ_RING_SIZE*sizeof(u32), &priv->iqcfg_dma);
	if (!priv->iqcfg)
		goto err_out_free_irq;
	writel(priv->iqcfg_dma, ioaddr + IQCFG);

	/*
	 * SCC 0-3 private rx/tx irq structures
	 * IQRX/TXi needs to be set soon. Learned it the hard way...
	 */
	for (i = 0; i < dev_per_card; i++) {
		dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
		dpriv->iqtx = (u32 *)pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
		if (!dpriv->iqtx)
			goto err_out_free_iqtx;
		writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
	}
	for (i = 0; i < dev_per_card; i++) {
		dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
		dpriv->iqrx = (u32 *)pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
		if (!dpriv->iqrx)
			goto err_out_free_iqrx;
		writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
	}

	/*
	 * Cf application hint. Beware of hard-lock condition on
	 * threshold.
	 */
	writel(0x42104000, ioaddr + FIFOCR1);
	//writel(0x9ce69800, ioaddr + FIFOCR2);
	writel(0xdef6d800, ioaddr + FIFOCR2);
	//writel(0x11111111, ioaddr + FIFOCR4);
	writel(0x18181818, ioaddr + FIFOCR4);
	// FIXME: should depend on the chipset revision
	writel(0x0000000e, ioaddr + FIFOCR3);

	writel(0xff200001, ioaddr + GCMDR);

	cards_found++;
	return 0;

err_out_free_iqrx:
	while (--i >= 0) {
		dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
	}
	i = dev_per_card;
err_out_free_iqtx:
	while (--i >= 0) {
		dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
			    priv->iqcfg_dma);
err_out_free_irq:
	free_irq(pdev->irq, priv->root);
err_out_iounmap:
	iounmap((void *)ioaddr);
err_out_free_mmio_region:
	release_mem_region(pci_resource_start(pdev, 1),
			   pci_resource_len(pdev, 1));
err_out_free_mmio_region0:
	release_mem_region(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
err_out:
	return -ENODEV;
};

static int dscc4_found1(struct pci_dev *pdev, unsigned long ioaddr)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *dpriv;
	struct net_device *dev;
	int i = 0;

	dpriv = (struct dscc4_dev_priv *)
		kmalloc(dev_per_card*sizeof(struct dscc4_dev_priv), GFP_KERNEL);
	if (!dpriv) {
		printk(KERN_ERR "dscc4: can't allocate data\n");
		goto err_out;
	}
	memset(dpriv, 0, dev_per_card*sizeof(struct dscc4_dev_priv));

	dev = (struct net_device *)
	      kmalloc(dev_per_card*sizeof(struct net_device), GFP_KERNEL);
	if (!dev) {
		printk(KERN_ERR "dscc4: can't allocate net_device\n");
		goto err_dealloc_priv;
	}
	memset(dev, 0, dev_per_card*sizeof(struct net_device));

	ppriv = (struct dscc4_pci_priv *)
		kmalloc(sizeof(struct dscc4_pci_priv), GFP_KERNEL);
	if (!ppriv) {
		printk(KERN_ERR "dscc4: can't allocate pci private data.\n");
		goto err_dealloc_dev;
	}
	memset(ppriv, 0, sizeof(struct dscc4_pci_priv));

	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *p;
		struct net_device *d;

		d = dev + i;
		d->base_addr = ioaddr;
		d->init = NULL;
		d->irq = pdev->irq;
		/* The card adds the crc */
		d->type = ARPHRD_RAWHDLC;
		d->open = dscc4_open;
		d->stop = dscc4_close;
		d->hard_start_xmit = dscc4_start_xmit;
		d->set_multicast_list = NULL;
		d->do_ioctl = dscc4_ioctl;
		d->get_stats = dscc4_get_stats;
		d->change_mtu = dscc4_change_mtu;
		d->mtu = HDLC_MAX_MTU;
		d->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
		d->tx_timeout = dscc4_tx_timeout;
		d->watchdog_timeo = TX_TIMEOUT;

		p = dpriv + i;
		p->dev_id = i;
		p->pci_priv = ppriv;
		spin_lock_init(&p->lock);
		d->priv = p;

		if (dev_alloc_name(d, "scc%d") < 0) {
			printk(KERN_ERR "dev_alloc_name failed for scc.\n");
			goto err_dealloc_dev;
		}
		if (register_netdev(d)) {
			printk(KERN_ERR "%s: register_netdev != 0.\n", d->name);
			goto err_dealloc_dev;
		}
		dscc4_attach_hdlc_device(d);
		SET_MODULE_OWNER(d);
	}

	ppriv->root = dev;
	ppriv->pdev = pdev;
	spin_lock_init(&ppriv->lock);
	pci_set_drvdata(pdev, ppriv);
	return 0;

err_dealloc_dev:
	while (--i >= 0)
		unregister_netdev(dev + i);
	kfree(dev);
err_dealloc_priv:
	kfree(dpriv);
err_out:
	return -1;
};

static void dscc4_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dscc4_dev_priv *dpriv;
	struct dscc4_pci_priv *ppriv;

	dpriv = dev->priv;
	if (netif_queue_stopped(dev) &&
	    ((jiffies - dev->trans_start) > TX_TIMEOUT)) {
		ppriv = dpriv->pci_priv;
		if (dpriv->iqtx[dpriv->iqtx_current%IRQ_RING_SIZE]) {
			u32 flags;

			printk(KERN_DEBUG "%s: pending events\n", dev->name);
			dev->trans_start = jiffies;
			spin_lock_irqsave(&ppriv->lock, flags);
			dscc4_tx_irq(ppriv, dev);
			spin_unlock_irqrestore(&ppriv->lock, flags);
		} else {
			struct TxFD *tx_fd;
			struct sk_buff *skb;
			int i, j;

			printk(KERN_DEBUG "%s: missing events\n", dev->name);
			i = dpriv->tx_dirty%TX_RING_SIZE;
			j = dpriv->tx_current - dpriv->tx_dirty;
			dpriv->stats.tx_dropped += j;
			while (j--) {
				skb = dpriv->tx_skbuff[i];
				tx_fd = dpriv->tx_fd + i;
				if (skb) {
					dpriv->tx_skbuff[i] = NULL;
					pci_unmap_single(ppriv->pdev, tx_fd->data, skb->len,
							 PCI_DMA_TODEVICE);
					dev_kfree_skb_irq(skb);
				} else
					printk(KERN_INFO "%s: hardware on drugs!\n", dev->name);
				tx_fd->data = 0; /* DEBUG */
				tx_fd->complete &= ~DataComplete;
				i++;
				i %= TX_RING_SIZE;
			}
			dpriv->tx_dirty = dpriv->tx_current;
			dev->trans_start = jiffies;
			netif_wake_queue(dev);
			printk(KERN_DEBUG "%s: re-enabled\n", dev->name);
		}
	}
	dpriv->timer.expires = jiffies + TX_TIMEOUT;
	add_timer(&dpriv->timer);
}

static void dscc4_tx_timeout(struct net_device *dev)
{
	/* FIXME: something is missing there */
};

static int dscc4_open(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
	struct dscc4_pci_priv *ppriv;
	u32 ioaddr = 0;

	MOD_INC_USE_COUNT;

	ppriv = dpriv->pci_priv;

	if (dscc4_init_ring(dev))
		goto err_out;

	ioaddr = dev->base_addr + SCC_REG_START(dpriv->dev_id);

	/* FIXME: VIS */
	writel(readl(ioaddr + CCR0) | 0x80001000, ioaddr + CCR0);

	writel(LengthCheck | (HDLC_MAX_MRU >> 5), ioaddr + RLCR);

	/* no address recognition/crc-CCITT/cts enabled */
	writel(readl(ioaddr + CCR1) | 0x021c8000, ioaddr + CCR1);

	/* Ccr2.Rac = 0 */
	writel(0x00050008 & ~RxActivate, ioaddr + CCR2);

#ifdef EXPERIMENTAL_POLLING
	writel(0xfffeef7f, ioaddr + IMR); /* Interrupt mask */
#else
	/* Don't mask RDO. Ever. */
	//writel(0xfffaef7f, ioaddr + IMR); /* Interrupt mask */
	writel(0xfffaef7e, ioaddr + IMR); /* Interrupt mask */
#endif
	/* IDT+IDR during XPR */
	dpriv->flags = NeedIDR | NeedIDT;

	/*
	 * The following is a bit paranoid...
	 *
	 * NB: the datasheet "...CEC will stay active if the SCC is in
	 * power-down mode or..." and CCR2.RAC = 1 are two different
	 * situations.
	 */
	if (readl(ioaddr + STAR) & SccBusy) {
		printk(KERN_ERR "%s busy. Try later\n", dev->name);
		goto err_free_ring;
	}
	writel(TxSccRes | RxSccRes, ioaddr + CMDR);

	/* ... the following isn't */
	if (dscc4_wait_ack_cec(ioaddr, dev, "Cec"))
		goto err_free_ring;

	/*
	 * I would expect XPR near CE completion (before ? after ?).
	 * At worst, this code won't see a late XPR and people
	 * will have to re-issue an ifconfig (this is harmless).
	 * WARNING, a really missing XPR usually means a hardware
	 * reset is needed. Suggestions anyone ?
	 */
	if (dscc4_xpr_ack(dpriv))
		goto err_free_ring;

	netif_start_queue(dev);

	init_timer(&dpriv->timer);
	dpriv->timer.expires = jiffies + 10*HZ;
	dpriv->timer.data = (unsigned long)dev;
	dpriv->timer.function = &dscc4_timer;
	add_timer(&dpriv->timer);
	netif_carrier_on(dev);

	return 0;

err_free_ring:
	dscc4_release_ring(dpriv);
err_out:
	MOD_DEC_USE_COUNT;
	return -EAGAIN;
}

#ifdef EXPERIMENTAL_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/* FIXME: it's gonna be easy (TM), for sure */
}
#endif /* EXPERIMENTAL_POLLING */

static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dev->priv;
	struct dscc4_pci_priv *ppriv;
	struct TxFD *tx_fd;
	int cur, next;

	ppriv = dpriv->pci_priv;
	cur = dpriv->tx_current++%TX_RING_SIZE;
	next = dpriv->tx_current%TX_RING_SIZE;
	dpriv->tx_skbuff[next] = skb;
	tx_fd = dpriv->tx_fd + next;
	tx_fd->state = FrameEnd | Hold | TO_STATE(skb->len & TxSizeMax);
	tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
				     PCI_DMA_TODEVICE);
	tx_fd->complete = 0x00000000;
	mb(); // FIXME: suppress ?

#ifdef EXPERIMENTAL_POLLING
	spin_lock(&dpriv->lock);
	while (dscc4_tx_poll(dpriv, dev));
	spin_unlock(&dpriv->lock);
#endif
	/*
	 * I know there's a window for a race in the following lines but
	 * dscc4_timer will take good care of it. The chipset eats events
	 * (especially the net_dev re-enabling ones) thus there is no
	 * reason to try and be smart.
	 */
	if ((dpriv->tx_dirty + 16) < dpriv->tx_current) {
		netif_stop_queue(dev);
		dpriv->hi_expected = 2;
	}

	tx_fd = dpriv->tx_fd + cur;
	tx_fd->state &= ~Hold;
	mb(); // FIXME: suppress ?

	/*
	 * One may avoid some pci transactions during intense TX periods.
	 * Not sure it's worth the pain...
	 */
	writel((TxPollCmd << dpriv->dev_id) | NoAck, dev->base_addr + GCMDR);
	dev->trans_start = jiffies;
	return 0;
}

static int dscc4_close(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
	u32 ioaddr = dev->base_addr;
	int dev_id;

	del_timer_sync(&dpriv->timer);
	netif_stop_queue(dev);
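
One detail in dscc4_init_one() above deserves a note: the shift-and-or ladder that programs IQLENR0. The interrupt-queue length code, (IRQ_RING_SIZE >> 5) - 1, fits in one 4-bit field, and the three doubling shifts replicate it into all eight nibbles of the 32-bit register so each SCC's queue is given the same length. Below is a minimal user-space sketch of that trick; iqlen_bits() is a hypothetical name, not a function from the driver, and the 64-entry ring size is only an example.

#include <stdio.h>
#include <stdint.h>

/*
 * Replicate the low nibble of the queue-length code across all
 * eight 4-bit fields of a 32-bit register value, as done for
 * IQLENR0 in dscc4_init_one(). Assumes ring_size is a multiple
 * of 32, so the code fits in 4 bits for sizes up to 512.
 * (iqlen_bits is an illustrative helper, not driver code.)
 */
static uint32_t iqlen_bits(uint32_t ring_size)
{
	uint32_t bits = (ring_size >> 5) - 1;

	bits |= bits << 4;	/* 0x0000000N -> 0x000000NN */
	bits |= bits << 8;	/* 0x000000NN -> 0x0000NNNN */
	bits |= bits << 16;	/* 0x0000NNNN -> 0xNNNNNNNN */
	return bits;
}

int main(void)
{
	/* 64-entry rings give a code of 1, hence 0x11111111. */
	printf("IQLENR0 = %#010x\n", (unsigned int)iqlen_bits(64));
	return 0;
}

The same length code appears one line later in the driver, shifted to bit 20 of IQLENR1 as a single field for the global configuration queue.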
