
📄 he.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));
	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}

static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
#ifdef USE_TPD_POOL
	struct he_tpd *__tpd;
#endif

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
#ifdef USE_TPD_POOL
		tpd = NULL;
		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
						TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}
#else
		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
#endif

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
			if (tpd->vcc)
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

			goto next_tbrq_entry;
		}

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;
		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
#ifdef USE_TPD_POOL
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
		tpd->inuse = 0;
#endif
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(++he_dev->tbrq_head));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
						G0_TBRQ_H + (group * 16));
	}
}

static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbpl_head;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbpl_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}

#ifdef USE_RBPS
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbps_head;
	int moved = 0;

	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
						RBPS_MASK(he_dev->rbps_tail+1));

		/* table 3.42 -- rbps_tail should never be set to rbps_head */
		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbps_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
}
#endif /* USE_RBPS */

static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
	spin_lock_irqsave(&he_dev->global_lock, flags);
#endif

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
					he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				}
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
				he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
				he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
#ifdef USE_TASKLET
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
#endif
}

static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
#ifdef USE_TASKLET
		tasklet_schedule(&he_dev->tasklet);
#else
		he_tasklet((unsigned long) he_dev);
#endif
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);
}

static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);

			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */

			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								PCI_DMA_TODEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
#ifdef USE_TPD_POOL
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
			tpd->inuse = 0;
#endif
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */

#ifdef USE_TPD_POOL
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
	he_dev->tpdrq_tail->cid = cid;
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}

static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	he_vcc->iov_tail = he_vcc->iov_head;
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0
