
📄 if_dmc.c

📁 Digital's Unix operating system VAX 4.2 source code
💻 C
📖 Page 1 of 4
			sc->sc_errctrs[DMCZ_RXDCRC] = 0xff;
		else
			sc->sc_errctrs[DMCZ_RXDCRC] += tmp_cnt;
	}
	if ( (tmp_cnt = base_table[DMCD_TXHCRC] - sc->sc_basectrs[DMCZ_TXHCRC]) ) {
		if ( tmp_cnt < 0 || (sc->sc_errctrs[DMCZ_TXHCRC] + tmp_cnt) > 0x0ff )
			sc->sc_errctrs[DMCZ_TXHCRC] = 0xff;
		else
			sc->sc_errctrs[DMCZ_TXHCRC] += tmp_cnt;
	}
	if ( (tmp_cnt = base_table[DMCD_TXDCRC] - sc->sc_basectrs[DMCZ_TXDCRC]) ) {
		if ( tmp_cnt < 0 || (sc->sc_errctrs[DMCZ_TXDCRC] + tmp_cnt) > 0x0ff )
			sc->sc_errctrs[DMCZ_TXDCRC] = 0xff;
		else
			sc->sc_errctrs[DMCZ_TXDCRC] += tmp_cnt;
	}
	if ( (tmp_cnt = base_table[DMCD_RXNOBUF] - sc->sc_basectrs[DMCZ_RXNOBUF]) ) {
		if ( tmp_cnt < 0 || (sc->sc_errctrs[DMCZ_RXNOBUF] + tmp_cnt) > 0x0ff )
			sc->sc_errctrs[DMCZ_RXNOBUF] = 0xff;
		else
			sc->sc_errctrs[DMCZ_RXNOBUF] += tmp_cnt;
	}
	if ( (tmp_cnt = base_table[DMCD_TXNOBUF] - sc->sc_basectrs[DMCZ_TXNOBUF]) ) {
		if ( tmp_cnt < 0 || (sc->sc_errctrs[DMCZ_TXNOBUF] + tmp_cnt) > 0x0ff )
			sc->sc_errctrs[DMCZ_TXNOBUF] = 0xff;
		else
			sc->sc_errctrs[DMCZ_TXNOBUF] += tmp_cnt;
	}
	if ( (tmp_cnt = base_table[DMCD_REMOTETMO] - sc->sc_basectrs[DMCZ_REMOTETMO]) ) {
		if ( tmp_cnt < 0 || (sc->sc_errctrs[DMCZ_REMOTETMO] + tmp_cnt) > 0x0ff )
			sc->sc_errctrs[DMCZ_REMOTETMO] = 0xff;
		else
			sc->sc_errctrs[DMCZ_REMOTETMO] += tmp_cnt;
	}
	if ( (tmp_cnt = base_table[DMCD_LOCALTMO] - sc->sc_basectrs[DMCZ_LOCALTMO]) ) {
		if ( tmp_cnt < 0 || (sc->sc_errctrs[DMCZ_LOCALTMO] + tmp_cnt) > 0x0ff )
			sc->sc_errctrs[DMCZ_LOCALTMO] = 0xff;
		else
			sc->sc_errctrs[DMCZ_LOCALTMO] += tmp_cnt;
	}
	bcopy(&base_table[DMCD_RXNOBUF], &sc->sc_basectrs[DMCZ_RXNOBUF], DMCZ_SIZE);
}

/*
 * Routines supporting UNIBUS network interfaces.
 */

/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this for each receive and transmit buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 */
dmc_ubainit(ifu, uban, hlen, nmr, bufres)
	register struct dmcuba *ifu;
	int uban, hlen, nmr;
	struct dmcbufres *bufres;
{
	register caddr_t cp, dp;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	int i, ncl;

	ncl = clrnd(nmr + (hlen? CLSIZE: 0)) / CLSIZE;
	if (ifu->ifu_r[0].ifrw_addr)
		/*
		 * If the first read buffer has a non-zero
		 * address, it means we have already allocated core.
		 */
		cp = ifu->ifu_r[0].ifrw_addr - (hlen? (CLBYTES - hlen): 0);
	else {
		cp = m_clalloc(bufres->ntot * ncl, MPG_SPACE);
		if (cp == 0)
			return (0);
		ifu->ifu_hlen = hlen;
		ifu->ifu_uban = uban;
		ifu->ifu_uba = uba_hd[uban].uh_uba;
		dp = cp + (hlen? (CLBYTES - hlen): 0);
		for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[bufres->nrcv]; ifrw++) {
			ifrw->ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
		for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[bufres->nxmt]; ifxp++) {
			ifxp->x_ifrw.ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
	}
	/* allocate for receive ring */
	for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[bufres->nrcv]; ifrw++) {
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifrw *rw;

			for (rw = ifu->ifu_r; rw < ifrw; rw++)
				ubarelse(ifu->ifu_uban, &rw->ifrw_info);
			goto bad;
		}
	}
	/* and now transmit ring */
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[bufres->nxmt]; ifxp++) {
		ifrw = &ifxp->x_ifrw;
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifxmt *xp;

			for (xp = ifu->ifu_w; xp < ifxp; xp++)
				ubarelse(ifu->ifu_uban, &xp->x_ifrw.ifrw_info);
			for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[bufres->nrcv]; ifrw++)
				ubarelse(ifu->ifu_uban, &ifrw->ifrw_info);
			goto bad;
		}
		for (i = 0; i < nmr; i++)
			ifxp->x_map[i] = ifrw->ifrw_mr[i];
		ifxp->x_xswapd = 0;
	}
	return (1);
bad:
	m_pgfree(cp, bufres->ntot * ncl);
	ifu->ifu_r[0].ifrw_addr = 0;
	return (0);
}

/*
 * Set up an ifrw structure by allocating UNIBUS map registers,
 * possibly a buffered data path, and initializing the fields of
 * the ifrw structure to minimize run-time overhead.
 */
static
dmc_ubaalloc(ifu, ifrw, nmr)
	struct dmcuba *ifu;
	register struct ifrw *ifrw;
	int nmr;
{
	register int info;

	info =
	    uballoc(ifu->ifu_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->ifu_hlen,
	        ifu->ifu_flags);
	if (info == 0)
		return (0);
	ifrw->ifrw_info = info;
	ifrw->ifrw_bdp = UBAI_BDP(info);
	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
	ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + (ifu->ifu_hlen? 1: 0)];
	return (1);
}

/*
 * Pull read data off an interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 */
struct mbuf *
dmc_get(ifu, ifrw, totlen, off0, dmc_header)
	register struct dmcuba *ifu;
	register struct ifrw *ifrw;
	int totlen, off0, dmc_header;
{
	struct mbuf *top, **mp, *m;
	int off = off0, len;
	register caddr_t cp = ifrw->ifrw_addr;
	register short hlen = 0;

	if ( dmc_header ) {
		hlen = ifu->ifu_hlen;
		cp += ifu->ifu_hlen;
	}
	top = 0;
	mp = &top;
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == 0)
			goto bad;
		if (off) {
			len = totlen - off;
			cp = ifrw->ifrw_addr + hlen + off;
		} else
			len = totlen;
		if (len >= CLBYTES) {
			struct mbuf *p;
			struct pte *cpte, *ppte;
			int x, *ip, i;

			MCLGET(m, p);
			if (p == 0)
				goto nopage;
			len = m->m_len = CLBYTES;
			if (!claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to UNIBUS with new page p,
			 * as quick form of copy.  Remap UNIBUS and invalidate.
			 */
			cpte = &kmempt[mtocl(cp)];
			ppte = &kmempt[mtocl(p)];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;

				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ =
				    cpte++->pg_pfnum|ifrw->ifrw_proto;
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)p);
				p += NBPG / sizeof (*p);
			}
			goto nocopy;
		}
nopage:
		m->m_len = MIN(MLEN, len);
		m->m_off = MMINOFF;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			off += m->m_len;
			if (off == totlen) {
				cp = ifrw->ifrw_addr + hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
	}
	return (top);
bad:
	m_freem(top);
	return (0);
}

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 */
dmcput(ifu, n, m)
	struct dmcuba *ifu;
	int n;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp;
	register struct ifxmt *ifxp;
	register struct ifrw *ifrw;
	register int i;
	int xswapd = 0;
	int x, cc, t;
	caddr_t dp;

	ifxp = &ifu->ifu_w[n];
	ifrw = &ifxp->x_ifrw;
	cp = ifrw->ifrw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp) && m->m_len == CLBYTES) {
			struct pte *pte; int *ip;

			pte = &kmempt[mtocl(dp)];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ =
				    ifrw->ifrw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			mp = m->m_next;
			m->m_next = ifxp->x_xtofree;
			ifxp->x_xtofree = m;
			cp += m->m_len;
		} else {
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifxp->x_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
	cc = cp - ifrw->ifrw_addr;
	x = ((cc - ifu->ifu_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifxp->x_xswapd &= ~xswapd;
	while (i = ffs(ifxp->x_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifxp->x_xswapd &= ~(1<<i);
		i *= CLSIZE;
		for (t = 0; t < CLSIZE; t++) {
			ifrw->ifrw_mr[i] = ifxp->x_map[i];
			i++;
		}
	}
	ifxp->x_xswapd |= xswapd;
	return (cc);
}

/*
 * Restart after a fatal error.
 * Clear device and reinitialize.
 */
dmcrestart(unit)
	int unit;
{
	register struct dmc_softc *sc = &dmc_softc[unit];
	register struct uba_device *ui = dmcinfo[unit];
	register struct dmcdevice *addr;
	register struct ifxmt *ifxp;
	register int i;
	register struct mbuf *m;
	struct dmcuba *ifu;

	addr = (struct dmcdevice *)ui->ui_addr;
	ifu = &sc->sc_ifuba;
#ifdef DEBUG
	/* dump base table */
	printf("dmc%d base table:\n", unit);
	for (i = 0; i < sizeof (struct dmc_base); i++)
		printf("%o\n", dmc_base[unit].d_base[i]);
#endif
	/*
	 * Let the DMR finish the MCLR.  At 1 Mbit, it should do so
	 * in about a max of 6.4 milliseconds with diagnostics enabled.
	 */
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	/* Did the timer expire or did the DMR finish? */
	if ((addr->bsel1 & DMC_RUN) == 0) {
		printf("dmc%d: M820 Test Failed\n", unit);
		return;
	}
	/* purge send queue */
	IF_DEQUEUE(&sc->sc_if.if_snd, m);
	while (m) {
		m_freem(m);
		IF_DEQUEUE(&sc->sc_if.if_snd, m);
	}
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[sc->sc_bufres.nxmt]; ifxp++) {
		if (ifxp->x_xtofree) {
			(void) m_freem(ifxp->x_xtofree);
			ifxp->x_xtofree = 0;
		}
	}
	/* restart DMC */
	dmcinit(unit);
	sc->sc_flag &= ~DMC_RESTART;
	sc->sc_if.if_collisions++;	/* why not? */
}

/*
 * Check to see that transmitted packets don't
 * lose interrupts.  The device has to be active.
 */
dmcwatch()
{
	register struct uba_device *ui;
	register struct dmc_softc *sc;
	struct dmcdevice *addr;
	register int i;

	for (i = 0; i < NDMC; i++) {
		sc = &dmc_softc[i];
		if ((sc->sc_flag & DMC_ACTIVE) == 0)
			continue;
		if ((ui = dmcinfo[i]) == 0 || ui->ui_alive == 0)
			continue;
		if (sc->sc_oused) {
			sc->sc_nticks++;
			if (sc->sc_nticks > dmc_timeout) {
				sc->sc_nticks = 0;
				addr = (struct dmcdevice *)ui->ui_addr;
				printd("dmc%d hung: bsel0=%b bsel2=%b\n", i,
				    addr->bsel0 & 0xff, DMC0BITS,
				    addr->bsel2 & 0xff, DMC2BITS);
				dmcrestart(i);
			}
		}
	}
	timeout(dmcwatch, (caddr_t) 0, hz);
}

/*
 * Check to make sure requestor is privileged.  suser is
 * called when not on interrupt stack or when
 * there is a system process.
 */
dmcsuser()
{
	if ( ! (movpsl() & PSL_IS))
		return (suser());
	return (1);
}
#endif
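
Note on the error-counter blocks at the top of this listing: each one repeats the same saturating update, namely take the difference between the device's base-table count and the last snapshot and fold it into an 8-bit error counter, pinning the counter at 0xff when the difference is negative (the device count wrapped) or the sum would overflow. The standalone sketch below illustrates that pattern only; the helper name saturating_add and the small test in main are illustrative and are not part of the driver.

#include <stdio.h>

/*
 * Illustrative helper (not part of the driver): fold the difference
 * between a device base-table count and its last snapshot into an
 * 8-bit error counter, saturating at 0xff on wrap or overflow.
 */
static void
saturating_add(unsigned char *errctr, int base_count, int snapshot)
{
	int delta = base_count - snapshot;

	if (delta == 0)
		return;
	if (delta < 0 || *errctr + delta > 0xff)
		*errctr = 0xff;		/* pin at the 8-bit maximum */
	else
		*errctr += delta;
}

int
main(void)
{
	unsigned char ctr = 0xf0;

	saturating_add(&ctr, 30, 10);	/* a delta of 20 would overflow, so the counter pins */
	printf("%#x\n", ctr);		/* prints 0xff */
	return 0;
}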
