📄 machdep.c
字号:
/* if clock not enabled or ipl above 0x17 */ /* change so if ipl > 0x15. this was change to VAX SRM */ if ( !(mfpr(ICCS) & ICCS_RUN) || (mfpr(IPL) >= 0x16)) { s=spl6(); saveiccs = mfpr(ICCS); /* save value */ mtpr(NICR, -n); /* load neg n */ mtpr(ICCS, ICCS_RUN+ICCS_TRANS+ICCS_INT+ICCS_ERR); while ( !(mfpr(ICCS) & ICCS_INT)); /* wait */ /* restore interval counter to previous state */ mtpr(NICR,-1000000/hz); mtpr(ICCS, saveiccs+ICCS_TRANS+ICCS_ERR); /*restore*/ splx(s); } else { /* clock is running so call mircotime */ microtime(&et); et.tv_sec += n/1000000; et.tv_usec += n%1000000; if( et.tv_usec > 1000000) { et.tv_usec -= 1000000; et.tv_sec++; } do microtime(&nowt); while ( nowt.tv_sec < et.tv_sec || (nowt.tv_usec < et.tv_usec && nowt.tv_sec <= et.tv_sec)); } return(0);}microtime (tvp)struct timeval *tvp;{ int s = spl6 (); tvp -> tv_sec = time.tv_sec; tvp -> tv_usec = time.tv_usec + (1000000/hz); if ((cpu != VAX_6400) && (cpu != VAX_6200)) tvp -> tv_usec += mfpr(ICR); while (tvp -> tv_usec > 1000000) { tvp -> tv_sec++; tvp -> tv_usec -= 1000000; } splx (s);}/* * delay for n microseconds, limited to somewhat over 2000 microseconds * using counter for lack of ICR. "n" set for uVAX I. */uInoICRdelay(n)int n;{ n /= 6; while (--n >= 0) ; /* wait */ return(0);}/* * delay for n microseconds, limited to somewhat over 2000 microseconds * using counter for lack of ICR. "n" set for uVAX II. */uIInoICRdelay(n)int n;{ /* * For VAXstation 2000 (AKA, VAXstar) and MicroVAX 2000 (AKA, TEAMmate), * measurements with 1 second granularity (using TOY seconds register) * show a delay of n = 10000000 (10 sec) yields an actual delay * between 11 and 12 seconds (+ 10 to 20 %). -- Fred Canter 8/30/86 */ n /= 2; while (--n >= 0) ; /* wait */ return(0);}/* * CVAXstar/PVAX/PVAX1 (KA420 processor) microdelay routine. * * NOTE: DELAY() is called with caches off because configure() is called * before setcache() in startup() above. This means DELAY() * must be accurate for all all cache states. 
* * The goal is to delay for "n" microseconds. The KA420 CPU * does not have a hardware timer, so we must use a software * insruction counted loop. The software overhead of this * routine increases as the delay decreases (about 20% @ 2000 Usec). * CAUTION: this routine is not intended for delays < 2000 microseconds! * * This routine is somewhat complicated by the fact that the KA420 CPU * has 3 clock speeds (100, 90, 60 nanoseconds) and 2 levels of cache * with 4 possible combinations of enabled/disabled. The strategy is: * * Read the CPU speed from the cache control register (CACR) * and adjust the value of n accordingly. * * The value of n is adjusted again if 1st level or both * caches are off. * * If the 1st level cache is on, we don't care about the state * of the 2nd level cache (1st level cache masks it). * * Here are actual delay times measured on 100ns, 90ns, and * 60ns CPUs when a 10 second delay was requested: * * 100ns - 10.64 seconds - both caches on * 90ns - 10.62 seconds - both caches on * 60ns - 10.33 seconds - both caches on * * 100ns - 10.69 seconds - 1st level cache on, 2nd level cache off * 90ns - 10.68 seconds - 1st level cache on, 2nd level cache off * 60ns - 10.36 seconds - 1st level cache on, 2nd level cache off * * 100ns - 10.45 seconds - 1st level cache off, 2nd level cache on * 90ns - 10.47 seconds - 1st level cache off, 2nd level cache on * 60ns - 10.70 seconds - 1st level cache off, 2nd level cache on * * 100ns - 10.85 seconds - both caches off * 90ns - 10.83 seconds - both caches off * 60ns - 10.42 seconds - both caches off * *//* * This variable defines the state of the * 1st and 2nd level caches, i.e., bit 0 is set * if 1st level cache is on and bit 1 is set if * 2nd level cache is on. The normal state is * both caches on (cvs_cache_on = 3). 
*/int cvs_cache_on = 3;extern int cache_state;extern int cpu_sub_subtype;cVSnoICRdelay(n)int n;{ register struct nb_regs *addr = (struct nb_regs *)nexus; register int cacr; if(cpu_sub_subtype == SB_TMII) { /* TMII uses 90 nanosecond chip * and does not have CACR register */ cacr = 0; /* clear cacr */ n += (n /3); /* add 33% */ n += (n / 10); /* add 10% more */ } else { /* for PVAX */ cacr = addr->nb_cacr & 0x0c0; if (cacr == 0x0c0) { /* 100 nanosecond CPU */ n += (n / 4); /* add 25% */ n += (n / 20); /* add 5% more */ } else if (cacr == 0x080) { /* 90 nanosecond CPU */ n += (n / 3); /* add 33% */ n += (n / 10); /* add 10% more */ } else { /* 60 nanosecond CPU */ n += (n / 2); /* add 50% */ n += (n / 5); /* add 20% more */ n += (n / 5); /* add 20% more */ } } if (cache_state == 0) { /* Caches not enabled yet (probe) */ if (cacr == 0x040) /* 60 nanosecond CPU */ n /= 4; /* divide by 4 */ else n /= 3; /* divide by 3 */ n += (n / 10); /* add 10% */ if (cacr == 0x040) /* 60 nanosecond CPU */ n += (n / 20); /* add 5% */ } else if (cvs_cache_on == 2) { /* Only 2nd level cache on */ n /= 3; /* divide by 3 */ n += (n / 5); /* add 20% */ } else if (cvs_cache_on == 0) { /* Both 1st & 2nd level cache off */ if (cacr == 0x040) /* 60 nanosecond CPU */ n /= 4; /* divide by 4 */ else n /= 3; /* divide by 3 */ n += (n / 10); /* add 10% */ if (cacr == 0x040) /* 60 nanosecond CPU */ n += (n / 20); /* add 5% */ } while (--n >= 0) ; /* wait */ return(0);}/* * delay for n microseconds, limited to somewhat over 2000 microseconds * using SSC (CVAX system support chip) programmable timer for lack of ICR. */uSSCdelay(n)int n;{ int s; s = spl6(); cvqssc->ssc_tnir0 = -n; /* load neg n */ cvqssc->ssc_tcr0 = ICCS_RUN+ICCS_TRANS+ICCS_INT+ICCS_ERR+TCR_STP; while ( !(cvqssc->ssc_tcr0 & ICCS_INT)) ; /* wait */ splx(s); return(0);}/* * delay for n microseconds, limited to somewhat over 2000 microseconds * using RSSC (Rigel system support chip) programmable timer for lack of ICR. 
 */
#ifdef VAX6400
uRSSCdelay(n)
int n;
{
	int s,caller_ipl;

	/*
	 * Only raise (and later restore) IPL if the caller is below
	 * clock level; at or above 0x18 the IPL is left alone and
	 * "s" is intentionally never used.
	 */
	caller_ipl = mfpr(IPL);
	if (caller_ipl < 0x18)
		s = splclock();
	rssc->s_tnir0 = -n;		/* load neg n */
	rssc->s_tcr0 = ICCS_RUN+ICCS_TRANS+ICCS_INT+ICCS_ERR+TCR_STP;
	while ( !(rssc->s_tcr0 & ICCS_INT))
		;		/* wait for timer interrupt bit */
	if (caller_ipl < 0x18)
		splx(s);
	return(0);
}
#endif

/*
 * physstrat: start a raw (physical) I/O transfer through the driver
 * strategy routine "strat", then sleep at priority "prio" until the
 * buffer is marked done.  Asynchronous pageout / N-buffered requests
 * (B_DIRTY|B_RAWASYNC) are not waited for.
 */
physstrat (bp, strat, prio)
register struct buf *bp;
int (*strat) (), prio;
{
	int s;

	(*strat) (bp);
	/* pageout daemon doesn't wait for pushed pages or N-buffered */
	if (bp->b_flags & (B_DIRTY|B_RAWASYNC))
		return;
	s = spl6 ();
	while ((bp->b_flags & B_DONE) == 0)
		sleep ((caddr_t) bp, prio);
	splx (s);
}

extern int cold;

/*
 * badaddr: probe address "addr" for "len" bytes of readable memory,
 * returning nonzero if the access faults.
 *
 * During cold boot the CPU-specific probe routine is called directly.
 * Afterwards, the page holding the stray-interrupt SCB vector is
 * temporarily made kernel-writable (PG_KW), adapter error interrupts
 * are masked per CPU type, the probe is run at spl7, and everything
 * is restored (page back to read-only PG_KR) before returning.
 */
badaddr(addr,len)
caddr_t addr;
int len;
{
	int status,s;
	int *ip;

	if (cold)
		status=(((*cpup->badaddr)(addr, len)));
	else {
		/* PTE for the page containing scb.scb_stray */
		ip = (int *)Sysmap+ (btop(((int)&scb.scb_stray)&0x7fffffff));
		*ip &= ~PG_PROT;
		*ip |= PG_KW;			/* make it writable */
		mtpr(TBIS, &scb.scb_stray);	/* flush TB entry */
		s=spl7();
		switch(cpu) {
		case VAX_8600:
		case VAX_780:
			/* UNIBUS adapters: clear, probe, re-enable */
			ubaclrint();
			status=(((*cpup->badaddr)(addr, len)));
			status|=ubasetint();
			break;
		case VAX_6200:
		case VAX_6400:
		case VAX_8800:
		case VAX_8820:
		case VAX_8200:
		case VAX_9000:
			/* BI-based machines: quiet BI error interrupts */
			biclrint();
			status=(((*cpup->badaddr)(addr, len)));
			bisetint();
			if (cpu == VAX_6200)
				ka6200_clear_xbe();
			if (cpu == VAX_6400)
				ka6400_clear_xbe();
			if (cpu == VAX_9000)
				ka9000_clear_xbe();
			break;
		case VAX_60:
			ka60clrmbint();
			status=(((*cpup->badaddr)(addr, len)));
			enafbiclog();
			ka60setmbint();
			break;
		default:
			status=(((*cpup->badaddr)(addr, len)));
			break;
		}
		*ip &= ~PG_PROT;
		*ip |= PG_KR;			/* back to read-only */
		mtpr(TBIS, &scb.scb_stray);
		splx(s);
	}
	return(status);
}

extern int nNUBA;

/*
 * ubaclrint: clear pending status on every configured UNIBUS adapter
 * by writing each adapter's status register back to itself
 * (write-one-to-clear semantics).
 */
ubaclrint()
{
	struct uba_regs *ubap;
	int i;

	for (i=0; i<nNUBA; i++) {
		if (ubap = uba_hd[i].uh_uba)
			ubap->uba_sr=ubap->uba_sr;
	}
}

/*
 * ubasetint: check each UNIBUS adapter for errors latched during a
 * probe, clear them, and re-enable adapter interrupt/error reporting
 * in the control register.  Returns 1 if any adapter showed an error.
 */
ubasetint()
{
	struct uba_regs *ubap;
	int i,ubaerror;

	ubaerror=0;
	for (i=0; i<nNUBA; i++) {
		if (ubap = uba_hd[i].uh_uba) {
			if(ubap->uba_sr)
				ubaerror=1;
			ubap->uba_sr=ubap->uba_sr;	/* clear status */
			ubap->uba_cr= UBACR_IFS | UBACR_BRIE |
			    UBACR_USEFIE | UBACR_SUEFIE |
			    (ubap->uba_cr & 0x7c000000);
		}
	}
	return(ubaerror);
}

/*
 * interrupt one of the processors
 */
extern u_long *ka60_ip[];
extern char *ka6200_ip[];
extern char *calypso_ip[];
int *ka8820_ip;

/*
 * intrcpu: post an interprocessor interrupt to "whichcpu", using the
 * CPU-family-specific mechanism (IPIR/ICIR register, doorbell memory
 * location, or interrupt-pending word).
 */
intrcpu(whichcpu)
int whichcpu;
{
	union cpusid cpusid;	/* NOTE(review): unused local */

	switch (cpu) {
	case VAX_8200:
		mtpr(IPIR, 1<< whichcpu);
		break;
	case VAX_6200:
		*ka6200_ip[whichcpu] = 0;
		break;
	case VAX_6400:
		*calypso_ip[whichcpu] = 0;
		break;
	case VAX_8820:
		*ka8820_ip = 1 << ((whichcpu * 8) + 7);
		break;
	case VAX_8800:
		mtpr(INOP,0);
		break;
	case VAX_60:
		*ka60_ip[((whichcpu >> 1) & 0xf)] |= FIPD_IPL16;
		break;
	case VAX_9000:
		mtpr(ICIR, 1<< whichcpu);
		break;
	}
}

/*
 * called from init_main to see if a network boot has occurred. If
 * so, available information is read into a local copy of the netblk
 * structure. As per original design the changing of 'roottype' is what
 * triggers init_main to assume a diskless network environment.
 */
char boottype[4];
netbootchk()
{
	extern struct netblk *netblk_ptr;
	extern int roottype;
	extern int swaptype;
	extern int dumptype;
	extern int swapsize;

	/*
	 * determine if remote or local root or swap
	 * get the device name from the structure set
	 * up at autoconf time
	 */
	switch(rpb.devtyp) {
	case BTD$K_QNA:
	case BTD$K_LANCE:
		netblk_ptr = (struct netblk *)&vmb_info.netblk;
		if (netblk_ptr->rootfs == GT_NFS) {
			/*
			 * We've determined that we are running diskless
			 */
			if (rpb.devtyp == BTD$K_QNA)
				bcopy("qe0",boottype,sizeof(boottype));
			else
				bcopy("ln0",boottype,sizeof(boottype));
			roottype= (int) netblk_ptr->rootfs;
			swaptype= (int) netblk_ptr->swapfs;
			swapsize= ((int) netblk_ptr->swapsz) * 1024;
			/*
			 * NOTE(review): scaling a "type" value by 1024
			 * looks suspicious -- possibly meant for a dump
			 * size variable; confirm against init_main.
			 */
			if (netblk_ptr->dmpflg != -1)
				dumptype= ((int) netblk_ptr->dmpflg) * 1024;
		}
		break;
	default:
		break;
	}
}

/*
 * Get pointer to cpusw table entry for the system we are currently running
 * on. The pointer returned by this routine will go into "cpup".
 *
 * The "cpu" variable (ULTRIX system type) is passed in and compared to the
 * system_type entry in the cpusw table for a match.
*/struct cpusw *cpuswitch_entry(cpu) int cpu; /* the ULTRIX system type */{ register int i; /* loop index */ for (i = 0; cpusw[i].system_type != 0; i++) { if (cpusw[i].system_type == cpu) return((struct cpusw *)&cpusw[i]); } panic("processor type not configured");}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -