📄 machdep.c
字号:
/*
 * NOTE(review): this chunk begins mid-definition.  The opening of
 * sendsig() and of its local "struct sigframe" lie before the start
 * of this view; below is the tail of that structure followed by the
 * remainder of the sendsig() body.  Code tokens left byte-identical;
 * only line structure restored and comments added.
 */
	int	sf_signum;		/* signal number: first argument to the handler */
	int	sf_code;		/* fault code (from u.u_code) for hw-fault signals, else 0 */
	struct	sigcontext *sf_scp;	/* sigcontext pointer passed to the handler */
	int	(*sf_handler) ();	/* handler address invoked by the user-mode sigcode */
	struct	sigcontext *sf_scpcopy;	/* duplicate scp, consumed by sigcleanup (see below) */
    } *fp; /* known to be r9 */
	register int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/* sigcontext is carved out immediately below the current user SP */
	scp = (struct sigcontext *) regs[SP] - 1;
	/* switch to the alternate signal stack if requested and not already on it */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *) u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		fp = (struct sigframe *) scp - 1;
	/*
	 * Must build signal handler context on stack to be returned to
	 * so that rei instruction in sigcode will pop ps and pc
	 * off correct stack.  The remainder of the signal state
	 * used in calling the handler must be placed on the stack
	 * on which the handler is to operate so that the calls
	 * in sigcode will save the registers and such correctly.
	 */
	if (!oonstack && (int) fp <= USRSTACK - ctob (u.u_ssize))
		grow ((unsigned) fp);
	;	/* null statement -- see "Avoid asm() label botch" below */
#ifndef lint
	/* probew: check user-mode write access to the frame (fp known to be r9) */
	asm ("probew $3,$20,(r9)");
	asm ("jeql bad");
#else
	if (useracc ((caddr_t) fp, sizeof (struct sigframe), 1))
		goto bad;
#endif
	if (!u.u_onstack && (int) scp <= USRSTACK - ctob (u.u_ssize))
		grow ((unsigned) scp);
	; /* Avoid asm() label botch */
#ifndef lint
	/* probew: check user-mode write access to the sigcontext (scp in r11) */
	asm ("probew $3,$20,(r11)");
	asm ("beql bad");
#else
	if (useracc ((caddr_t) scp, sizeof (struct sigcontext) , 1))
		goto bad;
#endif
	fp -> sf_signum = sig;
	/* hand the fault code to the handler only for hardware-fault signals */
	if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGFPE){
		fp -> sf_code = u.u_code;
		u.u_code = 0;
	} else
		fp -> sf_code = 0;
	fp -> sf_scp = scp;
	fp -> sf_handler = p;
	/*
	 * Duplicate the pointer to the sigcontext structure.
	 * This one doesn't get popped by the ret, and is used
	 * by sigcleanup to reset the signal state on inward return.
	 */
	fp -> sf_scpcopy = scp;
	/* sigcontext goes on previous stack */
	scp -> sc_onstack = oonstack;
	scp -> sc_mask = mask;
	/* setup rei */
	scp -> sc_sp = (int) & scp -> sc_pc;
	scp -> sc_pc = regs[PC];
	scp -> sc_ps = regs[PS];
	/* redirect the process: new SP is the frame, PC is the signal trampoline */
	regs[SP] = (int) fp;
	regs[PS] &= ~(PSL_CM | PSL_FPD);
	regs[PC] = (int) u.u_pcb.pcb_sigc;
	return;

bad:
	asm ("bad:");	/* assembler target for the conditional branches above */
	/*
	 * Process has trashed its stack; give it an illegal
	 * instruction to halt it in its tracks.
	 */
	u.u_signal[SIGILL] = SIG_DFL;
	sig = sigmask(SIGILL);
	u.u_procp -> p_sigignore &= ~sig;
	u.u_procp -> p_sigcatch &= ~sig;
	u.u_procp -> p_sigmask &= ~sig;
	psignal (u.u_procp, SIGILL);
}

/*
 * Routine to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Pop these values in preparation for rei which
 * follows return from this routine.
 */
sigcleanup ()
{
	register struct sigcontext *scp;

	/* user SP points at the sigcontext pointer (sf_scpcopy) left by sendsig */
	scp = (struct sigcontext *) fuword ((caddr_t) u.u_ar0[SP]);
	if ((int) scp == -1)
		return;	/* -1: fuword fault (or unreadable user stack) */
	;	/* null statement so the asm label trick below assembles cleanly */
#ifndef lint
	/* only probe 12 here because that's all we need */
	asm ("prober $3,$12,(r11)");
	asm ("bnequ 1f; ret; 1:");
#else
	if (useracc ((caddr_t) scp, sizeof (*scp), 0))
		return;
#endif
	u.u_onstack = scp -> sc_onstack & 01;
	/* SIGKILL and SIGSTOP may never be masked */
	u.u_procp -> p_sigmask = scp -> sc_mask & ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
	u.u_ar0[SP] = scp -> sc_sp;
}

#ifdef notdef
/*
 * NOTE(review): compiled out (#ifdef notdef).  Unwinds one VAX call
 * frame from the user stack into the saved user registers
 * (u.u_ar0[]), then pops pc/ps the way rei would.
 */
dorti ()
{
	struct frame frame;
	register int sp;
	register int reg, mask;
	extern int ipcreg[];

	/* fetch the call frame the user FP points at */
	(void) copyin ((caddr_t) u.u_ar0[FP], (caddr_t) & frame, sizeof (frame));
	sp = u.u_ar0[FP] + sizeof (frame);
	u.u_ar0[PC] = frame.fr_savpc;
	u.u_ar0[FP] = frame.fr_savfp;
	u.u_ar0[AP] = frame.fr_savap;
	/* restore r0..r11 according to the frame's register save mask */
	mask = frame.fr_mask;
	for (reg = 0; reg <= 11; reg++) {
		if (mask & 1) {
			u.u_ar0[ipcreg[reg]] = fuword ((caddr_t) sp);
			sp += 4;
		}
		mask >>= 1;
	}
	sp += frame.fr_spa;
	u.u_ar0[PS] = (u.u_ar0[PS] & 0xffff0000) | frame.fr_psw;
	if (frame.fr_s)
		sp += 4 + 4 * (fuword ((caddr_t) sp) & 0xff);
	/* phew, now the rei */
	u.u_ar0[PC] = fuword ((caddr_t) sp);
	sp += 4;
	u.u_ar0[PS] = fuword ((caddr_t) sp);
	sp += 4;
	/* force required user-mode PSL bits on, privileged bits off */
	u.u_ar0[PS] |= PSL_USERSET;
	u.u_ar0[PS] &= ~PSL_USERCLR;
	u.u_ar0[SP] = (int) sp;
}
#endif

/*
 * this routine sets the cache to the state passed. enabled/disabled
 *
 * The actual routines are entered through cpusw, and are located
 * in the appropriate cpu dependent routine kaXXX.c
 */
setcache(state)
int state;
{
	if ((*cpup->setcache)(state) < 0 )
		panic("No setcache routine configured\n");
}

/*
 * Memenable enables the memory controller corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs.  Thus we report at most once per memintvl.
 *
 * The actual routines are entered through cpusw, and are located
 * in the appropriate cpu dependent routine kaXXX.c
 */
int memintvl = MEMINTVL;	/* re-enable interval; presumably seconds (scaled by hz below) */

timer_action ()
{
	if ((*cpup->timer_action)() < 0 )
		panic("No timer_action routine configured\n");
	else if (memintvl > 0)
		/* reschedule ourselves memintvl * hz clock ticks from now */
		timeout (timer_action, (caddr_t) 0, memintvl * hz);
}

/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts.  It calls the appropriate routine which looks
 * to see which memory controllers have unreported errors,
 * reports them, and disables further reporting for a time
 * on those controller.
 *
 * The actual routines are entered through cpusw, and are located
 * in the appropriate cpu dependent routine kaXXX.c
 */
memerr ()
{
	if ((*cpup->softerr_intr)() < 0 )
		panic("No softerr_intr handler configured\n");
}

/*
 * Invalidate all pte's in a single cluster
 * (one TBIS mtpr per page of the cluster).
 */
tbiscl (v)
unsigned v;
{
	register caddr_t addr; /* must be first reg var */
	register int i;
	asm (".set TBIS,58");	/* define TBIS register number for the asm below */
#ifdef vax
	/* Quiesce vector processor if necessary */
	VPSYNC();
#endif vax
	addr = ptob (v);
	for (i = 0; i < CLSIZE; i++) {
#ifdef lint
		mtpr (TBIS, addr);
#else
		/* addr lives in r11 -- guaranteed by "must be first reg var" above */
		asm ("mtpr r11,$TBIS");
#endif
		addr += NBPG;
	}
	tbsync();
}

int waittime = -1;	/* < 0 until the sync-disks pass below has begun */
int shutting_down = 0;	/* incremented as soon as boot() starts */

/*
 * Machine-dependent halt/reboot.
 * paniced:  RB_PANIC when called from panic() (forces a crash dump on reboot).
 * arghowto: RB_* flag bits (RB_NOSYNC, RB_HALT, ...).
 * Syncs and unmounts filesystems (unless RB_NOSYNC or panicking), quiesces
 * SCS ports, then either halts or reboots via the cpu-specific mechanism.
 * Does not return.
 */
boot(paniced, arghowto)
int paniced, arghowto;
{
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */
	register struct mount *mp;
	register struct gnode *gp;
	struct gnode *rgp;
	extern struct gnode *fref();
	extern struct gnode *acctp;
	extern struct gnode *savacctp;
	extern struct cred *acctcred;
	extern struct lock_t lk_acct;
	extern void ( *scs_disable )();
	void ( *disable )();
	int s;

#ifdef lint
	howto = 0; devtype = 0;
	printf ("howto %d, devtype %d\n", arghowto, devtype);
#endif
	howto = arghowto;
	rundown++;
	shutting_down++;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
		/*
		 * If accounting is on, turn it off.  This allows the usr
		 * filesystem to be umounted cleanly.
		 */
		smp_lock(&lk_acct, LK_RETRY);
		if (savacctp) {
			acctp = savacctp;
			savacctp = NULL;
		}
		if (gp = acctp) {
			gfs_lock(gp);
			if (acctp != NULL) {
				acctp = NULL;
				GSYNCG(gp, acctcred);
				crfree(acctcred);
				acctcred = NULL;
				gput(gp);
			} else
				gfs_unlock(gp);
		}
		smp_unlock(&lk_acct);
		waittime = 0;
		(void) spl4 ();
		gfs_gupdat(NODEV);
		if (paniced != RB_PANIC) {
			update();	/* flush dirty blocks */
			/* unmount all but root fs */
			/* include paranoid checks for active unmounts */
			/* and preclude new unmounts */
			for (mp = &mount[NMOUNT - 1]; mp > mount; mp--) {
				smp_lock(&mp->m_lk, LK_RETRY);
				if ((mp->m_flgs & MTE_DONE) &&
				    !(mp->m_flgs & MTE_UMOUNT)) {
					mp->m_flgs |= MTE_UMOUNT;
					smp_unlock(&mp->m_lk);
					GUMOUNT(mp, 1);
				} else {
					smp_unlock(&mp->m_lk);
				}
			}
		}
		printf ("syncing disks... ");
		/*
		 * spl5 because we don't want to have a user program
		 * scheduled
		 */
		s = spl5();
		bflush (NODEV, (struct gnode *) 0, 1);
		splx(s);
		printf ("done\n");
	}
	/*
	 * Optionally invoke the SCS shutdown routine to disable all local
	 * ports.
	 */
	if(( disable = scs_disable )) {
		u_long save_ipl = splextreme();
		(void)(*disable)();
		(void)splx( save_ipl );
	}
#if defined(VAX8600)
	if(cpu == VAX_8600) {
		register int i;
		int *ip;
		struct sbia_regs *sbiad;
		extern int ioanum;
		extern char Sysbase[];

		/* temporarily make the SBIA map page writable, unjam each SBIA */
		sbiad = (struct sbia_regs *)ioa;
		ip = (int *)Sysmap+1;
		*ip &= ~PG_PROT;
		*ip |= PG_KW;
		mtpr(TBIS, Sysbase);
		for(i=0; i<ioanum; i++) {
			if(BADADDR((caddr_t)sbiad, 4))
				continue;
			sbiad->sbi_unjam = 0;
			sbiad = (struct sbia_regs *)((char *)sbiad + cpup->pc_ioasize);
		}
		/* restore read-only protection on the map page */
		ip = (int *)Sysmap+1;
		*ip &= ~PG_PROT;
		*ip |= PG_KR;
		mtpr(TBIS, Sysbase);
	}
#endif VAX8600
	splx (0x1f);	/* extreme priority */
	devtype = major (rootdev);
	if (howto & RB_HALT) {
		mtpr (IPL, 0x1f);
#if defined (MVAX) || defined (VAX420)
		if( cpu == MVAX_II || cpu == VAXSTAR || cpu == C_VAXSTAR ) {
			/* ask the console mailbox for halt mode, then spin in halt */
			if(cpu_subtype == ST_MVAXII)
				((struct qb_regs *)nexus)->qb_cpmbx = RB_HALTMD;
			if(cpu_subtype == ST_VAXSTAR)
				((struct nb_regs *)nexus)->nb_cpmbx = RB_VS_HALTMD;
			for (;;)
				asm ("halt");
		}
#endif MVAX || VAX420
#ifdef VAX6200
		if (cpu == VAX_6200)
			ka6200halt();
#endif VAX6200
#ifdef VAX6400
		if (cpu == VAX_6400)
			ka6400halt();
#endif VAX6400
#ifdef VAX9000
		if (cpu == VAX_9000)
			ka9000halt();
#endif VAX9000
#if defined (VAX3600) || defined (VAX60)
		if (cpu == VAX_3600 || cpu == VAX_3400 ||
		    cpu == VAX_3900 || cpu == VAX_60) {
			cvqssc->ssc_cpmbx = RB_CV_HALTMD;
			for (;;)
				asm ("halt");
		}
#endif VAX3600 || VAX60
		/* halt the slaves please */
		if (cpu == VAX_8200)
			ka820slavehalt();
		printf ("\nTHE PROCESSOR CAN NOW BE HALTED.\n");
		for (;;);
	} else {
		if (paniced == RB_PANIC) {
			doadump ();	/* TXDB_BOOT's itself */
			/* NOTREACHED */
		}
#ifdef VAX6200
		if (cpu==VAX_6200)
			ka6200reboot();
#endif VAX6200
#ifdef VAX6400
		if (cpu==VAX_6400)
			ka6400reboot();
#endif VAX6400
#ifdef VAX9000
		if (cpu==VAX_9000)
			ka9000reboot();
#endif VAX9000
#ifdef VAX8800
		if ((cpu == VAX_8800) || (cpu == VAX_8820)) {
			cons_putc(N_COMM | N_BOOT_ME);
			asm("halt");
		}
#endif
		cons_putc (TXDB_BOOT);
	}
#if defined(VAX750) || defined(VAX730) || defined(MVAX) || defined(VAX8200) || defined(VAX3600) || defined(VAX60) || defined(VAX420) || defined(VAX6200) || defined(VAX6400) || defined(VAX9000)
	if ((cpu != VAX_780) && (cpu != VAX_8600)) {
		asm ("movl r11,r5");	/* boot flags go in r5 */
	}
	if (cpu == MVAX_II || cpu == VAXSTAR || cpu == C_VAXSTAR) {
		if(cpu_subtype == ST_MVAXII)
			((struct qb_regs *)nexus)->qb_cpmbx = RB_REBOOT;
		if(cpu_subtype == ST_VAXSTAR)
			((struct nb_regs *)nexus)->nb_cpmbx = RB_VS_REBOOT;
	}
	if (cpu == VAX_3600 || cpu == VAX_3400 ||
	    cpu == VAX_3900 || cpu == VAX_60) {
		cvqssc->ssc_cpmbx = RB_CV_REBOOT;
	}
#endif
	for (;;)
		asm ("halt");
	/* NOTREACHED */
}

/*
 * Emit one character on the console through the cpu-dependent routine.
 */
cons_putc (c)
{
	if ((*cpup->cons_putc)(c) < 0 )
		panic("No cons_putc routine configured\n");
}

/*
 * Machine check handlers.
 *
 * The actual routines are entered through cpusw, and are located
 * in the appropriate cpu dependent routine kaXXX.c
 */
machinecheck (cmcf)
caddr_t cmcf;
{
	if ((*cpup->machcheck)(cmcf) < 0 )
		panic("No machine check handler configured\n");
}

/*
 * delay for n microseconds,
 * call through cpu switch to specific delay routine.
 */
microdelay(usecs)
int usecs;
{
	if ((*cpup->microdelay)(usecs) < 0)
		panic("No microdelay routine configured\n");
}

/*
 * delay for n microseconds, limited to somewhat over 2000 microseconds
 * using standard vax ICR.
 *
 * NOTE(review): this definition is truncated here -- the remainder of
 * its body lies beyond the end of this chunk.
 */
uICRdelay(n)
int n;
{
	struct timeval et, nowt;
	int saveiccs,s;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -