📄 trap.c
字号:
u.u_oweupc &= ~SOWEUPC; } goto out; case T_ARITHTRAP+USER: u.u_code = code; i = SIGFPE; break; /* * If the user SP is above the stack segment, * grow the stack automatically. */ case T_SEGFLT+USER: if (grow((unsigned)locr0[SP]) || grow(code)) goto out; u.u_code = code; i = SIGSEGV; break; case T_TABLEFLT: /* allow page table faults in kernel mode */ case T_TABLEFLT+USER: /* page table fault */ panic("ptable fault"); case T_PAGEFLT: /* allow page faults in kernel mode */ case T_PAGEFLT+USER: /* page fault */ i = u.u_error; pagein(code, 0); u.u_error = i; if (type == T_PAGEFLT) return; goto out; case T_BPTFLT+USER: /* bpt instruction fault */ case T_TRCTRAP+USER: /* trace trap */ locr0[PS] &= ~PSL_T; i = SIGTRAP; break; case T_XFCFLT+USER: /* xfc instruction fault */ i = SIGEMT; break; case T_COMPATFLT+USER: /* compatibility mode fault */ u.u_acflag |= ACOMPAT; u.u_code = code; i = SIGILL; break; /* vector processor disabled fault */ case T_VDISFLT: /* kernel mode */ vp_disabled_fault_handler (VP_DIS_KERN_MODE); goto out; break; /* vector processor disabled fault */ case T_VDISFLT+USER: /* user mode */ vp_disabled_fault_handler (VP_DIS_USER_MODE); goto out; break; } psignal(u.u_procp, i);out: if (USERMODE(locr0[PS])) if (CURRENT_CPUDATA->cpu_hlock) panic("holding lock on trap exit"); CURRENT_CPUDATA->cpu_trap++; p = u.u_procp; if (p->p_cursig || ISSIG(p,0)) psig(); /* * take advantage of the fact that longword long-word * aligned writes are atomic here: p_pri could change * as soon as we released the lock anyway. */ p->p_pri = p->p_usrpri; if (CURRENT_CPUDATA->cpu_runrun) { /* * Since we are u.u_procp, clock will normally just change * our priority without moving us from one queue to another * (since the running process is not on a queue.) * If that happened after we setrq ourselves but before we * swtch()'ed, we might not be on the queue indicated by * our priority. 
*/ pcpu = CURRENT_CPUDATA; pcpu->cpu_proc->p_hlock = pcpu->cpu_hlock; pcpu->cpu_hlock=0; (void)spl6(); smp_lock(&lk_rq,LK_RETRY); p->p_pri = p->p_usrpri; setrq(u.u_procp); u.u_ru.ru_nivcsw++; swtch(); pcpu = CURRENT_CPUDATA; pcpu->cpu_hlock = pcpu->cpu_proc->p_hlock; pcpu->cpu_proc->p_hlock=0; } if (u.u_prof.pr_scale > 1) { int ticks; struct timeval *tv = &u.u_ru.ru_stime; ticks = ((tv->tv_sec - syst.tv_sec) * 1000 + (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000); if (ticks) addupc(locr0[PC], &u.u_prof, ticks); }}/* * Called from the trap handler when a system call occurs *//*ARGSUSED*/syscall(sp, type, code, pc, psl) unsigned code;{ register int *locr0 = ((int *)&psl)-PS; register caddr_t params; /* known to be r10 below */ register int i; /* known to be r9 below */ register struct sysent *callp; register struct proc *p; register struct nameidata *ndp = &u.u_nd; struct cpudata *pcpu; unsigned int saveaffinity; struct timeval syst; unsigned ocode = 0; syst = u.u_ru.ru_stime; if (!USERMODE(locr0[PS])) panic("syscall"); u.u_ar0 = locr0; if (code == 139) { /* getdprop */ /* XXX */ sigcleanup(); /* XXX */ goto done; /* XXX */ } params = (caddr_t)locr0[AP] + NBPW; u.u_error = 0; callp = (code >= nsysent) ? &sysent[63] : &sysent[code]; if (callp == sysent) { ocode = fuword(params); params += NBPW; callp = (ocode >= nsysent) ? 
&sysent[63] : &sysent[ocode]; } if (i = callp->sy_narg * sizeof (int)) {#ifndef lint asm("prober $3,r9,(r10)"); /* GROT */ asm("bnequ ok"); /* GROT */ u.u_error = EFAULT; /* GROT */ goto bad; /* GROT */ok:asm("ok:"); /* GROT */ asm("movc3 r9,(r10),_u+U_ARG"); /* GROT */#else bcopy(params, (caddr_t)u.u_arg, (u_int)i);#endif } u.u_ap = u.u_arg; ndp->ni_dirp = (caddr_t)u.u_arg[0]; u.u_r.r_val1 = 0; u.u_r.r_val2 = locr0[R1]; if (setjmp(&u.u_qsave)) { if (CURRENT_CPUDATA->cpu_hlock) { while(CURRENT_CPUDATA->cpu_hlock) { mprintf("lock held on syscall exit %x pc %x\n", CURRENT_CPUDATA->cpu_hlock, CURRENT_CPUDATA->cpu_hlock->l_pc); smp_unlock(CURRENT_CPUDATA->cpu_hlock); } } if (u.u_error == 0 && u.u_eosys == JUSTRETURN) u.u_error = EINTR; } else { u.u_eosys = JUSTRETURN; if ( audswitch #ifdef SYS_TRACE +traceopens#endif SYS_TRACE ) { u.u_gno_indx = 0; u.u_narg = callp->sy_narg; u.u_event = ocode == 0 ? code : ocode;#ifdef SYS_TRACE /* trace it just before we do it! (only if open) */ if (traceopens) { syscall_trace(u.u_event,callp->sy_narg,BEFORE); (*(callp->sy_call))(); syscall_trace(u.u_event,callp->sy_narg,AFTER); } else #endif SYS_TRACE (*(callp->sy_call))(); if ( aud_param[u.u_event][AUD_NPARAM-1] != 'X' ) AUDIT_CALL ( u.u_event, u.u_error, u.u_r.r_val1, AUD_GNO|AUD_HDR|AUD_PRM|AUD_RES, (int *)0, 0 ); } else { (*(callp->sy_call))(); } } if (u.u_eosys == RESTARTSYS) { pc = pc - 2; if (code > 63) pc -= 2; } else if (u.u_error) {#ifndef lintbad:#endif locr0[R0] = u.u_error; locr0[PS] |= PSL_C; /* carry bit */ } else { locr0[R0] = u.u_r.r_val1; locr0[R1] = u.u_r.r_val2; locr0[PS] &= ~PSL_C; }done: p = u.u_procp; if (p->p_cursig || ISSIG(p,0)) psig(); pcpu = CURRENT_CPUDATA; pcpu->cpu_syscall++; if (pcpu->cpu_hlock) panic("holding lock on syscall exit"); p->p_pri = p->p_usrpri; if (smp) { /* might need to fix-up affinity mask if we did a longjmp out * of an ASMP section of code */ p = u.u_procp; pcpu = CURRENT_CPUDATA; if (p->p_vpcontext) { /* this process is a vector 
process */ if (p->p_vpcontext->vpc_state == VPC_SAVED) { /* this process's vector context has already * been saved, so it can run on any vector * processor. set affinity to the pre-vector * affinity and'ed with the mask of processors * with vector units */ switch_affinity (p->p_vpcontext->vpc_affinity & vpmask); pcpu = CURRENT_CPUDATA; } else { /* our vp context is loaded on this cpu * (vpc_state == VPC_LOAD or * vpc_state == * VPC_LIMBO), * so force the affinity to be this cpu. * debug notes: * - since this should always be true anyway, * maybe we should panic if * p_affinity != cpu_mask ??? * - vpc_state should never be * VPC_WAIT. A state of wait means that * the execution of a vector instruction * has cause the allocation of a * vpcontext area. The state remains * wait only as long a there is a search * being made for an available vector * processor. Since no other user * instruction will be executed while the * vector instruction is pending, a * system call could not be made. */ p->p_affinity = pcpu->cpu_mask; } } else { /* This is not a vector process, so it can run * anywhere */ p->p_affinity = ALLCPU; } } if (pcpu->cpu_runrun) { /* * Since we are u.u_procp, clock will normally just * change our priority without moving us from one * queue to another (since the running process is * not on * a queue.) If that happened after we * setrq ourselves but before we swtch'ed, we might * not be on the queue indicated by our priority. */ (void) spl6(); smp_lock(&lk_rq,LK_RETRY); setrq(p); u.u_ru.ru_nivcsw++; swtch(); } if (u.u_prof.pr_scale > 1) { int ticks; struct timeval *tv = &u.u_ru.ru_stime; ticks = ((tv->tv_sec - syst.tv_sec) * 1000 + (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000); if (ticks) addupc(locr0[PC], &u.u_prof, ticks); }}/* * nonexistent system call-- signal process (may want to handle it) * flag error if process won't see signal immediately * Q: should we do that all the time ?? 
*/
nosys()
{
	/*
	 * Deliver SIGSYS for an unimplemented system call so the
	 * process may handle it.  When the signal is held or ignored
	 * the process would never notice, so flag EINVAL as well.
	 */
	if (u.u_signal[SIGSYS] == SIG_HOLD ||
	    u.u_signal[SIGSYS] == SIG_IGN)
		u.u_error = EINVAL;
	psignal(u.u_procp, SIGSYS);
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -