/* machdep.c */
page_iflush(addr)
{
	/*
	 * A bug in the R3000 that causes the cache isolate to fail if the
	 * write buffers are full is worked around via wbflush.
	 */
	int s;

	s = splextreme();
	(*(cpup->wbflush))();
	(*(cpup->page_iflush))(addr);
	splx(s);
}

page_dflush(addr)
{
	/*
	 * A bug in the R3000 that causes the cache isolate to fail if the
	 * write buffers are full is worked around via wbflush.
	 */
	int s;

	s = splextreme();
	(*(cpup->wbflush))();
	(*(cpup->page_dflush))(addr);
	splx(s);
}

/*
 * Call system specific initialization routine (for splm, intr vectors, etc).
 */
cpu_initialize()
{
	if ((*(cpup->init))() < 0)
		panic("No initialization routine configured\n");
}

getspl()
{
	return((*(cpup->getspl))());
}

whatspl()
{
	return((*(cpup->whatspl))());
}

setlock(l)
	struct lock_t *l;
{
	if (!smp)			/* If 1 CPU, don't need interlocked operations */
		if (l->l_lock)
			return(0);
		else {
			l->l_lock = 1;
			return(1);
		}
	else
		return(bbssi(31, l));	/* Set bit 31 with interlocked operation */
}

clearlock(l)
	struct lock_t *l;
{
	if (!smp)			/* If 1 CPU, don't need interlocked operations */
		l->l_lock = 0;
	else
		return(bbcci(31, l));	/* Clear bit 31 with interlocked operation */
}

struct second_tlb {			/* dump tlb information */
	union tlb_hi tlb_high;
	union tlb_lo tlb_low;
} second_tlb[NTLBENTRIES];

stop_secondary_cpu()
{
	struct cpudata *pcpu;

	pcpu = CURRENT_CPUDATA;
	save();				/* save process's context in u pcb */
	/* What to do to flush write buffers */
	save_tlb(second_tlb);		/* for debugging purposes */
	pcpu->cpu_state &= ~CPU_RUN;
	pcpu->cpu_state |= CPU_STOP;
	for (;;)			/* Something else ?? */
		;
}

sig_parent_swtch()
{
	/* the caller to sig_parent_swtch() should have got lk_rq */
	CURRENT_CPUDATA->cpu_state |= CPU_SIGPARENT;
	swtch();
}

set_bit_atomic(bitpos, base)
	unsigned long bitpos;
	unsigned long *base;
{
	if (!smp) {			/* If 1 CPU, don't use interlocked instructions */
		if ((*base) & (1 << bitpos))
			return(0);
		*(base) |= (1 << bitpos);
		return(1);
	} else
		return(bbssi(bitpos, base));	/* MP, use interlocked operation */
}

clear_bit_atomic(bitpos, base)
	unsigned long bitpos;
	unsigned long *base;
{
	if (!smp)			/* If 1 CPU, don't use interlocked instructions */
		if ((*base) & (1 << bitpos)) {
			*(base) &= ~(1 << bitpos);
			return(1);
		} else
			return(0);
	else
		return(bbcci(bitpos, base));	/* MP, use interlocked operation */
}
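/*
 * Illustrative sketch (not part of the original source): typical use of
 * the setlock()/clearlock() primitives above as a simple spin lock.  On
 * the uniprocessor path setlock() returns 1 once the lock is held, so a
 * caller spins until that happens.  The example_ name and the choice of
 * splextreme() are assumptions for illustration only; the kernel data
 * structures in this file are locked through smp_lock()/smp_unlock()
 * instead.
 */
#ifdef notdef
example_spin_lock(l)
	struct lock_t *l;
{
	int s;

	s = splextreme();		/* keep interrupts out while spinning */
	while (setlock(l) == 0)		/* loop until the lock bit is ours */
		continue;
	/* ... critical section ... */
	clearlock(l);
	splx(s);
}
#endif /* notdef */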
extern char *kn5800_ip[];

intrcpu(whichcpu)
	int whichcpu;
{
	switch (cpu) {
	case DS_5800:
		*kn5800_ip[whichcpu] = 0;	/* this should be a byte-type instruction */
		break;
	default:
		break;
	}
}

cpuident()
{
	switch (cpu) {
	case DS_5800:
		return(kn5800_cpuid());
	default:
		return(0);
	}
}

int start_stack_held = 0;	/* indicates if any cpu is using the startup stack */
int current_secondary;		/* secondary cpu currently being started up */
struct proc *idle_proc;		/* idle proc of the currently starting cpu */
extern struct user *boot_idle_up;
extern unsigned cputype_word, fptype_word;

secondary_startup()
{
	int i;
	unsigned cputype, fptype;

	splextreme();
	clear_bev();
	for (i = 0; i < NTLBENTRIES; i++)
		invaltlb(i);
	tlbwired(TLBWIREDBASE, 0, UADDR,
	    K0_TO_PHYS(boot_idle_up) >> (PGSHIFT - PTE_PFNSHIFT) | PG_M | PG_V | PG_G);
	u.u_pcb.pcb_cpuptr = cpudata[current_secondary];
	flush_cache();
	cputype = get_cpu_irr();
	if (cputype != cputype_word) {
		printf("WARNING: cpu %d version (0x%X) does not match with boot cpu version (0x%X)\n",
		    current_secondary, cputype, cputype_word);
	}
	fptype = get_fpc_irr();
	if ((fptype &= IRR_IMP_MASK) != fptype_word) {
		printf("WARNING: FPU version (0x%X) of cpu %d does not match with boot cpu's FPU version (0x%X)\n",
		    fptype, current_secondary, fptype_word);
	}
	fp_init();
	secondary_init();		/* cpu specific secondary initialization */
	splclock();			/* somebody lowers the ipl */
	smp_lock(&lk_rq, LK_RETRY);	/* synchs with the swtch of the idle proc */
	CURRENT_CPUDATA->cpu_proc = idle_proc;
	init_tlbpid();
	remrq(idle_proc);
	get_tlbpid(idle_proc);
	smp_unlock(&lk_rq);
	idle_proc->p_cpumask = CURRENT_CPUDATA->cpu_mask;
	CURRENT_CPUDATA->cpu_state = CPU_RUN;
	startrtclock();
	resume(pcbb(idle_proc));	/* resumes the idle proc on the secondary; see init_idleproc() below */
}

secondary_init()
{
	/* should be through cpusw when more MP MIPS systems become ready */
	switch (cpu) {
	case DS_5800:
		kn5800_init_secondary();
		return;
	default:
		return;
	}
}

init_idleproc(cpunum)
{
	int idlepid, found, s;
	register struct proc *cptr, *nq;

	while (start_stack_held)	/* if some other cpu is using the startup stack */
		sleep((caddr_t)&start_stack_held, PZERO + 1);
	start_stack_held = 1;
	idlepid = get_proc_slot();
	if (idlepid == 0) {
		tablefull("proc");
		u.u_error = EAGAIN;
		start_stack_held = 0;
		wakeup((caddr_t)&start_stack_held);
		return(0);		/* get_proc_slot sets u.u_error to EAGAIN */
	}
	current_secondary = cpunum;
	idle_proc = &proc[idlepid];
	if (newproc(idle_proc, 0)) {
		/*
		 * Avoid a race condition when more than 1 processor is already
		 * running.  Else the parent may be put to sleep after the
		 * wakeup in the child below.
		 */
		switch_affinity(boot_cpu_mask);
		bcopy("idleproc", (caddr_t)&u.u_comm[0], MAXCOMLEN);
		idle_proc->p_affinity = 1 << current_secondary;
		idle_proc->p_type |= SSYS;
		idle_proc->p_sched |= SLOAD;
		splclock();
		smp_lock(&lk_rq, LK_RETRY);
		setrq(idle_proc);
		idle_proc->p_mips_flag |= SIDLEP;
		/* wake up the parent process when the child proc is all set up */
		wakeup((caddr_t)idle_proc);
		if (save()) {
			if (CURRENT_CPUDATA->cpu_num != current_secondary)
				panic("idle proc not back on the correct secondary");
			CURRENT_CPUDATA->cpu_idleproc = idle_proc;
			s = splclock();
			smp_lock(&lk_procqs, LK_RETRY);
			cptr = idle_proc->p_pptr->p_cptr;
			found = 0;
			/* make swapper the parent */
			while (cptr) {
				if (cptr == idle_proc) {
					idle_proc->p_pptr->p_cptr = cptr->p_osptr;
					nq = idle_proc->p_osptr;
					if (nq != NULL)
						nq->p_ysptr = NULL;
					if (proc[0].p_cptr)
						proc[0].p_cptr->p_ysptr = idle_proc;
					idle_proc->p_osptr = proc[0].p_cptr;
					idle_proc->p_ysptr = NULL;
					proc[0].p_cptr = idle_proc;
					idle_proc->p_pptr = &proc[0];
					idle_proc->p_ppid = 0;
					found = 1;
					break;
				}
				cptr = cptr->p_osptr;
			}
			if (found == 0)
				panic("init_idleproc: not found in child queue");
			smp_unlock(&lk_procqs);
			splx(s);
			start_stack_held = 0;
			wakeup((caddr_t)&start_stack_held);
			idle();
		}
		start_idleproc();	/* start the primary's idle process */
	}
	/*
	 * Make the parent sleep till the child sets up the idle proc
	 * ready for the secondary cpu.
	 */
	sleep((caddr_t)idle_proc, PZERO - 1);
	return(1);			/* success */
}

u_long ci_ucode = 0;

alert_cpu()	/* inform every cpu that this process is available */
{
	int s;

	s = splclock();
	smp_lock(&lk_rq, LK_RETRY);
	whichqs = ALLCPU;
	smp_unlock(&lk_rq);
	splx(s);
}

crc(ctp, inicrc, len, dp)
	register char *ctp;
	register u_long inicrc;
	register u_long len;
	register char *dp;
{
	register u_long index;

	while (len > 0) {
		inicrc = (((char)inicrc ^ *dp++) & 0x0ff) | (inicrc & 0xffffff00);
		index = 0x0f & inicrc;
		inicrc = inicrc >> 4;
		inicrc ^= *((u_long *)ctp + index);
		index = 0x0f & inicrc;
		inicrc = inicrc >> 4;
		inicrc ^= *((u_long *)ctp + index);
		--len;
	}
	return(inicrc);
}
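/*
 * Illustrative sketch (not part of the original source): crc() above
 * consumes one input byte per iteration with two 4-bit table lookups,
 * so callers pass a 16-entry u_long table through ctp.  The routine
 * below shows one common way such a nibble table is generated, using
 * the reflected CRC-32 polynomial 0xEDB88320 as an assumed example;
 * the polynomial and table actually used by callers of crc() in this
 * kernel are not shown here and may differ.
 */
#ifdef notdef
example_build_crc_table(tbl)
	register u_long *tbl;		/* receives the 16 nibble entries */
{
	register u_long c;
	register int i, k;

	for (i = 0; i < 16; i++) {
		c = (u_long)i;
		for (k = 0; k < 4; k++)	/* advance the CRC by one bit, four times */
			c = (c & 1) ? (0xEDB88320 ^ (c >> 1)) : (c >> 1);
		tbl[i] = c;
	}
}
#endif /* notdef */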
/*
 * Routine to check if the process has read or write access
 * to a segment of user space given by its virtual address
 * and length in bytes.
 *
 * useracc must be called in process context.
 *
 * Parameters:
 *	va	Virtual address.
 *	len	Number of bytes to check.  Must be greater than 0.
 *	rw	= 1 for read access check, 0 for write access check.
 *
 * Returns:
 *	0	Access not allowed.
 *	1	Access is allowed.
 */
useracc(va, len, rw)
	unsigned va;			/* Process virtual address */
	int len;			/* Length in bytes, must be > 0 */
	int rw;				/* mode of access check */
{
	register struct proc *p;
	register unsigned vpn;
	register struct pte *pte;
	register int prot;
	int smindex;

	if (!IS_KUSEG(va)) {
		/* Address not in user space, NO Access */
		return(0);
	}
	prot = rw ? PROT_URKR : PROT_UW;
	p = u.u_procp;
	/*
	 * Check access rights for each page; exit
	 * the loop and return 0 if one of the pages
	 * cannot be accessed.
	 */
	for (vpn = btop(va); vpn <= btop(va + len - 1); vpn++) {
		pte = vtopte(p, vpn);
		if (pte == 0)
			return(0);	/* No access */
		/*
		 * If I/O space is mapped to virtual addresses
		 * then return 0.  This prevents undesired access
		 * to address space created by the mmap system call.
		 */
		if (isasmsv(p, vpn, &smindex) &&	/* SHMEM */
		    (p->p_sm[smindex].sm_p->sm_perm.mode & IPC_MMAP))
			return(0);
		if (pte->pg_prot < prot) {
			/*
			 * Access is not allowed.  If SHMEM, check
			 * elsewhere for true protection.
			 */
			if (isasmsv(p, vpn, &smindex) &&	/* SHMEM */
			    (p->p_sm[smindex].sm_pflag) >= prot)
				continue;
			return(0);
		}
	}
	return(1);			/* Access allowed */
}

/*
 * Routine to handle stray interrupts.
 *
 * Parameters:
 *	ep	Pointer to exception frame.
 *	vec	The vector obtained from reading the vector register.
 */
stray(ep, vec)
	int *ep;
	int vec;
{
	extern int cpu;

	if (cold) {
		if ((cpu == DS_5400) || (cpu == DS_5500))
			cvec = (vec - 0x200) & 0xfffe;
		else
			cvec = vec;
	} else {
		cprintf("Stray intr, vec=0x%x\n", vec);
	}
}

/*
 * Passive release.
 */
passive_release(ep, vec)
	int *ep;
	int vec;
{
}

int mips_spl_arch_type = 0;

spl_init()
{
	if (cpu == DS_5000_100)
		mips_spl_arch_type = 1;
}

/*
 * Routine to convert TURBOchannel boot slot into logical controller number.
 */
getbootctlr()
{
	extern char **ub_argv;
	extern int rex_base;
	extern char bootctlr[];
	extern char consmagic[];
	extern struct tc_slot tc_slot[];
	char *cp;
	int i;

	bootctlr[0] = '\0';
	cp = (char *)&console_magic;
	for (i = 0; i < 4; i++)
		consmagic[i] = *cp++;
	consmagic[4] = '\0';
	if (!rex_base) {
		return(0);
	} else {
		cp = (char *)&ub_argv[1][0];
		while (*cp != '/')
			cp++;
		cp--;
		if ((strncmp(cp + 2, "rz", 2) == 0) ||
		    (strncmp(cp + 2, "tz", 2) == 0)) {
			for (i = 0; i <= 8; i++) {
				if ((strcmp(tc_slot[i].devname, "asc") == 0) &&
				    (tc_slot[i].slot == *cp - '0')) {
					bootctlr[0] = tc_slot[i].unit + '0';
					continue;
				}
			}
		} else {
			if ((strncmp(cp + 2, "mop", 3) == 0) ||
			    (strncmp(cp + 2, "tftp", 4) == 0)) {
				for (i = 0; i <= 8; i++) {
					if (((strcmp(tc_slot[i].devname, "ln") == 0) ||
					    (strcmp(tc_slot[i].devname, "fza") == 0)) &&
					    (tc_slot[i].slot == *cp - '0')) {
						bootctlr[0] = tc_slot[i].unit + '0';
						continue;
					}
				}
			}
		}
		return(0);
	}
}
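/*
 * Illustrative sketch (not part of the original source): how a caller
 * would typically use useracc() (above) before touching a user buffer.
 * The wrapper name, the "writing" flag and the EFAULT return are
 * assumptions for illustration only; only the rw convention (1 = read
 * access check, 0 = write access check) comes from the useracc()
 * comment block above.
 */
#ifdef notdef
example_check_user_buffer(va, len, writing)
	unsigned va;			/* user virtual address */
	int len;			/* length in bytes, > 0 */
	int writing;			/* nonzero if the kernel will write into va */
{
	/* writing into user space needs the write access check (rw == 0) */
	if (useracc(va, len, writing ? 0 : 1) == 0)
		return(EFAULT);		/* caller maps this to an error return */
	return(0);
}
#endif /* notdef */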