📄 vm_glue.c
字号:
/*
 * NOTE(review): the opening lines of this header comment were lost when
 * this copy was extracted; item 1 presumably described swapping runnable
 * processes back in by priority — confirm against the original source.
 * 2. If not enough memory, wake the pageout daemon and let it
 *    clear some space.
 */
/*
 * scheduler() -- the process swap-in scheduler.
 *
 * Loops forever: picks the highest-priority swapped-out runnable process
 * (priority = residency time + sleep time - 8 * nice), wires its u-area
 * back into the kernel map, and puts it on the run queue.  Sleeps on
 * &proc0 when there is nothing to swap in, and blocks in VM_WAIT when
 * free memory is too low to bring anyone in.
 */
void
scheduler()
{
	register struct proc *p;
	register int pri;		/* candidate's computed swap-in priority */
	struct proc *pp;		/* best candidate found so far */
	int ppri;			/* priority of best candidate */
	vm_offset_t addr;
	vm_size_t size;

loop:
#ifdef DEBUG
	/* Swapping can be disabled globally when debugging. */
	while (!enableswap)
		tsleep((caddr_t)&proc0, PVM, "noswap", 0);
#endif
	pp = NULL;
	ppri = INT_MIN;
	/*
	 * Scan all processes for the runnable (SRUN), swapped-out
	 * (!P_INMEM) process with the highest swap-in priority.
	 */
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			/* XXX should also penalize based on vm_swrss */
			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: running, procp %x pri %d\n", pp, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep((caddr_t)&proc0, PVM, "scheduler", 0);
		goto loop;
	}

	/*
	 * We would like to bring someone in.
	 * This part is really bogus cuz we could deadlock on memory
	 * despite our feeble check.
	 * XXX should require at least vm_swrss / 2
	 */
	size = round_page(ctob(UPAGES));	/* size of the u-area in bytes */
	addr = (vm_offset_t) p->p_addr;
	if (cnt.v_free_count > atop(size)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
			       p->p_pid, p->p_comm, p->p_addr,
			       ppri, cnt.v_free_count);
#endif
		/* Wire the u-area back into physical memory. */
		vm_map_pageable(kernel_map, addr, addr+size, FALSE);
		/*
		 * Some architectures need to be notified when the
		 * user area has moved to new physical page(s) (e.g.
		 * see pmax/pmax/vm_machdep.c).
		 */
		cpu_swapin(p);
		/*
		 * Block clock interrupts while marking the process
		 * resident and requeueing it, so the state change is
		 * atomic with respect to the clock/scheduler.
		 */
		(void) splstatclock();
		if (p->p_stat == SRUN)
			setrunqueue(p);
		p->p_flag |= P_INMEM;
		(void) spl0();
		p->p_swtime = 0;	/* reset residency timer */
		goto loop;
	}
	/*
	 * Not enough memory, jab the pageout daemon and wait til the
	 * coast is clear.
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: no room for pid %d(%s), free %d\n",
		       p->p_pid, p->p_comm, cnt.v_free_count);
#endif
	(void) splhigh();
	VM_WAIT;
	(void) spl0();
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: room again, free %d\n", cnt.v_free_count);
#endif
	goto loop;
}

/*
 * A process may be swapped out only if it is resident (P_INMEM) and has
 * none of the disqualifying flags set: system process, swap disabled,
 * exiting, or in the middle of physical I/O.
 */
#define	swappable(p)							\
	(((p)->p_flag &							\
	    (P_SYSTEM | P_INMEM | P_NOSWAP | P_WEXIT | P_PHYSIO)) == P_INMEM)

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;	/* best sleeper / best runner */
	int outpri, outpri2;		/* their respective "scores" */
	int didswap = 0;		/* number of procs actually swapped */
	extern int maxslp;

#ifdef DEBUG
	if (!enableswap)
		return;
#endif
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			/* Track the longest-resident runnable process. */
			if (p->p_swtime > outpri2) {
				outp2 = p;
				outpri2 = p->p_swtime;
			}
			continue;
		case SSLEEP:
		case SSTOP:
			/*
			 * Anything asleep/stopped longer than maxslp is
			 * swapped unconditionally; otherwise remember the
			 * longest sleeper as a fallback candidate.
			 */
			if (p->p_slptime >= maxslp) {
				swapout(p);
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}
	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (UPAGES pages).
	 */
	if (didswap == 0 &&
	    cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
		if ((p = outp) == 0)
			p = outp2;	/* no sleeper; fall back to runner */
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %x\n", p);
#endif
		if (p)
			swapout(p);
	}
}

/*
 * swapout(p) -- evict one process's u-area.
 *
 * Unwires the process's u-area pages from the kernel map (except on i386,
 * see below), clears P_INMEM, and removes the process from the run queue
 * if it was runnable.  The caller is expected to have checked swappable().
 */
void
swapout(p)
	register struct proc *p;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
		       p->p_pid, p->p_comm, p->p_addr, p->p_stat,
		       p->p_slptime, cnt.v_free_count);
#endif
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
#if defined(hp300) || defined(luna68k)
	/*
	 * Ugh!  u-area is double mapped to a fixed address behind the
	 * back of the VM system and accesses are usually through that
	 * address rather than the per-process address.  Hence reference
	 * and modify information are recorded at the fixed address and
	 * lost at context switch time.  We assume the u-struct and
	 * kernel stack are always accessed/modified and force it to be so.
	 */
	{
		register int i;
		volatile long tmp;

		/*
		 * Touch (read then write back) one word per u-area page
		 * so the hardware sets reference/modify bits; volatile
		 * keeps the compiler from optimizing the accesses away.
		 */
		for (i = 0; i < UPAGES; i++) {
			tmp = *(long *)addr; *(long *)addr = tmp;
			addr += NBPG;
		}
		addr = (vm_offset_t) p->p_addr;
	}
#endif
#ifdef mips
	/*
	 * Be sure to save the floating point coprocessor state before
	 * paging out the u-struct.
	 */
	{
		extern struct proc *machFPCurProcPtr;

		if (p == machFPCurProcPtr) {
			MachSaveCurFPState(p);
			machFPCurProcPtr = (struct proc *)0;
		}
	}
#endif
#ifndef	i386 /* temporary measure till we find spontaineous unwire of kstack */
	/* Unwire the u-area so the pageout daemon may reclaim its pages. */
	vm_map_pageable(kernel_map, addr, addr+size, TRUE);
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
#endif
	/* Block all interrupts while changing process residency state. */
	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();
	p->p_swtime = 0;	/* now counts time spent swapped out */
}

/*
 * The rest of these routines fake thread handling
 */

/*
 * Record the event the current process intends to wait on; the actual
 * sleep happens later in thread_block().  `ruptible' is unused here.
 */
void
assert_wait(event, ruptible)
	void *event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}

/*
 * Sleep on the event previously registered via assert_wait(), if any.
 * Interrupts are blocked across the check to avoid missing a wakeup.
 */
void
thread_block()
{
	int s = splhigh();

	if (curproc->p_thread)
		tsleep(curproc->p_thread, PVM, "thrd_block", 0);
	splx(s);
}

/*
 * Atomically (with respect to interrupts) record the wait event, release
 * the given simple lock, and sleep on the event.
 */
void
thread_sleep(event, lock, ruptible)
	void *event;
	simple_lock_t lock;
	boolean_t ruptible;
{
	int s = splhigh();

#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread)
		tsleep(event, PVM, "thrd_sleep", 0);
	splx(s);
}

/*
 * Wake all sleepers on `event', with interrupts blocked.
 */
void
thread_wakeup(event)
	void *event;
{
	int s = splhigh();

	wakeup(event);
	splx(s);
}

/*
 * DEBUG stuff
 */

int indent = 0;				/* current iprintf indentation, in spaces */

#include <machine/stdarg.h>		/* see subr_prf.c */

/*
 * iprintf -- printf with leading indentation (`indent' spaces, emitted
 * as tabs plus spaces).
 *
 * NOTE(review): "%r" looks like the 4.4BSD kernel-printf extension that
 * formats via a caller-supplied va_list -- confirm against subr_prf.c.
 */
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	/* Emit the indentation as full tabs, then remaining spaces. */
	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -