
pmap.c

Collection: early FreeBSD implementation
Language: C
Page 1 of 3
 */
	if (!pmap->pm_segtab) {
		/* change entries in kernel pmap */
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_change_wiring");
#endif
		pte = kvtopte(va);
	} else {
		if (!(pte = pmap_segmap(pmap, va)))
			return;
		pte += (va >> PGSHIFT) & (NPTEPG - 1);
	}
	i = pmaxpagesperpage;
	if (!(pte->pt_entry & PG_WIRED) && p)
		pmap->pm_stats.wired_count += i;
	else if ((pte->pt_entry & PG_WIRED) && !p)
		pmap->pm_stats.wired_count -= i;
	do {
		if (pte->pt_entry & PG_V)
			pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
		pte++;
	} while (--i != 0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif
	if (!pmap->pm_segtab) {
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_extract");
#endif
		pa = kvtopte(va)->pt_entry & PG_FRAME;
	} else {
		register pt_entry_t *pte;

		if (!(pte = pmap_segmap(pmap, va)))
			pa = 0;
		else {
			pte += (va >> PGSHIFT) & (NPTEPG - 1);
			pa = pte->pt_entry & PG_FRAME;
		}
	}
	if (pa)
		pa |= va & PGOFSET;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract: pa %x\n", pa);
#endif
	return (pa);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to insure that a thread about
 *	to run will see a semantically correct world.
 */
void
pmap_update()
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
}

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
#endif
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	register int *p, *end;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	p = (int *)MACH_PHYS_TO_CACHED(phys);
	end = p + PAGE_SIZE / sizeof(int);
	do {
		p[0] = 0;
		p[1] = 0;
		p[2] = 0;
		p[3] = 0;
		p += 4;
	} while (p != end);
}
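/*
 * [Editor's note: illustrative sketch, not part of the original
 * source.]  The frame/offset arithmetic performed by pmap_extract
 * above, restated over plain integers.  The EX_* constants are
 * hypothetical stand-ins for the MIPS PGSHIFT/PGOFSET/PG_FRAME
 * values; only the masking logic is the point.
 */
#define EX_PGSHIFT	12
#define EX_PGOFSET	((1UL << EX_PGSHIFT) - 1)
#define EX_PG_FRAME	(~EX_PGOFSET)

static unsigned long
example_extract(unsigned long pt_entry, unsigned long va)
{
	unsigned long pa;

	pa = pt_entry & EX_PG_FRAME;	/* physical frame from the PTE */
	if (pa)
		pa |= va & EX_PGOFSET;	/* merge byte offset within the page */
	return (pa);			/* 0 means "no mapping", as above */
}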
/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page.
 */
void
pmap_copy_page(src, dst)
	vm_offset_t src, dst;
{
	register int *s, *d, *end;
	register int tmp0, tmp1, tmp2, tmp3;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	s = (int *)MACH_PHYS_TO_CACHED(src);
	d = (int *)MACH_PHYS_TO_CACHED(dst);
	end = s + PAGE_SIZE / sizeof(int);
	do {
		tmp0 = s[0];
		tmp1 = s[1];
		tmp2 = s[2];
		tmp3 = s[3];
		d[0] = tmp0;
		d[1] = tmp1;
		d[2] = tmp2;
		d[3] = tmp3;
		s += 4;
		d += 4;
	} while (s != end);
}

/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa)] &= ~PMAP_ATTR_MOD;
#endif
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa)] &= ~PMAP_ATTR_REF;
#endif
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa)] & PMAP_ATTR_REF);
#else
	return (FALSE);
#endif
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa)] & PMAP_ATTR_MOD);
#else
	return (FALSE);
#endif
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_phys_address(%x)\n", ppn);
#endif
	return (pmax_ptob(ppn));
}
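/*
 * [Editor's note: illustrative sketch, not part of the original
 * source.]  The 4-word unrolled copy from pmap_copy_page above,
 * restated over ordinary buffers.  Grouping the four loads before
 * the four stores presumably helps the MIPS pipeline hide load
 * latency.  The caller must pass a nonzero word count that is a
 * multiple of 4, just as PAGE_SIZE / sizeof(int) is above.
 */
static void
example_copy_words(int *d, const int *s, int nwords)
{
	const int *end = s + nwords;
	int tmp0, tmp1, tmp2, tmp3;

	do {
		tmp0 = s[0];
		tmp1 = s[1];
		tmp2 = s[2];
		tmp3 = s[3];
		d[0] = tmp0;
		d[1] = tmp1;
		d[2] = tmp2;
		d[3] = tmp3;
		s += 4;
		d += 4;
	} while (s != end);
}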
/*
 * Miscellaneous support routines
 */

/*
 * Allocate a hardware PID and return it.
 * It takes almost as much or more time to search the TLB for a
 * specific PID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new PID, we just take the next number. When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over. PID zero is reserved for kernel use.
 * This is called only by switch().
 */
int
pmap_alloc_tlbpid(p)
	register struct proc *p;
{
	register pmap_t pmap;
	register int id;

	pmap = &p->p_vmspace->vm_pmap;
	if (pmap->pm_tlbgen != tlbpid_gen) {
		id = tlbpid_cnt;
		if (id == VMMACH_NUM_PIDS) {
			MachTLBFlush();
			/* reserve tlbpid_gen == 0 to always mean invalid */
			if (++tlbpid_gen == 0)
				tlbpid_gen = 1;
			id = 1;
		}
		tlbpid_cnt = id + 1;
		pmap->pm_tlbpid = id;
		pmap->pm_tlbgen = tlbpid_gen;
	} else
		id = pmap->pm_tlbpid;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_TLBPID)) {
		if (curproc)
			printf("pmap_alloc_tlbpid: curproc %d '%s' ",
				curproc->p_pid, curproc->p_comm);
		else
			printf("pmap_alloc_tlbpid: curproc <none> ");
		printf("segtab %x tlbpid %d pid %d '%s'\n",
			pmap->pm_segtab, id, p->p_pid, p->p_comm);
	}
#endif
	return (id);
}

/*
 * Remove a physical to virtual address translation.
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY))
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				goto fnd;
		}
#ifdef DIAGNOSTIC
		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
		panic("pmap_remove_pv");
#endif
	fnd:
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}

/*
 *	vm_page_alloc1:
 *
 *	Allocate and return a memory cell with no associated object.
 */
vm_page_t
vm_page_alloc1()
{
	register vm_page_t	mem;
	int		spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (vm_page_queue_free.tqh_first == NULL) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return (NULL);
	}

	mem = vm_page_queue_free.tqh_first;
	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	mem->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
	mem->wire_count = 0;

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */
	if (cnt.v_free_count < cnt.v_free_min ||
	    (cnt.v_free_count < cnt.v_free_target &&
	     cnt.v_inactive_count < cnt.v_inactive_target))
		thread_wakeup((int)&vm_pages_needed);
	return (mem);
}
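/*
 * [Editor's note: illustrative sketch, not part of the original
 * source.]  The generation-count PID recycling scheme implemented
 * by pmap_alloc_tlbpid above, restated over a toy pmap.
 * EX_NUM_PIDS and example_tlb_flush() are hypothetical stand-ins
 * for VMMACH_NUM_PIDS and MachTLBFlush().
 */
#define EX_NUM_PIDS	64

struct ex_pmap {
	int	ex_pid;		/* hardware PID, valid only if gen matches */
	int	ex_gen;		/* generation the PID was handed out in */
};

static int ex_pid_cnt = 1;	/* next free PID; 0 is reserved for the kernel */
static int ex_pid_gen = 1;	/* generation 0 always means "invalid" */

static void
example_tlb_flush(void)
{
	/* a real kernel would invalidate every TLB entry here */
}

static int
example_alloc_pid(struct ex_pmap *pmap)
{
	int id;

	if (pmap->ex_gen != ex_pid_gen) {
		/* PID is from an old generation; hand out the next one */
		id = ex_pid_cnt;
		if (id == EX_NUM_PIDS) {
			/* out of PIDs: flush the TLB, start a new generation */
			example_tlb_flush();
			if (++ex_pid_gen == 0)
				ex_pid_gen = 1;
			id = 1;
		}
		ex_pid_cnt = id + 1;
		pmap->ex_pid = id;
		pmap->ex_gen = ex_pid_gen;
	} else
		id = pmap->ex_pid;
	return (id);
}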
/*
 *	vm_page_free1:
 *
 *	Returns the given page to the free list,
 *	disassociating it with any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free1(mem)
	register vm_page_t	mem;
{

	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

	if (!(mem->flags & PG_FICTITIOUS)) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}
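/*
 * [Editor's note: illustrative sketch, not part of the original
 * source.]  The "first entry lives in the header" removal idiom
 * used by pmap_remove_pv above, over a toy singly linked list.
 * A match in the header is handled by copying its successor up
 * into the header, so the header itself is never freed; otherwise
 * the predecessor is tracked and the matching node is unlinked.
 * Unlike the original, which panics under DIAGNOSTIC, a missing
 * entry is silently ignored here.
 */
#include <stdlib.h>

struct ex_pv {
	void		*pv_pmap;
	unsigned long	pv_va;
	struct ex_pv	*pv_next;
};

static void
example_remove_pv(struct ex_pv *pv, void *pmap, unsigned long va)
{
	struct ex_pv *npv;

	if (pv->pv_pmap == pmap && pv->pv_va == va) {
		/* match in the header: copy the next entry up, if any */
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free(npv);
		} else
			pv->pv_pmap = NULL;	/* list is now empty */
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next)
			if (npv->pv_pmap == pmap && npv->pv_va == va)
				break;
		if (npv) {
			pv->pv_next = npv->pv_next;	/* unlink */
			free(npv);
		}
	}
}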
