⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 pgalloc.h

📁 Linux内核源代码：压缩文件，是《Linux内核》一书中的源代码
💻 H
字号:
/* $Id: pgalloc.h,v 1.3 2000/02/23 00:41:38 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 by Ralf Baechle at alii
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/config.h>

/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB entries
 *  - flush_tlb_page(mm, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
	/* Nothing to do on MIPS.  */
}

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

/*
 * Per-CPU "quicklists": singly linked free lists of recently freed
 * page-table pages, kept to avoid round trips to the page allocator.
 * The first word of each free page holds the link to the next free
 * page.  There is no pmd quicklist because the pmd level is folded
 * into the pgd on this architecture (see pmd_alloc() below).
 */
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)

/*
 * Slow-path pgd allocation: get a fresh page from the page allocator,
 * initialize the user half via pgd_init(), then copy the kernel-space
 * entries from the init_mm master page directory so kernel mappings
 * are visible in the new address space.  Returns NULL on allocation
 * failure.
 */
extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;

	if (ret) {
		init = pgd_offset(&init_mm, 0);
		pgd_init((unsigned long)ret);
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}

/*
 * Fast-path pgd allocation: pop the head of the pgd quicklist, falling
 * back to get_pgd_slow() when the list is empty.
 */
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if((ret = pgd_quicklist) != NULL) {
		/* Unlink: first word of the page is the freelist link. */
		pgd_quicklist = (unsigned long *)(*ret);
		/* Slot 0 was clobbered by the link; repair it from slot 1
		 * (presumably all user slots are identical after pgd_init()
		 * -- TODO confirm against pgd_init in pgtable.h/mm code). */
		ret[0] = ret[1];
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

/* Push a pgd page onto the quicklist, using its first word as the link. */
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

/* Return a pgd page to the page allocator. */
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

/* Slow-path pte allocators, defined out of line elsewhere; "address
 * preadjusted" means the caller passes the pte index, already shifted
 * and masked (see pte_alloc() below). */
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);

/*
 * Fast-path pte allocation: pop the head of the pte quicklist.  Unlike
 * get_pgd_fast() there is no slow fallback here -- returns NULL when
 * the list is empty and callers (pte_alloc*) handle the slow path.
 */
extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		/* Repair slot 0, which held the freelist link. */
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

/* Push a pte page onto the quicklist, using its first word as the link. */
extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

/* Return a pte page to the page allocator. */
extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

/* We don't use pmd cache, so these are dummy routines */
extern __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

/* Report a corrupt pmd entry (defined out of line elsewhere). */
extern void __bad_pte(pmd_t *pmd);
extern void __bad_pte_kernel(pmd_t *pmd);

#define pte_free_kernel(pte)    free_pte_fast(pte)
#define pte_free(pte)           free_pte_fast(pte)
#define pgd_free(pgd)           free_pgd_fast(pgd)
#define pgd_alloc()             get_pgd_fast()

/*
 * Return the pte for a kernel virtual address, allocating and
 * installing a pte page in *pmd if none is present yet.  Returns NULL
 * after reporting a corrupt pmd.
 */
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	/* Reduce the address to its pte index within the page table. */
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = get_pte_fast();

		if (page) {
			pmd_val(*pmd) = (unsigned long)page;
			return page + address;
		}
		/* Quicklist empty: slow path installs the pte page itself. */
		return get_pte_kernel_slow(pmd, address);
	}
	if (pmd_bad(*pmd)) {
		__bad_pte_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * Same as pte_alloc_kernel() but for user page tables: uses the
 * user-space slow path and error report.
 */
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = get_pte_fast();

		if (page) {
			pmd_val(*pmd) = (unsigned long)page;
			return page + address;
		}
		return get_pte_slow(pmd, address);
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_free_kernel		pmd_free
#define pmd_alloc_kernel	pmd_alloc

/* Trim the quicklists back toward the given low/high water marks. */
extern int do_check_pgt_cache(int, int);

/*
 * Install a kernel pgd entry into every page directory in the system:
 * each task's active pgd (under the tasklist lock) and every cached
 * pgd still sitting on a quicklist, so a later pgd_alloc() cannot hand
 * out a directory missing the new kernel mapping.
 */
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	pgd_t *pgd;
#ifdef CONFIG_SMP
	int i;
#endif

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);
#ifndef CONFIG_SMP
	/* Walk the (single) pgd quicklist via the first-word links. */
	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
		pgd[address >> PGDIR_SHIFT] = entry;
#else
	/* To pgd_alloc/pgd_free, one holds master kernel lock and so does our
	   callee, so we can modify pgd caches of other CPUs as well.
	 -jj */
	for (i = 0; i < NR_CPUS; i++)
		for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
			pgd[address >> PGDIR_SHIFT] = entry;
#endif
}

#endif /* _ASM_PGALLOC_H */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -