vdso.c
From 「底层驱动开发」 (Low-Level Driver Development) · C code · 623 lines · page 1/2
/*
 *  linux/arch/ppc64/kernel/vdso.c
 *
 *    Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                       <benh@kernel.crashing.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

/*
 * The vDSOs themselves are here
 */
extern char vdso64_start, vdso64_end;
extern char vdso32_start, vdso32_end;

static void *vdso64_kbase = &vdso64_start;
static void *vdso32_kbase = &vdso32_start;

unsigned int vdso64_pages;
unsigned int vdso32_pages;

/* Signal trampolines user addresses */
unsigned long vdso64_rt_sigtramp;
unsigned long vdso32_sigtramp;
unsigned long vdso32_rt_sigtramp;

/* Format of the patch table */
struct vdso_patch_def
{
	u32		pvr_mask, pvr_value;
	const char	*gen_name;
	const char	*fix_name;
};

/* Table of functions to patch based on the CPU type/revision
 *
 * TODO: Improve by adding whole lists for each entry
 */
static struct vdso_patch_def vdso_patches[] = {
	{
		0xffff0000, 0x003a0000,		/* POWER5 */
		"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
	},
	{
		0xffff0000, 0x003b0000,		/* POWER5 */
		"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
	},
};

/*
 * Some infos carried around for each of them during parsing at
 * boot time.
 */
struct lib32_elfinfo
{
	Elf32_Ehdr	*hdr;		/* ptr to ELF */
	Elf32_Sym	*dynsym;	/* ptr to .dynsym section */
	unsigned long	dynsymsize;	/* size of .dynsym section */
	char		*dynstr;	/* ptr to .dynstr section */
	unsigned long	text;		/* offset of .text section in .so */
};

struct lib64_elfinfo
{
	Elf64_Ehdr	*hdr;
	Elf64_Sym	*dynsym;
	unsigned long	dynsymsize;
	char		*dynstr;
	unsigned long	text;
};


#ifdef __DEBUG
static void dump_one_vdso_page(struct page *pg, struct page *upg)
{
	printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
	       page_count(pg),
	       pg->flags);
	if (upg/* && pg != upg*/) {
		printk(" upg: %p (c:%d,f:%08lx)",
		       __va(page_to_pfn(upg) << PAGE_SHIFT),
		       page_count(upg),
		       upg->flags);
	}
	printk("\n");
}

static void dump_vdso_pages(struct vm_area_struct * vma)
{
	int i;

	if (!vma || test_thread_flag(TIF_32BIT)) {
		printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
		for (i=0; i<vdso32_pages; i++) {
			struct page *pg = virt_to_page(vdso32_kbase +
						       i*PAGE_SIZE);
			struct page *upg = (vma && vma->vm_mm) ?
				follow_page(vma->vm_mm, vma->vm_start +
					    i*PAGE_SIZE, 0)
				: NULL;
			dump_one_vdso_page(pg, upg);
		}
	}
	if (!vma || !test_thread_flag(TIF_32BIT)) {
		printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
		for (i=0; i<vdso64_pages; i++) {
			struct page *pg = virt_to_page(vdso64_kbase +
						       i*PAGE_SIZE);
			struct page *upg = (vma && vma->vm_mm) ?
				follow_page(vma->vm_mm, vma->vm_start +
					    i*PAGE_SIZE, 0)
				: NULL;
			dump_one_vdso_page(pg, upg);
		}
	}
}
#endif /* DEBUG */

/*
 * Keep a dummy vma_close for now, it will prevent VMA merging.
 */
static void vdso_vma_close(struct vm_area_struct * vma)
{
}

/*
 * Our nopage() function, maps in the actual vDSO kernel pages, they will
 * be mapped read-only by do_no_page(), and eventually COW'ed, either
 * right away for an initial write access, or by do_wp_page().
 */
static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
				     unsigned long address, int *type)
{
	unsigned long offset = address - vma->vm_start;
	struct page *pg;
	void *vbase = test_thread_flag(TIF_32BIT) ?
		vdso32_kbase : vdso64_kbase;

	DBG("vdso_vma_nopage(current: %s, address: %016lx, off: %lx)\n",
	    current->comm, address, offset);

	if (address < vma->vm_start || address > vma->vm_end)
		return NOPAGE_SIGBUS;

	/*
	 * Last page is systemcfg, special handling here, no get_page() a
	 * this is a reserved page
	 */
	if ((vma->vm_end - address) <= PAGE_SIZE)
		return virt_to_page(systemcfg);

	pg = virt_to_page(vbase + offset);
	get_page(pg);
	DBG(" ->page count: %d\n", page_count(pg));

	return pg;
}

static struct vm_operations_struct vdso_vmops = {
	.close	= vdso_vma_close,
	.nopage	= vdso_vma_nopage,
};

/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;

	if (test_thread_flag(TIF_32BIT)) {
		vdso_pages = vdso32_pages;
		vdso_base = VDSO32_MBASE;
	} else {
		vdso_pages = vdso64_pages;
		vdso_base = VDSO64_MBASE;
	}

	current->thread.vdso_base = 0;

	/* vDSO has a problem and was disabled, just don't "enable" it for the
	 * process
	 */
	if (vdso_pages == 0)
		return 0;

	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma == NULL)
		return -ENOMEM;
	memset(vma, 0, sizeof(*vma));

	/*
	 * pick a base address for the vDSO in process space. We try to put it
	 * at vdso_base which is the "natural" base for it, but we might fail
	 * and end up putting it elsewhere.
	 */
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      vdso_pages << PAGE_SHIFT, 0, 0);
	if (vdso_base & ~PAGE_MASK) {
		kmem_cache_free(vm_area_cachep, vma);
		return (int)vdso_base;
	}

	current->thread.vdso_base = vdso_base;

	vma->vm_mm = mm;
	vma->vm_start = current->thread.vdso_base;

	/*
	 * the VMA size is one page more than the vDSO since systemcfg
	 * is mapped in the last one
	 */
	vma->vm_end = vma->vm_start + ((vdso_pages + 1) << PAGE_SHIFT);

	/*
	 * our vma flags don't have VM_WRITE so by default, the process isn't allowed
	 * to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW on those
	 * pages but it's then your responsibility to never do that on the "data" page
	 * of the vDSO or you'll stop getting kernel updates and your nice userland
	 * gettimeofday will be totally dead.  It's fine to use that for setting
	 * breakpoints in the vDSO code pages though
	 */
	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE |
		VM_MAYEXEC;
	vma->vm_flags |= mm->def_flags;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
	vma->vm_ops = &vdso_vmops;

	down_write(&mm->mmap_sem);
	if (insert_vm_struct(mm, vma)) {
		up_write(&mm->mmap_sem);
		kmem_cache_free(vm_area_cachep, vma);
		return -ENOMEM;
	}
	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	up_write(&mm->mmap_sem);

	return 0;
}

static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf32_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}
	*size = 0;
	return NULL;
}

static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf64_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
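The listing stops here partway through find_section64(); page 2 carries the rest of the symbol-lookup and CPU-specific patching code. As a hedged illustration of how the mapping created by arch_setup_additional_pages() above is consumed: the base address stored in current->thread.vdso_base is normally handed to the new process through its ELF auxiliary vector, and a program can read it back and treat it as an in-memory ELF image. The sketch below assumes an AT_SYSINFO_EHDR auxv entry and a C library that provides getauxval(); neither appears in this page of the file, so both are assumptions.

/* vdso_peek.c - illustration only, not part of vdso.c.
 * Assumes the port exports the vDSO base as AT_SYSINFO_EHDR and that
 * the C library provides getauxval() (glibc 2.16+). */
#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* Base address picked by get_unmapped_area() in
	 * arch_setup_additional_pages() above. */
	unsigned long base = getauxval(AT_SYSINFO_EHDR);

	if (base == 0) {
		puts("no vDSO advertised for this process");
		return 1;
	}

	/* The mapping starts with an ordinary ELF header; resolving a symbol
	 * such as __kernel_gettimeofday from it is the userland counterpart
	 * of what find_section32()/find_section64() do at boot. */
	const unsigned char *ident = (const unsigned char *)base;
	printf("vDSO mapped at 0x%lx, ELF class: %s\n", base,
	       ident[EI_CLASS] == ELFCLASS64 ? "ELF64" : "ELF32");
	return 0;
}

The export itself is not done in this file: binfmt_elf fills the aux vector from the arch's ELF support headers, which is where thread.vdso_base would be turned into the AT_SYSINFO_EHDR entry assumed above.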