
📄 kprobes.c

📁 Linux 2.6.19 kernel source code, before patching
💻 C
📖 Page 1 of 2
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
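/*
 * Illustration (not part of kprobes.c): how an architecture with function
 * descriptors might override the default lookup above. A hedged sketch,
 * loosely modeled on the powerpc64 ABI, where a symbol's address points to
 * a descriptor whose first word is the real entry point; the actual
 * override lives in that arch's asm/kprobes.h.
 */
#if 0
#define kprobe_lookup_name(name, addr)				\
do {								\
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);	\
	if (addr)						\
		/* dereference the function descriptor */	\
		addr = *(kprobe_opcode_t **)addr;		\
} while (0)
#endif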
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n",p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect it and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
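/*
 * Illustration (not part of kprobes.c): how arch code typically consumes
 * get_insn_slot()/free_insn_slot(). A sketch loosely modeled on
 * arch_prepare_kprobe() for x86_64; the real copy length and instruction
 * fixups are architecture specific.
 */
#if 0
static int example_arch_prepare_kprobe(struct kprobe *p)
{
	/* reserve an executable out-of-line slot ... */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	/* ... and copy the probed instruction there for single-stepping */
	memcpy(p->ainsn.insn, p->addr,
	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	return 0;
}
#endif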
/* Return 1 if all garbages are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no-one is preempted on the garbages */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbages */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif

/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 * 	- under the kprobe_mutex - during kprobe_[un]register()
 * 				OR
 * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
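/*
 * Illustration (not part of kprobes.c): the aggregate handlers above come
 * into play when two probes share one address. Registering both kprobes
 * below routes the second through register_aggr_kprobe() (further down in
 * this file), after which each hit runs handler_a and handler_b via
 * aggr_pre_handler(). The target symbol and handlers are hypothetical.
 */
#if 0
static int handler_a(struct kprobe *p, struct pt_regs *regs) { return 0; }
static int handler_b(struct kprobe *p, struct pt_regs *regs) { return 0; }

static struct kprobe kp_a = {
	.symbol_name = "do_fork",	/* hypothetical probe target */
	.pre_handler = handler_a,
};
static struct kprobe kp_b = {
	.symbol_name = "do_fork",
	.pre_handler = handler_b,
};

/* register_kprobe(&kp_a); register_kprobe(&kp_b);  -- the second call
 * takes the register_aggr_kprobe() path. */
#endif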
/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}
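/*
 * Illustration (not part of kprobes.c): minimal use of the function-return
 * probe machinery above from a module. The handler and target symbol are
 * hypothetical; register_kretprobe() is the public entry point declared in
 * <linux/kprobes.h>.
 */
#if 0
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	printk(KERN_INFO "probed function returned\n");
	return 0;
}

static struct kretprobe example_rp = {
	.handler = example_ret_handler,
	.maxactive = 20,	/* instances kept on rp->free_instances */
};

/* example_rp.kp.symbol_name = "sys_open";  register_kretprobe(&example_rp); */
#endif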
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument look it up,
	 * and add it to the address.  That way the addr
	 * field can either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr)+ p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))

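A minimal usage sketch, not part of kprobes.c: a small module that registers a probe through register_kprobe(), the public entry point this file implements via __register_kprobe(). The probed symbol "do_fork" and the printk text are illustrative assumptions.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;	/* let the probed instruction run */
}

static struct kprobe example_kp = {
	.symbol_name = "do_fork",	/* hypothetical probe target */
};

static int __init example_init(void)
{
	example_kp.pre_handler = example_pre;
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");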