
📄 kprobes.c

📁 Linux 2.6.19 kernel source code (before patching)
💻 C
📖 Page 1 of 2
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check if are we probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themself and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled) {
		if (atomic_add_return(1, &kprobe_count) == \
				(ARCH_INACTIVE_KPROBE_COUNT + 1))
			register_page_fault_notifier(&kprobe_page_fault_nb);

		arch_arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler){
			list_for_each_entry_rcu(list_p, &old_p->list, list){
				if (list_p->post_handler){
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/* Call unregister_page_fault_notifier()
	 * if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) == \
				ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;
		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	unregister_kprobe(&rp->kp);

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset,char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	/*
	 * Re-register the page fault notifier only if there are any
	 * active probes at the time of enabling kprobes globally
	 */
	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
		register_page_fault_notifier(&kprobe_page_fault_nb);

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();

	mutex_lock(&kprobe_mutex);
	/* Unconditionally unregister the page_fault notifier */
	unregister_page_fault_notifier(&kprobe_page_fault_nb);

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
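For reference, the following is a minimal sketch of a client module that uses the interface exported at the end of this file (register_kprobe()/unregister_kprobe()). It is not part of kprobes.c: the module name, the handler bodies, and the probed symbol "do_fork" are illustrative assumptions, and it assumes a kernel of roughly this vintage (2.6.19 or later) where struct kprobe can be filled in via .symbol_name instead of a raw .addr.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs just before the probed instruction is executed. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe pre_handler: addr=%p\n", p->addr);
	return 0;	/* 0 = let kprobes single-step the original insn */
}

/* Runs after the probed instruction has been single-stepped. */
static void handler_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	printk(KERN_INFO "kprobe post_handler: addr=%p\n", p->addr);
}

/* Called if a handler or the single-step faults. */
static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
	printk(KERN_INFO "kprobe fault_handler: trap %d\n", trapnr);
	return 0;	/* 0 = let the kernel handle the fault normally */
}

static struct kprobe kp = {
	/* "do_fork" is only an example target symbol (assumption);
	 * any kernel text symbol outside kprobes itself will do. */
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
	.post_handler	= handler_post,
	.fault_handler	= handler_fault,
};

static int __init kprobe_example_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret < 0) {
		printk(KERN_ERR "register_kprobe failed: %d\n", ret);
		return ret;
	}
	printk(KERN_INFO "kprobe planted at %p\n", kp.addr);
	return 0;
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
	printk(KERN_INFO "kprobe at %p removed\n", kp.addr);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");

When the kernel is built with CONFIG_DEBUG_FS, the debugfs_kprobe_init() code above additionally exposes the registered probes under <debugfs>/kprobes/list and a global switch at <debugfs>/kprobes/enabled: writing '1'/'y'/'Y' calls enable_all_kprobes(), writing '0'/'n'/'N' calls disable_all_kprobes().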
