⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 kprobes.c

📁 Kernel code of linux kernel
💻 C
📖 第 1 页 / 共 3 页
字号:
		/* XXX(hch): why is there no hlist_move_head? */
		/* Re-queue the claimed instance onto the global per-hash
		 * table so the trampoline can find it on function return. */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		/* No free instance available: record the missed event. */
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

/*
 * Prepare @rp and arm it by registering its embedded kprobe at the
 * probed function's entry.
 *
 * @rp:          kretprobe to register; rp->kp.addr/symbol identify the target.
 * @called_from: return address of the public registration entry point,
 *               forwarded unchanged to __register_kprobe().
 *
 * Returns 0 on success, -EINVAL if the target is blacklisted or has no
 * resolvable address, -ENOMEM if instance pre-allocation fails.
 */
static int __kprobes __register_kretprobe(struct kretprobe *rp,
					  unsigned long called_from)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	/* Reject functions on the kretprobe blacklist (exact-address match). */
	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	/* Only the pre-handler is used; it hooks the return trampoline. */
	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		/* Preemption allows more concurrent activations per CPU. */
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		/* Each instance carries rp->data_size bytes of user data. */
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = __register_kprobe(&rp->kp, called_from);
	if (ret != 0)
		free_rp_inst(rp);	/* undo pre-allocation on failure */
	return ret;
}

/*
 * Register @num kretprobes from the array @rps.  On the first failure,
 * any probes already registered in this call are rolled back and the
 * error is returned.
 */
static int __register_kretprobes(struct kretprobe **rps, int num,
	unsigned long called_from)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kretprobe(rps[i], called_from);
		if (ret < 0) {
			/* Roll back the i probes registered so far. */
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}

/* Public single-probe wrapper; records its caller for bookkeeping. */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return __register_kretprobes(&rp, 1,
			(unsigned long)__builtin_return_address(0));
}

/* Public single-probe unregister wrapper. */
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
unregister_kretprobes(&rp, 1);
}

/* Public batch-register wrapper; records its caller for bookkeeping. */
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return __register_kretprobes(rps, num,
			(unsigned long)__builtin_return_address(0));
}

/*
 * Unregister @num kretprobes in two phases: first detach every probe
 * under kprobe_mutex, then wait for in-flight handlers to drain with
 * synchronize_sched() before freeing per-probe resources.
 */
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		/* addr is cleared to mark probes that need no bottom-half. */
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	/* Ensure no CPU is still executing a detached handler. */
	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}

#else /* CONFIG_KRETPROBES */

/* Stubs used when kretprobe support is not configured. */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/*
 * Boot-time initialization: set up the hash tables, resolve both
 * blacklists, and hook the architecture backend and die notifier.
 */
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;	/* symbol not present in this kernel */

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;	/* unknown size: blacklist entry only */
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				/* NOTE(review): printk lacks a KERN_ level
				 * prefix — confirm intended loglevel. */
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS

/*
 * Emit one line for probe @p into the debugfs "list" seq_file:
 * address, type tag ("r" = kretprobe, "j" = jprobe, "k" = plain kprobe),
 * and symbolic location when @sym is available.
 */
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	/* Classify the probe by its installed pre-handler. */
	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

/* seq_file start: iterate hash-table slot indices 0..KPROBE_TABLE_SIZE-1. */
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ?
pos : NULL;
}

/* seq_file next: advance to the following hash slot, NULL at the end. */
static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

/*
 * seq_file show: print every kprobe registered in hash slot *v,
 * expanding aggregate probes into their member probes.
 */
static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;	/* hash slot index from the iterator */
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	/* RCU list walk: disable preemption instead of taking kprobe_mutex. */
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			/* Aggregate: report each chained probe separately. */
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

/* File operations for /sys/kernel/debug/kprobes/list. */
static struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/*
 * Globally arm every registered kprobe.  No-op when already enabled.
 * Serialized against registration by kprobe_mutex.
 */
static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * Globally disarm every registered kprobe (except architecture
 * trampoline probes), then wait for running handlers to finish.
 */
static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int
i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			/* Trampoline probes stay armed. */
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
/* Report the global enabled state as "1\n" or "0\n". */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/*
 * Parse a y/Y/1 (enable) or n/N/0 (disable) write to the "enabled"
 * file; any other first character is silently ignored.
 *
 * NOTE(review): a zero-length write leaves buf[0] uninitialized before
 * the switch reads it — confirm callers cannot pass count == 0.
 */
static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}

/* File operations for /sys/kernel/debug/kprobes/enabled. */
static struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};

/*
 * Create the debugfs directory "kprobes" with its "list" (read-only)
 * and "enabled" (read-write) files.  Returns 0 or -ENOMEM, removing
 * the directory again on partial failure.
 */
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /*
CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* Public kprobes/jprobes/kretprobes API, GPL-only exports. */
EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_kprobes);
EXPORT_SYMBOL_GPL(unregister_kprobes);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(register_jprobes);
EXPORT_SYMBOL_GPL(unregister_jprobes);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
EXPORT_SYMBOL_GPL(register_kretprobes);
EXPORT_SYMBOL_GPL(unregister_kretprobes);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -