
📄 kprobes.c

📁 Linux kernel source: kprobes implementation (kernel/kprobes.c)
💻 C
📖 Page 1 of 3
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	INIT_HLIST_HEAD(&empty_rp);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check if are we probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themself and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled)
		arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);

	return ret;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return -EINVAL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid_p;
		return -EINVAL;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p;

	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (list_empty(&p->list) || list_is_singular(&p->list)) {
		if (!list_empty(&p->list)) {
			/* "p" is the last child of an aggr_kprobe */
			old_p = list_entry(p->list.next, struct kprobe, list);
			list_del(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	}
}

static int __register_kprobes(struct kprobe **kps, int num,
	unsigned long called_from)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kprobe(kps[i], called_from);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}

/*
 * Registration and unregistration functions for kprobe.
 */
int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobes(&p, 1,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	return __register_kprobes(kps, num,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

static int __register_jprobes(struct jprobe **jps, int num,
	unsigned long called_from)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = __register_kprobe(&jp->kp, called_from);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	return __register_jprobes(&jp, 1,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	return __register_jprobes(jps, num,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);
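The listing above ends inside pre_handler_kretprobe(); the function continues on the next page. For orientation, the following is a minimal, hypothetical usage sketch of the registration API shown above (register_kprobe() / unregister_kprobe()) from a loadable module. It is not part of kprobes.c; the probed symbol "do_fork" and the handler/module names are example choices only.

/*
 * Illustrative only: a minimal module exercising the API listed above.
 * The probed symbol ("do_fork") and all names here are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* Runs just before the probed instruction executes. */
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe example_kp = {
	.symbol_name = "do_fork",	/* resolved by kprobe_addr() above */
	.pre_handler = example_pre_handler,
};

static int __init example_init(void)
{
	/*
	 * Returns 0 on success, or a negative errno (e.g. -EINVAL for a
	 * blacklisted or non-text address), as seen in __register_kprobe().
	 */
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");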
