📄 ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
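/*
 * Illustrative sketch, not part of the original file: a tracer hooks into the
 * list managed above by filling a struct ftrace_ops whose ->func has the
 * ftrace_func_t signature that ftrace_list_func() dispatches to (ip is the
 * traced function, parent_ip its caller). Registration normally goes through
 * the public register_ftrace_function() wrapper from <linux/ftrace.h> rather
 * than calling __register_ftrace_function() directly; the names my_trace_func
 * and my_ops below are made up for the example.
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */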
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
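/*
 * Rough sizing note, not part of the original file: ftrace_alloc_dyn_node()
 * below hands out records either from the ftrace_free_records list (freed
 * entries are chained through their ip field by ftrace_free_rec() above) or
 * from the current ftrace_page. As a worked example of the ENTRIES_PER_PAGE
 * arithmetic, assuming 4 KiB pages and, on a 64-bit build, a 16-byte
 * struct ftrace_page header and a 32-byte struct dyn_ftrace (both sizes are
 * assumptions; the record layout is defined elsewhere), each page holds
 * (4096 - 16) / 32 = 127 records.
 */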
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));

		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
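/*
 * Conceptual sketch, not part of the original file, of the patching that
 * __ftrace_replace_code() below requests through ftrace_modify_code().
 * On x86, for example, 'gcc -pg' emits a 5-byte call to mcount at every
 * function entry; ftrace_nop_replace() and ftrace_call_replace() return the
 * two byte sequences that a record's call site toggles between:
 *
 *	tracing disabled:	rec->ip:  5-byte nop
 *	tracing enabled:	rec->ip:  call ftrace_caller	(FTRACE_ADDR)
 */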
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);
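/*
 * Illustrative sketch, not part of the visible portion of the file:
 * __ftrace_modify_code() is written to run with every other CPU quiesced,
 * which is why <linux/stop_machine.h> is included above. A caller would
 * pack the desired FTRACE_* bits into a command word and hand it to the
 * stop_machine infrastructure, roughly:
 *
 *	int command = FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
 *
 *	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
 *
 * stop_machine_run() is the interface of this kernel generation; the exact
 * wrapper ftrace uses lies outside the excerpt shown here.
 */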