📄 ftrace.c
	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
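/*
 * Example (added for illustration, not part of the original file): how the
 * ftrace_startup()/ftrace_shutdown() pair above is normally driven.
 * register_ftrace_function()/unregister_ftrace_function() are defined later
 * in this file; the callback and ops below are hypothetical names.
 */
static void example_trace_call(unsigned long ip, unsigned long parent_ip)
{
	/* runs at every traced call site once FTRACE_ENABLE_CALLS is applied */
}

static struct ftrace_ops example_ops = {
	.func = example_trace_call,
};

/*
 * register_ftrace_function(&example_ops) takes ftraced_suspend from 0 to 1,
 * so ftrace_startup() issues FTRACE_ENABLE_CALLS under stop_machine();
 * unregister_ftrace_function(&example_ops) undoes it via ftrace_shutdown().
 */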
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
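/*
 * Context sketch (added for illustration, not part of the original file):
 * the hash buckets drained by __ftrace_update_code() are filled by
 * ftrace_record_ip(), which every still-unconverted mcount site calls while
 * recording is enabled.  Simplified, and assuming the helpers used elsewhere
 * in this file (ftrace_ip_in_hash, ftrace_alloc_dyn_node, ftrace_add_hash):
 */
static void example_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;
	unsigned long key;

	if (unlikely(ftrace_disabled) || ftrace_record_suspend)
		return;				/* recording is paused */

	key = hash_long(ip, FTRACE_HASHBITS);	/* pick a bucket */
	if (ftrace_ip_in_hash(ip, key))		/* site already recorded */
		return;

	rec = ftrace_alloc_dyn_node(ip);	/* carve from ftrace_pages */
	if (!rec)
		return;

	rec->ip = ip;
	ftrace_add_hash(rec, key);		/* unconverted: bucket head */
	ftraced_trigger = 1;			/* give ftraced work to do */
}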
static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     (!(rec->flags & FTRACE_FL_FAILED) ||
		      (rec->flags & FTRACE_FL_FREE))) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
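/*
 * Usage sketch (added for illustration; assumes debugfs mounted at
 * /sys/kernel/debug): the seq_file operations above back the tracing
 * debugfs entries created later in this file, e.g.
 *
 *   # cat /sys/kernel/debug/tracing/available_filter_functions
 *   # cat /sys/kernel/debug/tracing/failures
 *
 * t_show() prints one symbolized mcount site per line; the "failures" view
 * (ftrace_failures_open) lists only records flagged FTRACE_FL_FAILED.
 */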
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;

			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
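/*
 * Usage sketch (added for illustration; assumes the usual debugfs mount
 * point) for the wildcard syntax ftrace_match() parses above:
 *
 *   # echo 'sched*'   > /sys/kernel/debug/tracing/set_ftrace_filter
 *   # echo '*unlock*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 *
 * No '*' means MATCH_FULL; a trailing '*' gives MATCH_FRONT_ONLY, a leading
 * '*' gives MATCH_END_ONLY, and one at each end gives MATCH_MIDDLE_ONLY.
 */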