📄 trace.c
Font size:
/*
 * Tail of register_tracer() (the function header and the #ifdef that
 * guards this selftest section begin before this chunk).  Runs the new
 * tracer's selftest against reset per-cpu buffers, and only links the
 * tracer into the global trace_types list if the selftest passes.
 */
	struct tracer *saved_tracer = current_trace;
	struct trace_array_cpu *data;
	struct trace_array *tr = &global_trace;
	int saved_ctrl = tr->ctrl;
	int i;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	for_each_tracing_cpu(i) {
		data = tr->data[i];
		/* skip cpus whose buffer pages were never allocated */
		if (!head_page(data))
			continue;
		tracing_reset(data);
	}

	/* install the candidate tracer for the duration of the test */
	current_trace = type;
	tr->ctrl = 0;
	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	current_trace = saved_tracer;
	tr->ctrl = saved_ctrl;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		goto out;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	for_each_tracing_cpu(i) {
		data = tr->data[i];
		if (!head_page(data))
			continue;
		tracing_reset(data);
	}
	printk(KERN_CONT "PASSED\n");
	}
#endif

	/* push onto the head of the global tracer list */
	type->next = trace_types;
	trace_types = type;
	/* keep the cached longest-tracer-name length up to date */
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/*
 * unregister_tracer - unlink @type from the global trace_types list.
 *
 * If @type is not on the list a message is printed and nothing else
 * happens.  After removal, max_tracer_type_len is recomputed, but only
 * when the removed tracer's name length matched the cached maximum
 * (otherwise the maximum cannot have changed).
 */
void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	/* removed the (a) longest name: rescan for the new maximum */
	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

/*
 * tracing_reset - empty one cpu's trace buffer.
 *
 * Resets counters and points both head and tail at the first buffer
 * page (head_page());  the pages themselves are not freed or zeroed.
 * NOTE(review): callers appear responsible for any locking — none is
 * taken here; confirm against call sites.
 */
void tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	data->overrun = 0;
	data->trace_head = data->trace_tail = head_page(data);
	data->trace_head_idx = 0;
	data->trace_tail_idx = 0;
}

/* number of task command lines remembered for pid->comm resolution */
#define SAVED_CMDLINES 128
static unsigned
map_pid_to_cmdline[PID_MAX_DEFAULT+1];static unsigned map_cmdline_to_pid[SAVED_CMDLINES];static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];static int cmdline_idx;static DEFINE_SPINLOCK(trace_cmdline_lock);/* temporary disable recording */atomic_t trace_record_cmdline_disabled __read_mostly;static void trace_init_cmdlines(void){ memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline)); memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid)); cmdline_idx = 0;}void trace_stop_cmdline_recording(void);static void trace_save_cmdline(struct task_struct *tsk){ unsigned map; unsigned idx; if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) return; /* * It's not the end of the world if we don't get * the lock, but we also don't want to spin * nor do we want to disable interrupts, * so if we miss here, then better luck next time. */ if (!spin_trylock(&trace_cmdline_lock)) return; idx = map_pid_to_cmdline[tsk->pid]; if (idx >= SAVED_CMDLINES) { idx = (cmdline_idx + 1) % SAVED_CMDLINES; map = map_cmdline_to_pid[idx]; if (map <= PID_MAX_DEFAULT) map_pid_to_cmdline[map] = (unsigned)-1; map_pid_to_cmdline[tsk->pid] = idx; cmdline_idx = idx; } memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); spin_unlock(&trace_cmdline_lock);}static char *trace_find_cmdline(int pid){ char *cmdline = "<...>"; unsigned map; if (!pid) return "<idle>"; if (pid > PID_MAX_DEFAULT) goto out; map = map_pid_to_cmdline[pid]; if (map >= SAVED_CMDLINES) goto out; cmdline = saved_cmdlines[map]; out: return cmdline;}void tracing_record_cmdline(struct task_struct *tsk){ if (atomic_read(&trace_record_cmdline_disabled)) return; trace_save_cmdline(tsk);}static inline struct list_head *trace_next_list(struct trace_array_cpu *data, struct list_head *next){ /* * Roundrobin - but skip the head (which is not a real page): */ next = next->next; if (unlikely(next == &data->trace_pages)) next = next->next; BUG_ON(next == &data->trace_pages); return next;}static inline void *trace_next_page(struct 
trace_array_cpu *data, void *addr)
{
	struct list_head *next;
	struct page *page;

	/* map the buffer address back to its page, then step to the next page */
	page = virt_to_page(addr);
	next = trace_next_list(data, &page->lru);
	page = list_entry(next, struct page, lru);

	return page_address(page);
}

/*
 * tracing_get_trace_entry - reserve the next entry slot in @data's
 * ring of buffer pages and advance the head.  On wrap-around into the
 * tail, the oldest entry is discarded (counted in data->overrun).
 * NOTE(review): no locking here — callers take data->lock; confirm.
 */
static inline struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;

	data->trace_idx++;
	idx = data->trace_head_idx;
	idx_next = idx + 1;

	/* entry slot must lie within the current page */
	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

	entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

	/* page full: move head to the next page in the ring */
	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		data->trace_head = trace_next_page(data, data->trace_head);
		idx_next = 0;
	}

	if (data->trace_head == data->trace_tail &&
	    idx_next == data->trace_tail_idx) {
		/* overrun */
		data->overrun++;
		data->trace_tail_idx++;
		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
			data->trace_tail =
				trace_next_page(data, data->trace_tail);
			data->trace_tail_idx = 0;
		}
	}

	data->trace_head_idx = idx_next;

	return entry;
}

/*
 * tracing_generic_entry_update - fill in the fields common to every
 * trace entry: preempt count, pid, timestamp and irq/softirq/resched
 * flags derived from @flags and the current preempt_count().
 * (The flag expression continues past this chunk.)
 */
static inline void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->t = ftrace_now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ?
TRACE_FLAG_NEED_RESCHED : 0);
}

/*
 * trace_function - write one TRACE_FN entry (function address plus its
 * caller) into @data's buffer, under data->lock with irqs disabled.
 */
void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&data->lock);
	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.ip = ip;
	entry->fn.parent_ip = parent_ip;
	__raw_spin_unlock(&data->lock);
	raw_local_irq_restore(irq_flags);
}

/* Like trace_function(), but a no-op while this cpu's data is disabled. */
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, data, ip, parent_ip, flags);
}

#ifdef CONFIG_MMIOTRACE
/*
 * __trace_mmiotrace_rw - record an MMIO read/write event (copies *rw
 * into the entry) and wake up any readers.
 */
void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data,
			  struct mmiotrace_rw *rw)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&data->lock);

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, 0);
	entry->type = TRACE_MMIO_RW;
	entry->mmiorw = *rw;

	__raw_spin_unlock(&data->lock);
	raw_local_irq_restore(irq_flags);

	trace_wake_up();
}

/*
 * __trace_mmiotrace_map - record an MMIO map/unmap event (copies *map
 * into the entry) and wake up any readers.
 */
void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
			   struct mmiotrace_map *map)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&data->lock);

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, 0);
	entry->type = TRACE_MMIO_MAP;
	entry->mmiomap = *map;

	__raw_spin_unlock(&data->lock);
	raw_local_irq_restore(irq_flags);

	trace_wake_up();
}
#endif

/*
 * __trace_stack - record a TRACE_STACK entry holding a stack backtrace,
 * skipping the first @skip frames.  No-op unless the STACKTRACE iterator
 * flag is set.  NOTE(review): no locking here — callers below invoke it
 * while already holding data->lock; confirm for any new caller.
 */
void __trace_stack(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long flags,
		   int skip)
{
	struct trace_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_STACK;

	memset(&entry->stack, 0,
sizeof(entry->stack));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = skip;
	trace.entries = entry->stack.caller;
	save_stack_trace(&trace);
}

/*
 * __trace_special - record a TRACE_SPECIAL entry carrying three opaque
 * argument words, plus (optionally) a stack trace, then wake readers.
 * Takes void pointers so callers need not see the trace types.
 */
void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array_cpu *data = __data;
	struct trace_array *tr = __tr;
	struct trace_entry *entry;
	unsigned long irq_flags;

	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&data->lock);
	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, 0);
	entry->type = TRACE_SPECIAL;
	entry->special.arg1 = arg1;
	entry->special.arg2 = arg2;
	entry->special.arg3 = arg3;
	/* skip 4 frames so the backtrace starts at our caller's caller */
	__trace_stack(tr, data, irq_flags, 4);
	__raw_spin_unlock(&data->lock);
	raw_local_irq_restore(irq_flags);

	trace_wake_up();
}

/*
 * tracing_sched_switch_trace - record a TRACE_CTX entry describing a
 * context switch from @prev to @next (pid, prio and state of both).
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&data->lock);
	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
	entry->ctx.next_state = next->state;
	__trace_stack(tr, data, flags, 5);
	__raw_spin_unlock(&data->lock);
	raw_local_irq_restore(irq_flags);
}

/*
 * tracing_sched_wakeup_trace - record a TRACE_WAKE entry: @curr (the
 * waker) goes in the prev_* fields, @wakee in the next_* fields.
 * (Continues past this chunk.)
 */
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&data->lock);
	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_WAKE;
	entry->ctx.prev_pid = curr->pid;
	entry->ctx.prev_prio = curr->prio;
	entry->ctx.prev_state = curr->state;
	entry->ctx.next_pid = wakee->pid;
entry->ctx.next_prio = wakee->prio;
	entry->ctx.next_state = wakee->state;
	__trace_stack(tr, data, flags, 6);
	__raw_spin_unlock(&data->lock);
	raw_local_irq_restore(irq_flags);

	trace_wake_up();
}

/*
 * ftrace_special - public hook to drop a TRACE_SPECIAL entry into the
 * global trace from anywhere.  Uses the per-cpu disabled counter to
 * guard against recursive tracing on this cpu.
 */
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* only trace if we were the one that disabled (no recursion) */
	if (likely(disabled == 1))
		__trace_special(tr, data, arg1, arg2, arg3);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

#ifdef CONFIG_FTRACE
/*
 * function_trace_call - callback installed via trace_ops; invoked for
 * every traced function entry with the function's address and its
 * caller.  Recursion on this cpu is suppressed via data->disabled.
 */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!ftrace_function_enabled))
		return;

	if (skip_trace(ip))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

/*
 * Register the function-trace callback; only flip the enabled flag on
 * once registration is done and tracing as a whole is enabled.
 */
void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;
	register_ftrace_function(&trace_ops);
	if (tracer_enabled)
		ftrace_function_enabled = 1;
}

/* Disable the callback flag first, then unregister it. */
void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
};

/*
 * trace_entry_idx - return the entry the iterator currently points at
 * in @cpu's buffer, or NULL when that buffer is exhausted or empty.
 * (Continues past this chunk.)
 */
static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)
{
	struct page *page;
	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx ||
	    (data->trace_head == data->trace_tail &&
	     data->trace_head_idx ==
data->trace_tail_idx))
		return NULL;

	if (!iter->next_page[cpu]) {
		/* Initialize the iterator for this cpu trace buffer */
		WARN_ON(!data->trace_tail);
		page = virt_to_page(data->trace_tail);
		iter->next_page[cpu] = &page->lru;
		iter->next_page_idx[cpu] = data->trace_tail_idx;
	}

	page = list_entry(iter->next_page[cpu], struct page, lru);
	BUG_ON(&data->trace_pages == &page->lru);

	array = page_address(page);

	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
	return &array[iter->next_page_idx[cpu]];
}

/*
 * find_next_entry - scan every cpu's buffer and return the pending
 * entry with the smallest timestamp (merge step of the per-cpu
 * buffers).  Optionally reports the owning cpu through @ent_cpu
 * (-1 when no entry is found).
 */
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_tracing_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ent->t < next->t)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

/*
 * trace_iterator_increment - advance the iterator one entry on its
 * current cpu (iter->cpu), moving to the next buffer page when the
 * current page is exhausted.
 */
static void trace_iterator_increment(struct trace_iterator *iter)
{
	iter->idx++;
	iter->next_idx[iter->cpu]++;
	iter->next_page_idx[iter->cpu]++;

	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
		struct trace_array_cpu *data = iter->tr->data[iter->cpu];

		iter->next_page_idx[iter->cpu] = 0;
		iter->next_page[iter->cpu] =
			trace_next_list(data, iter->next_page[iter->cpu]);
	}
}

/* Body continues past this chunk. */
static void trace_consume(struct trace_iterator *iter)
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -