📄 trace.c
字号:
	/* Tail of tracing_ctrl_write(): parse a boolean written by user
	 * space and enable/disable tracing accordingly. */
	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* Normalize any non-zero value to 1. */
	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		/* State actually changes: flip the global enable flag,
		 * record the new state, then let the current tracer
		 * react to it. */
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

/*
 * Read the name of the currently selected tracer, followed by a
 * newline (a bare newline if no tracer is selected).
 */
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	/* Hold the lock so current_trace stays stable while formatting. */
	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/*
 * Select a new tracer by name.  The written string is matched against
 * the registered tracer list; trailing whitespace is stripped first.
 * An unknown name, or re-selecting the current tracer, is accepted
 * silently (the write still "succeeds").
 */
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	/* Tear down the old tracer before installing the new one. */
	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

/*
 * Read the maximum recorded latency in usecs ("-1" if it was never
 * set; the stored sentinel is (unsigned long)-1).
 */
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr == (unsigned long)-1 ?
-1 : nsecs_to_usecs(*ptr)); if (r > sizeof(buf)) r = sizeof(buf); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);}static ssize_ttracing_max_lat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos){ long *ptr = filp->private_data; char buf[64]; long val; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = strict_strtoul(buf, 10, &val); if (ret < 0) return ret; *ptr = val * 1000; return cnt;}static atomic_t tracing_reader;static int tracing_open_pipe(struct inode *inode, struct file *filp){ struct trace_iterator *iter; if (tracing_disabled) return -ENODEV; /* We only allow for reader of the pipe */ if (atomic_inc_return(&tracing_reader) != 1) { atomic_dec(&tracing_reader); return -EBUSY; } /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) return -ENOMEM; mutex_lock(&trace_types_lock); iter->tr = &global_trace; iter->trace = current_trace; filp->private_data = iter; if (iter->trace->pipe_open) iter->trace->pipe_open(iter); mutex_unlock(&trace_types_lock); return 0;}static int tracing_release_pipe(struct inode *inode, struct file *file){ struct trace_iterator *iter = file->private_data; kfree(iter); atomic_dec(&tracing_reader); return 0;}static unsigned inttracing_poll_pipe(struct file *filp, poll_table *poll_table){ struct trace_iterator *iter = filp->private_data; if (trace_flags & TRACE_ITER_BLOCK) { /* * Always select as readable when in blocking mode */ return POLLIN | POLLRDNORM; } else { if (!trace_empty(iter)) return POLLIN | POLLRDNORM; poll_wait(filp, &trace_wait, poll_table); if (!trace_empty(iter)) return POLLIN | POLLRDNORM; return 0; }}/* * Consumer reader. 
 */
/*
 * Consume and format trace entries for user space.  Blocks (via a
 * hand-rolled 100ms polling sleep, not a real waitqueue) until data is
 * available, then stops tracing on the CPUs that have data, drains what
 * fits into the seq buffer, and copies it out.  Returns the byte count
 * copied, 0 on EOF, or a negative errno.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array_cpu *data;
	static cpumask_t mask;	/* safe as static: only one pipe reader is allowed */
	unsigned long flags;
#ifdef CONFIG_FTRACE
	int ftrace_save;
#endif
	int cpu;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;
	sret = 0;

	trace_seq_reset(&iter->seq);

	mutex_lock(&trace_types_lock);
	/* A tracer may supply its own read routine and short-circuit us. */
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			sret = -EAGAIN;
			goto out;
		}

		/*
		 * This is a make-shift waitqueue. The reason we don't use
		 * an actual wait queue is because:
		 *  1) we only ever have one waiter
		 *  2) the tracing, traces all functions, we don't want
		 *     the overhead of calling wake_up and friends
		 *     (and tracing them too)
		 * Anyway, this is really very primitive wakeup.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		iter->tr->waiter = current;

		/* Drop the lock while sleeping so writers can proceed. */
		mutex_unlock(&trace_types_lock);

		/* sleep for 100 msecs, and try again. */
		schedule_timeout(HZ/10);

		mutex_lock(&trace_types_lock);

		iter->tr->waiter = NULL;

		if (signal_pending(current)) {
			sret = -EINTR;
			goto out;
		}

		/* The tracer was switched under us: stop reading. */
		if (iter->trace != current_trace)
			goto out;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_enabled && iter->pos)
			break;

		continue;
	}

	/* stop when tracing is finished */
	if (trace_empty(iter))
		goto out;

	/* Never format more than one page's worth at a time. */
	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	/*
	 * We need to stop all tracing on all CPUS to read the
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */
	cpus_clear(mask);
	local_irq_save(flags);
#ifdef CONFIG_FTRACE
	/* Suspend function tracing while we hold the raw spinlocks. */
	ftrace_save = ftrace_enabled;
	ftrace_enabled = 0;
#endif
	smp_wmb();
	/* Disable tracing on every CPU that has unread entries. */
	for_each_tracing_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (!head_page(data) || !data->trace_idx)
			continue;

		atomic_inc(&data->disabled);
		cpu_set(cpu, mask);
	}

	/* Lock each selected CPU buffer and account new overruns. */
	for_each_cpu_mask(cpu, mask) {
		data = iter->tr->data[cpu];
		__raw_spin_lock(&data->lock);

		if (data->overrun > iter->last_overrun[cpu])
			iter->overrun[cpu] +=
				data->overrun - iter->last_overrun[cpu];
		iter->last_overrun[cpu] = data->overrun;
	}

	/* Format entries into the seq buffer until cnt is satisfied. */
	while (find_next_entry_inc(iter) != NULL) {
		int ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (!ret) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}

		trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;
	}

	/* Unlock first, then re-enable, mirroring the acquire order. */
	for_each_cpu_mask(cpu, mask) {
		data = iter->tr->data[cpu];
		__raw_spin_unlock(&data->lock);
	}

	for_each_cpu_mask(cpu, mask) {
		data = iter->tr->data[cpu];
		atomic_dec(&data->disabled);
	}
#ifdef CONFIG_FTRACE
	ftrace_enabled = ftrace_save;
#endif
	local_irq_restore(flags);

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_reset(&iter->seq);
	/* -EBUSY here just means nothing was copied; report 0, the
	 * leftover data will be delivered on the next read. */
	if (sret == -EBUSY)
		sret = 0;

out:
	mutex_unlock(&trace_types_lock);

	return sret;
}

/*
 * Read the configured number of trace entries as a decimal string.
 */
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%lu\n", tr->entries);
	return
		simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/*
 * Resize the trace buffers.  Grows by allocating pages (bounded to 1/4
 * of freeable memory) or shrinks by freeing them.  Only allowed while
 * no tracer is active.  Returns the byte count written or a negative
 * errno (note: errors are returned through "cnt" after the lock is
 * taken).
 */
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];
	int i, ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	/* Resizing while a tracer runs would race with the writers. */
	if (current_trace != &no_tracer) {
		cnt = -EBUSY;
		pr_info("ftrace: set current_tracer to none"
			" before modifying buffer size\n");
		goto out;
	}

	if (val > global_trace.entries) {
		long pages_requested;
		unsigned long freeable_pages;

		/* make sure we have enough memory before mapping */
		pages_requested =
			(val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;

		/* account for each buffer (and max_tr) */
		pages_requested *= tracing_nr_buffers * 2;

		/* Check for overflow */
		if (pages_requested < 0) {
			cnt = -ENOMEM;
			goto out;
		}

		freeable_pages = determine_dirtyable_memory();

		/* we only allow to request 1/4 of useable memory */
		if (pages_requested >
		    ((freeable_pages + tracing_pages_allocated) / 4)) {
			cnt = -ENOMEM;
			goto out;
		}

		while (global_trace.entries < val) {
			if (trace_alloc_page()) {
				cnt = -ENOMEM;
				goto out;
			}
			/* double check that we don't go over the known pages */
			if (tracing_pages_allocated > pages_requested)
				break;
		}

	} else {
		/* include the number of entries in val (inc of page entries) */
		while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
			trace_free_page();
	}

	/* check integrity */
	for_each_tracing_cpu(i)
		check_pages(global_trace.data[i]);

	filp->f_pos += cnt;

	/* If check pages failed, return ENOMEM */
	if (tracing_disabled)
		cnt = -ENOMEM;
 out:
	/* Keep the max-latency buffer's size in sync, even on failure. */
	max_tr.entries = global_trace.entries;
	mutex_unlock(&trace_types_lock);

	return cnt;
}

/* debugfs "tracing_max_latency": read/write the latency watermark. */
static struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
};

/* debugfs "tracing_enabled": global on/off switch. */
static struct file_operations tracing_ctrl_fops
= {
	.open		= tracing_open_generic,
	.read		= tracing_ctrl_read,
	.write		= tracing_ctrl_write,
};

/* debugfs "current_tracer": select the active tracer by name. */
static struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
};

/* debugfs "trace_pipe": single-reader consuming trace stream. */
static struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.release	= tracing_release_pipe,
};

/* debugfs "trace_entries": query/resize the ring buffers. */
static struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
};

#ifdef CONFIG_DYNAMIC_FTRACE

/*
 * Generic reader for a file whose private_data points at an
 * unsigned long counter; prints it as a decimal string.
 */
static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static struct file_operations tracing_read_long_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_long,
};
#endif

/* Root debugfs directory for all tracing files; created lazily. */
static struct dentry *d_tracer;

/*
 * Return the tracing debugfs directory, creating it on first use.
 * The failure warning is printed only once.
 */
struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

/* Populate the tracing debugfs directory with its control files. */
static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_fil
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -