From a7603ff4b5f7e26e67af82a4c3d05eeeb8d7b160 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Mon, 6 Aug 2012 16:24:11 -0400
Subject: tracing: Replace the static global per_cpu arrays with allocated per_cpu

The global and max-tr currently use static per_cpu arrays for the CPU data
descriptors. But in order to support newly allocated trace_arrays, those
need to be allocated per_cpu arrays as well. Instead of using the static
arrays, switch the global and max-tr to use allocated data.

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 601152523326..9d73861efc6a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -76,7 +76,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 		goto out;
 
 	cpu = smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 	if (!atomic_read(&data->disabled)) {
 		local_save_flags(flags);
 		trace_function(tr, ip, parent_ip, flags, pc);
@@ -107,7 +107,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	 */
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
-- 
cgit v1.2.3

From 12883efb670c28dff57dcd7f4f995a1ffe153b2d Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Tue, 5 Mar 2013 09:24:35 -0500
Subject: tracing: Consolidate max_tr into main trace_array structure

Currently, the way the latency tracers and the snapshot feature work is to
have a separate trace_array called "max_tr" that holds the snapshot buffer.
For latency tracers, this snapshot buffer is swapped with the running
buffer in order to save the current max latency.

The only items needed for the max_tr are really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency was
triggered on. All other fields in trace_array are unused by the max_tr,
making the max_tr mostly bloat.

This change removes the max_tr completely, and adds a new structure called
trace_buffer that holds the buffer pointer, the per_cpu data pointers, the
time_start timestamp, and the cpu where the latency occurred.

The trace_array now has two trace_buffers, one for the normal trace and one
for the max trace or snapshot. By doing this, not only do we remove the
bloat from the max_tr, but trace instances can now use their own snapshot
feature, rather than only the top level global_trace having the snapshot
feature and latency tracers for itself.
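For reference, the consolidated structure described above can be pictured
roughly as follows. This is only a sketch assembled from this description
and the diffs below; the member types and the back pointer to the owning
trace_array are assumptions, and the real header may differ:

 /*
  * Sketch only: the trace_buffer described in this message.  The per-CPU
  * descriptors are an allocated per_cpu array (see the previous patch),
  * so they are reached with per_cpu_ptr() rather than by indexing a
  * static array.
  */
 struct trace_buffer {
	struct trace_array		*tr;		/* owning trace_array (assumed) */
	struct ring_buffer		*buffer;	/* the ring buffer itself */
	struct trace_array_cpu __percpu	*data;		/* per-CPU descriptors */
	u64				time_start;	/* when the max latency hit (type assumed) */
	int				cpu;		/* cpu that hit the max latency */
 };

 /* access pattern used throughout the hunks below (illustrative): */
 data = per_cpu_ptr(tr->trace_buffer.data, cpu);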
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 9d73861efc6a..e467c0c7bdd5 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -28,7 +28,7 @@ static void tracing_stop_function_trace(void);
 static int function_trace_init(struct trace_array *tr)
 {
 	func_trace = tr;
-	tr->cpu = get_cpu();
+	tr->trace_buffer.cpu = get_cpu();
 	put_cpu();
 
 	tracing_start_cmdline_record();
@@ -44,7 +44,7 @@ static void function_trace_reset(struct trace_array *tr)
 
 static void function_trace_start(struct trace_array *tr)
 {
-	tracing_reset_online_cpus(tr);
+	tracing_reset_online_cpus(&tr->trace_buffer);
 }
 
 /* Our option */
@@ -76,7 +76,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 		goto out;
 
 	cpu = smp_processor_id();
-	data = per_cpu_ptr(tr->data, cpu);
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	if (!atomic_read(&data->disabled)) {
 		local_save_flags(flags);
 		trace_function(tr, ip, parent_ip, flags, pc);
@@ -107,7 +107,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	 */
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->data, cpu);
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
-- 
cgit v1.2.3

From 1c31714328be90764e46716f31fb0bd6da44c305 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Sat, 9 Mar 2013 08:36:53 -0500
Subject: tracing: Consolidate updating of count for traceon/off

Remove some duplicate code and replace it with a helper function.

This makes the code a bit cleaner.

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e467c0c7bdd5..38cfb290ecd9 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -214,38 +214,37 @@ static struct tracer function_trace __read_mostly =
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-static void
-ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+static int update_count(void **data)
 {
-	long *count = (long *)data;
-
-	if (tracing_is_on())
-		return;
+	unsigned long *count = (long *)data;
 
 	if (!*count)
-		return;
+		return 0;
 
 	if (*count != -1)
 		(*count)--;
 
-	tracing_on();
+	return 1;
 }
 
 static void
-ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
 {
-	long *count = (long *)data;
-
-	if (!tracing_is_on())
+	if (tracing_is_on())
 		return;
 
-	if (!*count)
-		return;
+	if (update_count(data))
+		tracing_on();
+}
 
-	if (*count != -1)
-		(*count)--;
+static void
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	if (!tracing_is_on())
+		return;
 
-	tracing_off();
+	if (update_count(data))
+		tracing_off();
 }
 
 static int
-- 
cgit v1.2.3

From 8b8fa62c60e03a53c46324075a8dc25821741daa Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Tue, 12 Mar 2013 09:25:00 -0400
Subject: tracing: Consolidate ftrace_trace_onoff_unreg() into callback

The only thing ftrace_trace_onoff_unreg() does is a strcmp() against the
cmd parameter to determine which ops to unregister.
But the same strcmp() is also done by the caller after this function
returns. By moving the check for '!' (unregister) to after the strcmp(),
the callback function itself can just do the unregister and we can get
rid of the helper function.

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 38cfb290ecd9..a88a3e0b0cc2 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -282,22 +282,6 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
 	return 0;
 }
 
-static int
-ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
-{
-	struct ftrace_probe_ops *ops;
-
-	/* we register both traceon and traceoff to this callback */
-	if (strcmp(cmd, "traceon") == 0)
-		ops = &traceon_probe_ops;
-	else
-		ops = &traceoff_probe_ops;
-
-	unregister_ftrace_function_probe_func(glob, ops);
-
-	return 0;
-}
-
 static int
 ftrace_trace_onoff_callback(struct ftrace_hash *hash,
 			    char *glob, char *cmd, char *param, int enable)
@@ -311,15 +295,17 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
 	if (!enable)
 		return -EINVAL;
 
-	if (glob[0] == '!')
-		return ftrace_trace_onoff_unreg(glob+1, cmd, param);
-
 	/* we register both traceon and traceoff to this callback */
 	if (strcmp(cmd, "traceon") == 0)
 		ops = &traceon_probe_ops;
 	else
 		ops = &traceoff_probe_ops;
 
+	if (glob[0] == '!') {
+		unregister_ftrace_function_probe_func(glob+1, ops);
+		return 0;
+	}
+
 	if (!param)
 		goto out_reg;
 
-- 
cgit v1.2.3

From 8380d24860e9d1659ab22896b86d7fe591c424fa Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Sat, 9 Mar 2013 08:56:43 -0500
Subject: ftrace: Separate unlimited probes from count limited probes

The function tracing probes that trigger traceon or traceoff can be set to
unlimited, or be given a count of the number of times to execute.

By separating these two types of probes, we can then use the dynamic
ftrace function filtering directly, and remove the brute force "check if
this called function is my probe" routines in ftrace.
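Concretely, after this change the choice between the two variants is a
single parameter check in the callback; the same lines appear in diff form
below, and are repeated here as plain C only for readability:

	/* use the count-limited ops only when a :count parameter was given */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

So a command such as 'schedule:traceoff:5' written to set_ftrace_filter
ends up on traceoff_count_probe_ops, while a plain 'schedule:traceoff'
uses traceoff_probe_ops and never touches update_count().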
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 38 +++++++++++++++++++++++++++++++++-----
 1 file changed, 33 insertions(+), 5 deletions(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a88a3e0b0cc2..043b2425ae73 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -228,7 +228,7 @@ static int update_count(void **data)
 }
 
 static void
-ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
 {
 	if (tracing_is_on())
 		return;
@@ -238,7 +238,7 @@ ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
 }
 
 static void
-ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
 {
 	if (!tracing_is_on())
 		return;
@@ -247,10 +247,38 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
 		tracing_off();
 }
 
+static void
+ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	if (tracing_is_on())
+		return;
+
+	tracing_on();
+}
+
+static void
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	if (!tracing_is_on())
+		return;
+
+	tracing_off();
+}
+
 static int
 ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
 			 struct ftrace_probe_ops *ops, void *data);
 
+static struct ftrace_probe_ops traceon_count_probe_ops = {
+	.func			= ftrace_traceon_count,
+	.print			= ftrace_trace_onoff_print,
+};
+
+static struct ftrace_probe_ops traceoff_count_probe_ops = {
+	.func			= ftrace_traceoff_count,
+	.print			= ftrace_trace_onoff_print,
+};
+
 static struct ftrace_probe_ops traceon_probe_ops = {
 	.func			= ftrace_traceon,
 	.print			= ftrace_trace_onoff_print,
@@ -269,7 +297,7 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
 
 	seq_printf(m, "%ps:", (void *)ip);
 
-	if (ops == &traceon_probe_ops)
+	if (ops == &traceon_probe_ops || ops == &traceon_count_probe_ops)
 		seq_printf(m, "traceon");
 	else
 		seq_printf(m, "traceoff");
@@ -297,9 +325,9 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
 
 	/* we register both traceon and traceoff to this callback */
 	if (strcmp(cmd, "traceon") == 0)
-		ops = &traceon_probe_ops;
+		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
 	else
-		ops = &traceoff_probe_ops;
+		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
 
 	if (glob[0] == '!') {
 		unregister_ftrace_function_probe_func(glob+1, ops);
-- 
cgit v1.2.3

From dd42cd3ea96d687f15525c4f14fa582702db223f Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Wed, 13 Mar 2013 10:17:50 -0400
Subject: tracing: Add function probe to trigger stack traces

Add a function probe that will cause a stack trace to be traced in the
ring buffer when the given function(s) are called.

format is:

 <function>:stacktrace[:<count>]

 echo 'schedule:stacktrace' > /debug/tracing/set_ftrace_filter
 cat /debug/tracing/trace_pipe
     kworker/2:0-4329  [002] ...2  2933.558007: <stack trace>
 => kthread
 => ret_from_fork
          <idle>-0     [000] .N.2  2933.558019: <stack trace>
 => rest_init
 => start_kernel
 => x86_64_start_reservations
 => x86_64_start_kernel
     kworker/2:0-4329  [002] ...2  2933.558109: <stack trace>
 => kthread
 => ret_from_fork
 [...]
This can be set to only trace a specific number of times:

 echo 'schedule:stacktrace:3' > /debug/tracing/set_ftrace_filter
 cat /debug/tracing/trace_pipe
           <...>-58    [003] ...2   841.801694: <stack trace>
 => kthread
 => ret_from_fork
          <idle>-0     [001] .N.2   841.801697: <stack trace>
 => start_secondary
           <...>-2059  [001] ...2   841.801736: <stack trace>
 => wait_for_common
 => wait_for_completion
 => flush_work
 => tty_flush_to_ldisc
 => input_available_p
 => n_tty_poll
 => tty_poll
 => do_select
 => core_sys_select
 => sys_select
 => system_call_fastpath

To remove these:

 echo '!schedule:stacktrace' > /debug/tracing/set_ftrace_filter
 echo '!schedule:stacktrace:0' > /debug/tracing/set_ftrace_filter

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 150 +++++++++++++++++++++++++++++++----------
 1 file changed, 115 insertions(+), 35 deletions(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 043b2425ae73..c4d6d7191988 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -265,56 +265,103 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
 		tracing_off();
 }
 
+/*
+ * Skip 4:
+ *   ftrace_stacktrace()
+ *   function_trace_probe_call()
+ *   ftrace_ops_list_func()
+ *   ftrace_call()
+ */
+#define STACK_SKIP 4
+
+static void
+ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	trace_dump_stack(STACK_SKIP);
+}
+
+static void
+ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	if (!tracing_is_on())
+		return;
+
+	if (update_count(data))
+		trace_dump_stack(STACK_SKIP);
+}
+
 static int
-ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
-			 struct ftrace_probe_ops *ops, void *data);
+ftrace_probe_print(const char *name, struct seq_file *m,
+		   unsigned long ip, void *data)
+{
+	long count = (long)data;
+
+	seq_printf(m, "%ps:%s", (void *)ip, name);
+
+	if (count == -1)
+		seq_printf(m, ":unlimited\n");
+	else
+		seq_printf(m, ":count=%ld\n", count);
+
+	return 0;
+}
+
+static int
+ftrace_traceon_print(struct seq_file *m, unsigned long ip,
+		     struct ftrace_probe_ops *ops, void *data)
+{
+	return ftrace_probe_print("traceon", m, ip, data);
+}
+
+static int
+ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
+		      struct ftrace_probe_ops *ops, void *data)
+{
+	return ftrace_probe_print("traceoff", m, ip, data);
+}
+
+static int
+ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
+			struct ftrace_probe_ops *ops, void *data)
+{
+	return ftrace_probe_print("stacktrace", m, ip, data);
+}
 
 static struct ftrace_probe_ops traceon_count_probe_ops = {
 	.func			= ftrace_traceon_count,
-	.print			= ftrace_trace_onoff_print,
+	.print			= ftrace_traceon_print,
 };
 
 static struct ftrace_probe_ops traceoff_count_probe_ops = {
 	.func			= ftrace_traceoff_count,
-	.print			= ftrace_trace_onoff_print,
+	.print			= ftrace_traceoff_print,
+};
+
+static struct ftrace_probe_ops stacktrace_count_probe_ops = {
+	.func			= ftrace_stacktrace_count,
+	.print			= ftrace_stacktrace_print,
 };
 
 static struct ftrace_probe_ops traceon_probe_ops = {
 	.func			= ftrace_traceon,
-	.print			= ftrace_trace_onoff_print,
+	.print			= ftrace_traceon_print,
 };
 
 static struct ftrace_probe_ops traceoff_probe_ops = {
 	.func			= ftrace_traceoff,
-	.print			= ftrace_trace_onoff_print,
+	.print			= ftrace_traceoff_print,
 };
 
-static int
-ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
-			 struct ftrace_probe_ops *ops, void *data)
-{
-	long count = (long)data;
-
-	seq_printf(m, "%ps:", (void *)ip);
-
-	if (ops == &traceon_probe_ops ||
-	    ops == &traceon_count_probe_ops)
-		seq_printf(m, "traceon");
-	else
-		seq_printf(m, "traceoff");
-
-	if (count == -1)
-		seq_printf(m, ":unlimited\n");
-	else
-		seq_printf(m, ":count=%ld\n", count);
-
-	return 0;
-}
+static struct ftrace_probe_ops stacktrace_probe_ops = {
+	.func			= ftrace_stacktrace,
+	.print			= ftrace_stacktrace_print,
+};
 
 static int
-ftrace_trace_onoff_callback(struct ftrace_hash *hash,
-			    char *glob, char *cmd, char *param, int enable)
+ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
+			    struct ftrace_hash *hash, char *glob,
+			    char *cmd, char *param, int enable)
 {
-	struct ftrace_probe_ops *ops;
 	void *count = (void *)-1;
 	char *number;
 	int ret;
@@ -323,12 +370,6 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
 	if (!enable)
 		return -EINVAL;
 
-	/* we register both traceon and traceoff to this callback */
-	if (strcmp(cmd, "traceon") == 0)
-		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
-	else
-		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
-
 	if (glob[0] == '!') {
 		unregister_ftrace_function_probe_func(glob+1, ops);
 		return 0;
@@ -356,6 +397,34 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
 	return ret < 0 ? ret : 0;
 }
 
+static int
+ftrace_trace_onoff_callback(struct ftrace_hash *hash,
+			    char *glob, char *cmd, char *param, int enable)
+{
+	struct ftrace_probe_ops *ops;
+
+	/* we register both traceon and traceoff to this callback */
+	if (strcmp(cmd, "traceon") == 0)
+		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
+	else
+		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
+
+	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+					   param, enable);
+}
+
+static int
+ftrace_stacktrace_callback(struct ftrace_hash *hash,
+			   char *glob, char *cmd, char *param, int enable)
+{
+	struct ftrace_probe_ops *ops;
+
+	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
+
+	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+					   param, enable);
+}
+
 static struct ftrace_func_command ftrace_traceon_cmd = {
 	.name			= "traceon",
 	.func			= ftrace_trace_onoff_callback,
@@ -366,6 +435,11 @@ static struct ftrace_func_command ftrace_traceoff_cmd = {
 	.func			= ftrace_trace_onoff_callback,
 };
 
+static struct ftrace_func_command ftrace_stacktrace_cmd = {
+	.name			= "stacktrace",
+	.func			= ftrace_stacktrace_callback,
+};
+
 static int __init init_func_cmd_traceon(void)
 {
 	int ret;
@@ -377,6 +451,12 @@ static int __init init_func_cmd_traceon(void)
 	ret = register_ftrace_command(&ftrace_traceon_cmd);
 	if (ret)
 		unregister_ftrace_command(&ftrace_traceoff_cmd);
+
+	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
+	if (ret) {
+		unregister_ftrace_command(&ftrace_traceoff_cmd);
+		unregister_ftrace_command(&ftrace_traceon_cmd);
+	}
 	return ret;
 }
 #else
-- 
cgit v1.2.3
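For orientation when reading the probe definitions in the patches above,
the two callback shapes that a struct ftrace_probe_ops user in this file
provides can be summarized as follows. This is only a sketch inferred from
the callbacks shown in these diffs (ftrace_traceon(), ftrace_stacktrace_count(),
the *_print() helpers); it is not the authoritative ftrace definition, which
may carry additional members that these patches do not use:

 #include <linux/seq_file.h>

 struct ftrace_probe_ops;	/* real type provided by ftrace; forward declared for the sketch */

 /* Sketch only -- callback shapes as used by the probes in this file. */
 struct ftrace_probe_ops_sketch {
	/* called from the function tracer when a filtered function is hit */
	void (*func)(unsigned long ip, unsigned long parent_ip, void **data);
	/* called to list the probe when set_ftrace_filter is read */
	int (*print)(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data);
 };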