author    Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2014-07-03 15:48:16 -0400
committer Steven Rostedt <rostedt@goodmis.org>    2014-11-11 12:41:52 -0500
commit    12cce594fa8f12e002e7eb5d10141853c1e6a112 (patch)
tree      f508de25f865ead9162c4d5213874394ac06fdde /kernel/trace/ftrace.c
parent    15d5b02cc575e5b20ddfa1645fc1242f0b0ba1c8 (diff)
ftrace/x86: Allow !CONFIG_PREEMPT dynamic ops to use allocated trampolines
When a static ftrace_ops (like the function tracer) enables tracing and it is the only callback referencing a function, a trampoline is dynamically allocated for that function, which calls the callback directly instead of calling a loop function that iterates over all the registered ftrace ops (if more than one ops is registered).

But when it comes to dynamically allocated ftrace_ops, which may be freed, on a CONFIG_PREEMPT kernel there's no way to know when it is safe to free the trampoline. If a task was preempted while executing on the trampoline, there's currently no way to know when it will be off that trampoline.

But this is not true when it comes to !CONFIG_PREEMPT. The current method of calling schedule_on_each_cpu() will force tasks off the trampoline, because they cannot schedule while on it (kernel preemption is not configured). That means it is safe to free a dynamically allocated ftrace_ops trampoline when CONFIG_PREEMPT is not configured.

Cc: H. Peter Anvin <hpa@linux.intel.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Borislav Petkov <bp@suse.de>
Tested-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
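[Editor's note] For context, a "dynamically allocated" ftrace_ops is typically one living in module or heap memory; ftrace core marks such ops FTRACE_OPS_FL_DYNAMIC itself at registration time. A minimal sketch of such an ops, using the ftrace_func_t callback signature of this kernel era (the my_* names are hypothetical, for illustration only):

#include <linux/ftrace.h>
#include <linux/module.h>

/* Illustrative callback: runs at the entry of every traced function. */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* do something cheap here; this path is extremely hot */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
	/* ftrace sets FTRACE_OPS_FL_DYNAMIC for non-core-kernel ops */
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	/* after this returns, my_ops (and any trampoline) may be freed */
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Unloading such a module is exactly the case this patch addresses: the ops goes away, so its allocated trampoline must be freed, and that is only known to be safe here on !CONFIG_PREEMPT.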
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--    kernel/trace/ftrace.c    18
1 file changed, 18 insertions, 0 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 422e1f8300b1..eab3123a1fbe 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2324,6 +2324,10 @@ static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 
+void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+}
+
 static void control_ops_free(struct ftrace_ops *ops)
 {
 	free_percpu(ops->disabled);
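[Editor's note] The hook added above is a weak no-op default; an architecture that allocates trampolines overrides it. A sketch loosely modeled on the x86 override from the same patch series, where FTRACE_OPS_FL_ALLOC_TRAMP marks ops whose trampoline ftrace allocated and tramp_free() stands in for the arch's own release helper:

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	/* only free trampolines that ftrace itself allocated */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}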
@@ -2475,6 +2479,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
 		schedule_on_each_cpu(ftrace_sync);
 
+		arch_ftrace_trampoline_free(ops);
+
 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
 			control_ops_free(ops);
 	}
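[Editor's note] The ordering in this hunk is the whole point: the trampoline is freed only after schedule_on_each_cpu(ftrace_sync) has forced every CPU through the scheduler, so on !CONFIG_PREEMPT no task can still be on it. In this kernel, ftrace_sync() is essentially an empty work callback in the same file; the guarantee comes purely from making each CPU schedule, roughly:

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * Intentionally empty: queueing this on every CPU and waiting
	 * for it acts as a hard synchronize_sched(), covering idle
	 * and userspace tasks as well.
	 */
}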
@@ -4725,9 +4731,21 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
+
+/*
+ * Currently there's no safe way to free a trampoline when the kernel
+ * is configured with PREEMPT. That is because a task could be preempted
+ * while executing on the trampoline; it may stay preempted for a long
+ * time depending on the system load, and currently there's no way to
+ * know when it will be off the trampoline. If the trampoline is freed
+ * too early, when the task runs again, it will be executing on freed
+ * memory and crash.
+ */
+#ifdef CONFIG_PREEMPT
 	/* Currently, only non dynamic ops can have a trampoline */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
 		return;
+#endif
 
 	arch_ftrace_update_trampoline(ops);
 }
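[Editor's note] As a design aside, the same guard could be expressed without the preprocessor. A purely illustrative alternative (not what the patch does) using IS_ENABLED(), which keeps both branches compile-checked:

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
	/*
	 * On preemptible kernels a freed trampoline could still be
	 * executed by a preempted task, so never allocate one for
	 * dynamic ops there.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT) &&
	    (ops->flags & FTRACE_OPS_FL_DYNAMIC))
		return;

	arch_ftrace_update_trampoline(ops);
}

The patch's #ifdef matches the file's existing style; IS_ENABLED() would be equivalent here since both the config symbol and FTRACE_OPS_FL_DYNAMIC are always defined.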