path: root/include/trace/ftrace.h
author    Oleg Nesterov <oleg@redhat.com>    2013-08-06 16:08:44 (GMT)
committer Steven Rostedt <rostedt@goodmis.org>    2013-08-14 01:05:57 (GMT)
commit    12473965c38a527a0c6f7a38d23edce60957f873 (patch)
tree      390d32b5ae485dda15302f370812bf668f5cdd87 /include/trace/ftrace.h
parent    36009d07b79d2a168d6037947357d96e5d8cebe7 (diff)
download  linux-fsl-qoriq-12473965c38a527a0c6f7a38d23edce60957f873.tar.xz
tracing/perf: Reimplement TP_perf_assign() logic
The next patch tries to avoid the costly perf_trace_buf_* calls when possible, but there is a problem: we can only do this if __task == NULL; perf_tp_event(task != NULL) has additional code for that case.

Unfortunately, TP_perf_assign()/__perf_xxx(), which changes the default values of the __count/__task variables for perf_trace_buf_submit(), is called "too late", after we have already done perf_trace_buf_prepare(), so the optimization above cannot work.

So this patch simply embeds __perf_xxx() into TP_ARGS(); this way DECLARE_EVENT_CLASS() can use the result of the assignments hidden in "args" right after ftrace_get_offsets_##call(), which is mostly trivial. This allows us to have the fast-path "__task != NULL" check at the start, see the next patch.

Link: http://lkml.kernel.org/r/20130806160844.GA2739@redhat.com

Tested-by: David Ahern <dsahern@gmail.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
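The core trick is that __perf_xxx() expands to an assignment expression, so merely evaluating the argument list also updates a local default. Below is a minimal, self-contained userspace sketch of that idea; the names SKETCH_TRACE and __count are stand-ins for what DECLARE_EVENT_CLASS() generates, not the real kernel macros:

	/*
	 * Sketch only: __perf_count() expands to an assignment expression,
	 * so evaluating the wrapped argument inside the argument list
	 * updates the local default as a side effect.
	 */
	#include <stdio.h>

	#define __perf_count(c)	(__count = (c))

	/* Stand-in for the code DECLARE_EVENT_CLASS() emits per event. */
	#define SKETCH_TRACE(args...)					\
	do {								\
		unsigned long __count = 1;	/* default count */	\
		/* Evaluating "args" runs the hidden assignment. */	\
		unsigned long __val = (args);				\
		printf("val=%lu count=%lu\n", __val, __count);		\
	} while (0)

	int main(void)
	{
		/* Caller wraps the count argument, as TP_ARGS() now allows. */
		SKETCH_TRACE(__perf_count(42));	/* prints val=42 count=42 */
		return 0;
	}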
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--  include/trace/ftrace.h | 19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 618af05..4163d93 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -507,8 +507,14 @@ static inline notrace int ftrace_get_offsets_##call( \
#undef TP_fast_assign
#define TP_fast_assign(args...) args
-#undef TP_perf_assign
-#define TP_perf_assign(args...)
+#undef __perf_addr
+#define __perf_addr(a) (a)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -636,16 +642,13 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#define __get_str(field) (char *)__get_dynamic_array(field)
#undef __perf_addr
-#define __perf_addr(a) __addr = (a)
+#define __perf_addr(a) (__addr = (a))
#undef __perf_count
-#define __perf_count(c) __count = (c)
+#define __perf_count(c) (__count = (c))
#undef __perf_task
-#define __perf_task(t) __task = (t)
-
-#undef TP_perf_assign
-#define TP_perf_assign(args...) args
+#define __perf_task(t) (__task = (t))
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
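For illustration, here is a sketch of how a tracepoint definition can wrap an argument once __perf_xxx() is accepted inside TP_ARGS(). The event name and fields are hypothetical and not part of this diff; actual callers were converted in follow-up patches:

	/*
	 * Hypothetical event: wrapping "delay" in __perf_count() inside
	 * TP_ARGS() lets the perf probe pick it up as the event count,
	 * with no separate TP_perf_assign() section needed.
	 */
	TRACE_EVENT(sample_stat_wait,

		TP_PROTO(struct task_struct *tsk, u64 delay),

		TP_ARGS(tsk, __perf_count(delay)),

		TP_STRUCT__entry(
			__field(	pid_t,	pid	)
			__field(	u64,	delay	)
		),

		TP_fast_assign(
			__entry->pid	= tsk->pid;
			__entry->delay	= delay;
		),

		TP_printk("pid=%d delay=%llu [ns]",
			  __entry->pid, (unsigned long long)__entry->delay)
	);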