Diffstat (limited to 'kernel')
-rw-r--r--  kernel/extable.c                      |  15
-rw-r--r--  kernel/trace/Kconfig                  |   6
-rw-r--r--  kernel/trace/Makefile                 |   2
-rw-r--r--  kernel/trace/events.c                 |   1
-rw-r--r--  kernel/trace/ftrace.c                 |  68
-rw-r--r--  kernel/trace/ring_buffer.c            | 125
-rw-r--r--  kernel/trace/trace.c                  | 161
-rw-r--r--  kernel/trace/trace.h                  |  94
-rw-r--r--  kernel/trace/trace_clock.c            |   1
-rw-r--r--  kernel/trace/trace_event_profile.c    |  31
-rw-r--r--  kernel/trace/trace_event_types.h      |   2
-rw-r--r--  kernel/trace/trace_events.c           | 246
-rw-r--r--  kernel/trace/trace_events_filter.c    | 427
-rw-r--r--  kernel/trace/trace_events_stage_2.h   |  45
-rw-r--r--  kernel/trace/trace_events_stage_3.h   |  68
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 227
-rw-r--r--  kernel/trace/trace_mmiotrace.c        |   2
-rw-r--r--  kernel/trace/trace_nop.c              |   1
-rw-r--r--  kernel/trace/trace_output.c           |  56
-rw-r--r--  kernel/trace/trace_output.h           |  44
-rw-r--r--  kernel/trace/trace_printk.c           |   8
-rw-r--r--  kernel/trace/trace_selftest.c         |  38
-rw-r--r--  kernel/trace/trace_stat.c             |  47
-rw-r--r--  kernel/trace/trace_workqueue.c        |  12
24 files changed, 1446 insertions, 281 deletions
diff --git a/kernel/extable.c b/kernel/extable.c
index 0df6253..b54a601 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -15,11 +15,22 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/ftrace.h>
+#include <linux/memory.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/init.h>
-#include <linux/ftrace.h>
-#include <asm/uaccess.h>
+
#include <asm/sections.h>
+#include <asm/uaccess.h>
+
+/*
+ * mutex protecting text section modification (dynamic code patching).
+ * some users need to sleep (allocating memory...) while they hold this lock.
+ *
+ * NOT exported to modules - patching kernel text is a really delicate matter.
+ */
+DEFINE_MUTEX(text_mutex);
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];
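A note on the text_mutex introduced above: it serializes live kernel text patching (dynamic code modification) and, unlike a spinlock, lets its holder sleep, e.g. to allocate memory. A minimal sketch of a caller, assuming a hypothetical patch_one_site() helper; text_poke() is the x86 text-patching primitive of this era and is not part of this diff:

    #include <linux/memory.h>   /* text_mutex */
    #include <linux/mutex.h>

    /* Hypothetical patching path, for illustration only. */
    static void patch_one_site(void *addr, const void *insn, size_t len)
    {
            mutex_lock(&text_mutex);        /* sleeping while held is allowed */
            text_poke(addr, insn, len);     /* arch-specific patch primitive */
            mutex_unlock(&text_mutex);
    }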
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index b0a46f8..8a4d729 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -63,7 +63,11 @@ config TRACING
#
config TRACING_SUPPORT
bool
- depends on TRACE_IRQFLAGS_SUPPORT
+ # PPC32 has no irqflags tracing support, but it can use most of the
+ # tracers anyway; they were tested to build and work. Note that new
+ # exceptions to this list aren't welcome; better to implement
+ # irqflags tracing for your architecture.
+ depends on TRACE_IRQFLAGS_SUPPORT || PPC32
depends on STACKTRACE_SUPPORT
default y
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index c3feea0..2630f51 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -44,5 +44,7 @@ obj-$(CONFIG_EVENT_TRACER) += trace_events.o
obj-$(CONFIG_EVENT_TRACER) += events.o
obj-$(CONFIG_EVENT_TRACER) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
+obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACER) += trace_events_filter.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/events.c b/kernel/trace/events.c
index 9fc918d..246f2aa 100644
--- a/kernel/trace/events.c
+++ b/kernel/trace/events.c
@@ -12,4 +12,3 @@
#include "trace_events_stage_2.h"
#include "trace_events_stage_3.h"
-#include <trace/trace_event_types.h>
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7847806..1752a63 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -29,6 +29,8 @@
#include <linux/list.h>
#include <linux/hash.h>
+#include <trace/sched.h>
+
#include <asm/ftrace.h>
#include "trace.h"
@@ -339,7 +341,7 @@ static inline int record_frozen(struct dyn_ftrace *rec)
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
- rec->ip = (unsigned long)ftrace_free_records;
+ rec->freelist = ftrace_free_records;
ftrace_free_records = rec;
rec->flags |= FTRACE_FL_FREE;
}
@@ -356,9 +358,14 @@ void ftrace_release(void *start, unsigned long size)
mutex_lock(&ftrace_lock);
do_for_each_ftrace_rec(pg, rec) {
- if ((rec->ip >= s) && (rec->ip < e) &&
- !(rec->flags & FTRACE_FL_FREE))
+ if ((rec->ip >= s) && (rec->ip < e)) {
+ /*
+ * rec->ip is changed in ftrace_free_rec().
+ * It should not be between s and e if the record was freed.
+ */
+ FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
ftrace_free_rec(rec);
+ }
} while_for_each_ftrace_rec();
mutex_unlock(&ftrace_lock);
}
@@ -377,7 +384,7 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
return NULL;
}
- ftrace_free_records = (void *)rec->ip;
+ ftrace_free_records = rec->freelist;
memset(rec, 0, sizeof(*rec));
return rec;
}
@@ -409,7 +416,7 @@ ftrace_record_ip(unsigned long ip)
return NULL;
rec->ip = ip;
- rec->flags = (unsigned long)ftrace_new_addrs;
+ rec->newlist = ftrace_new_addrs;
ftrace_new_addrs = rec;
return rec;
@@ -729,7 +736,7 @@ static int ftrace_update_code(struct module *mod)
return -1;
p = ftrace_new_addrs;
- ftrace_new_addrs = (struct dyn_ftrace *)p->flags;
+ ftrace_new_addrs = p->newlist;
p->flags = 0L;
/* convert record (i.e, patch mcount-call with NOP) */
@@ -2262,7 +2269,7 @@ ftrace_pid_read(struct file *file, char __user *ubuf,
if (ftrace_pid_trace == ftrace_swapper_pid)
r = sprintf(buf, "swapper tasks\n");
else if (ftrace_pid_trace)
- r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+ r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
else
r = sprintf(buf, "no pid\n");
@@ -2590,6 +2597,38 @@ free:
return ret;
}
+static void
+ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
+ struct task_struct *next)
+{
+ unsigned long long timestamp;
+ int index;
+
+ /*
+ * Does the user want to count the time a function was asleep?
+ * If so, do not update the time stamps.
+ */
+ if (trace_flags & TRACE_ITER_SLEEP_TIME)
+ return;
+
+ timestamp = trace_clock_local();
+
+ prev->ftrace_timestamp = timestamp;
+
+ /* only process tasks that we timestamped */
+ if (!next->ftrace_timestamp)
+ return;
+
+ /*
+ * Update all the counters in next to make up for the
+ * time next was sleeping.
+ */
+ timestamp -= next->ftrace_timestamp;
+
+ for (index = next->curr_ret_stack; index >= 0; index--)
+ next->ret_stack[index].calltime += timestamp;
+}
+
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
@@ -2611,6 +2650,13 @@ static int start_graph_tracing(void)
ret = alloc_retstack_tasklist(ret_stack_list);
} while (ret == -EAGAIN);
+ if (!ret) {
+ ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
+ if (ret)
+ pr_info("ftrace_graph: Couldn't activate tracepoint"
+ " probe to kernel_sched_switch\n");
+ }
+
kfree(ret_stack_list);
return ret;
}
@@ -2643,6 +2689,12 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
mutex_lock(&ftrace_lock);
+ /* we currently allow only one tracer registered at a time */
+ if (atomic_read(&ftrace_graph_active)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
register_pm_notifier(&ftrace_suspend_notifier);
@@ -2668,6 +2720,7 @@ void unregister_ftrace_graph(void)
mutex_lock(&ftrace_lock);
atomic_dec(&ftrace_graph_active);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(FTRACE_STOP_FUNC_RET);
@@ -2688,6 +2741,7 @@ void ftrace_graph_init_task(struct task_struct *t)
t->curr_ret_stack = -1;
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
+ t->ftrace_timestamp = 0;
} else
t->ret_stack = NULL;
}
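To make the new sleep-time handling concrete: with the "sleep-time" option cleared, ftrace_graph_probe_sched_switch() above shifts the recorded calltime of every function still on a task's return stack forward by the time the task spent scheduled out. For example, a function entered at t=100 whose task sleeps from t=110 to t=150 has its calltime moved from 100 to 140, so a return at t=160 is reported as 20 units of run time instead of 60 units of wall time. With the option set (the new default, TRACE_ITER_SLEEP_TIME), the probe returns early and durations keep including sleep.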
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 384ca5d..edce2ff 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -189,16 +189,65 @@ enum {
RB_LEN_TIME_STAMP = 16,
};
-/* inline for ring buffer fast paths */
+static inline int rb_null_event(struct ring_buffer_event *event)
+{
+ return event->type == RINGBUF_TYPE_PADDING && event->time_delta == 0;
+}
+
+static inline int rb_discarded_event(struct ring_buffer_event *event)
+{
+ return event->type == RINGBUF_TYPE_PADDING && event->time_delta;
+}
+
+static void rb_event_set_padding(struct ring_buffer_event *event)
+{
+ event->type = RINGBUF_TYPE_PADDING;
+ event->time_delta = 0;
+}
+
+/**
+ * ring_buffer_event_discard - discard an event in the ring buffer
+ * @buffer: the ring buffer
+ * @event: the event to discard
+ *
+ * Sometimes an event that is in the ring buffer needs to be ignored.
+ * This function lets the user discard an event in the ring buffer
+ * and then that event will not be read later.
+ *
+ * Note, it is up to the user to be careful with this, and protect
+ * against races. If the user discards an event that has been consumed
+ * it is possible that it could corrupt the ring buffer.
+ */
+void ring_buffer_event_discard(struct ring_buffer_event *event)
+{
+ event->type = RINGBUF_TYPE_PADDING;
+ /* time delta must be non zero */
+ if (!event->time_delta)
+ event->time_delta = 1;
+}
+
static unsigned
-rb_event_length(struct ring_buffer_event *event)
+rb_event_data_length(struct ring_buffer_event *event)
{
unsigned length;
+ if (event->len)
+ length = event->len * RB_ALIGNMENT;
+ else
+ length = event->array[0];
+ return length + RB_EVNT_HDR_SIZE;
+}
+
+/* inline for ring buffer fast paths */
+static unsigned
+rb_event_length(struct ring_buffer_event *event)
+{
switch (event->type) {
case RINGBUF_TYPE_PADDING:
- /* undefined */
- return -1;
+ if (rb_null_event(event))
+ /* undefined */
+ return -1;
+ return rb_event_data_length(event);
case RINGBUF_TYPE_TIME_EXTEND:
return RB_LEN_TIME_EXTEND;
@@ -207,11 +256,7 @@ rb_event_length(struct ring_buffer_event *event)
return RB_LEN_TIME_STAMP;
case RINGBUF_TYPE_DATA:
- if (event->len)
- length = event->len * RB_ALIGNMENT;
- else
- length = event->array[0];
- return length + RB_EVNT_HDR_SIZE;
+ return rb_event_data_length(event);
default:
BUG();
}
@@ -535,8 +580,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
extern int ring_buffer_page_too_big(void);
#ifdef CONFIG_HOTPLUG_CPU
-static int __cpuinit rb_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu);
+static int rb_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu);
#endif
/**
@@ -845,11 +890,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
-static inline int rb_null_event(struct ring_buffer_event *event)
-{
- return event->type == RINGBUF_TYPE_PADDING;
-}
-
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
@@ -1219,7 +1259,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
if (tail < BUF_PAGE_SIZE) {
/* Mark the rest of the page with padding */
event = __rb_page_index(tail_page, tail);
- event->type = RINGBUF_TYPE_PADDING;
+ rb_event_set_padding(event);
}
if (tail <= BUF_PAGE_SIZE)
@@ -1969,7 +2009,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
event = rb_reader_event(cpu_buffer);
- if (event->type == RINGBUF_TYPE_DATA)
+ if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
cpu_buffer->entries--;
rb_update_read_stamp(cpu_buffer, event);
@@ -2052,9 +2092,18 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
switch (event->type) {
case RINGBUF_TYPE_PADDING:
- RB_WARN_ON(cpu_buffer, 1);
+ if (rb_null_event(event))
+ RB_WARN_ON(cpu_buffer, 1);
+ /*
+ * Because the writer could be discarding every
+ * event it creates (which would probably be bad),
+ * if we were to go back to "again" then we may never
+ * catch up, and will trigger the warn on, or lock
+ * the box. Return the padding, and we will release
+ * the current locks, and try again.
+ */
rb_advance_reader(cpu_buffer);
- return NULL;
+ return event;
case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
@@ -2115,8 +2164,12 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
switch (event->type) {
case RINGBUF_TYPE_PADDING:
- rb_inc_iter(iter);
- goto again;
+ if (rb_null_event(event)) {
+ rb_inc_iter(iter);
+ goto again;
+ }
+ rb_advance_iter(iter);
+ return event;
case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
@@ -2163,10 +2216,16 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
+ again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_buffer_peek(buffer, cpu, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+ goto again;
+ }
+
return event;
}
@@ -2185,10 +2244,16 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_event *event;
unsigned long flags;
+ again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+ goto again;
+ }
+
return event;
}
@@ -2207,6 +2272,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
struct ring_buffer_event *event = NULL;
unsigned long flags;
+ again:
/* might be called in atomic */
preempt_disable();
@@ -2228,6 +2294,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
out:
preempt_enable();
+ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+ goto again;
+ }
+
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
@@ -2306,6 +2377,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
+ again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
if (!event)
@@ -2315,6 +2387,11 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+ goto again;
+ }
+
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
@@ -2784,8 +2861,8 @@ static __init int rb_init_debugfs(void)
fs_initcall(rb_init_debugfs);
#ifdef CONFIG_HOTPLUG_CPU
-static int __cpuinit rb_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int rb_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
struct ring_buffer *buffer =
container_of(self, struct ring_buffer, cpu_notify);
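For context, a sketch of how the new ring_buffer_event_discard() is meant to be used by a writer; the reserve/commit calls are this file's API, event_is_wanted() is a hypothetical predicate, and the stage_3 event macros later in this series do essentially the same dance with filter_match_preds():

    /* Reserve an event, decide it is unwanted, and discard it in place. */
    static void example_write(struct ring_buffer *buffer, int value)
    {
            struct ring_buffer_event *event;
            int *body;

            event = ring_buffer_lock_reserve(buffer, sizeof(*body));
            if (!event)
                    return;

            body = ring_buffer_event_data(event);
            *body = value;

            if (!event_is_wanted(body))             /* hypothetical filter */
                    ring_buffer_event_discard(event);

            /* The slot is still committed; readers skip over the padding. */
            ring_buffer_unlock_commit(buffer, event);
    }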
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c95b729..a0174a4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -255,7 +255,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
- TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO;
+ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
/**
* trace_wake_up - wake up tasks waiting for trace input
@@ -316,6 +316,7 @@ static const char *trace_options[] = {
"context-info",
"latency-format",
"global-clock",
+ "sleep-time",
NULL
};
@@ -382,7 +383,7 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
return cnt;
}
-ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
+static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
void *ret;
@@ -860,15 +861,25 @@ static void ftrace_trace_stack(struct trace_array *tr,
static void ftrace_trace_userstack(struct trace_array *tr,
unsigned long flags, int pc);
-void trace_buffer_unlock_commit(struct trace_array *tr,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
+static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ int wake)
{
ring_buffer_unlock_commit(tr->buffer, event);
ftrace_trace_stack(tr, flags, 6, pc);
ftrace_trace_userstack(tr, flags, pc);
- trace_wake_up();
+
+ if (wake)
+ trace_wake_up();
+}
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc)
+{
+ __trace_buffer_unlock_commit(tr, event, flags, pc, 1);
}
struct ring_buffer_event *
@@ -882,7 +893,13 @@ trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
unsigned long flags, int pc)
{
- return trace_buffer_unlock_commit(&global_trace, event, flags, pc);
+ return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+}
+
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc)
+{
+ return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
}
void
@@ -908,7 +925,7 @@ trace_function(struct trace_array *tr,
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_graph_entry(struct trace_array *tr,
+static int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
unsigned long flags,
int pc)
@@ -917,15 +934,17 @@ static void __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent_entry *entry;
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
- return;
+ return 0;
event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
sizeof(*entry), flags, pc);
if (!event)
- return;
+ return 0;
entry = ring_buffer_event_data(event);
entry->graph_ent = *trace;
ring_buffer_unlock_commit(global_trace.buffer, event);
+
+ return 1;
}
static void __trace_graph_return(struct trace_array *tr,
@@ -1146,6 +1165,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
+ int ret;
int cpu;
int pc;
@@ -1161,15 +1181,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
- __trace_graph_entry(tr, trace, flags, pc);
+ ret = __trace_graph_entry(tr, trace, flags, pc);
+ } else {
+ ret = 0;
}
/* Only do the atomic if it is not already set */
if (!test_tsk_trace_graph(current))
set_tsk_trace_graph(current);
+
atomic_dec(&data->disabled);
local_irq_restore(flags);
- return 1;
+ return ret;
}
void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1201,7 +1224,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
* trace_vbprintk - write binary msg to tracing buffer
*
*/
-int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
static raw_spinlock_t trace_buf_lock =
(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
@@ -1243,7 +1266,6 @@ int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
goto out_unlock;
entry = ring_buffer_event_data(event);
entry->ip = ip;
- entry->depth = depth;
entry->fmt = fmt;
memcpy(entry->buf, trace_buf, sizeof(u32) * len);
@@ -1261,7 +1283,7 @@ out:
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
@@ -1298,7 +1320,6 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
goto out_unlock;
entry = ring_buffer_event_data(event);
entry->ip = ip;
- entry->depth = depth;
memcpy(&entry->buf, trace_buf, len);
entry->buf[len] = 0;
@@ -1701,38 +1722,6 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
return TRACE_TYPE_HANDLED;
}
-static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter)
-{
- struct trace_seq *s = &iter->seq;
- struct trace_entry *entry = iter->ent;
- struct bprint_entry *field;
- int ret;
-
- trace_assign_type(field, entry);
-
- ret = trace_seq_bprintf(s, field->fmt, field->buf);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
-{
- struct trace_seq *s = &iter->seq;
- struct trace_entry *entry = iter->ent;
- struct print_entry *field;
- int ret;
-
- trace_assign_type(field, entry);
-
- ret = trace_seq_printf(s, "%s", field->buf);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- return TRACE_TYPE_HANDLED;
-}
-
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
@@ -1794,12 +1783,12 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
if (iter->ent->type == TRACE_BPRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
- return print_bprintk_msg_only(iter);
+ return trace_print_bprintk_msg_only(iter);
if (iter->ent->type == TRACE_PRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
- return print_printk_msg_only(iter);
+ return trace_print_printk_msg_only(iter);
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
@@ -1948,9 +1937,14 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
static int tracing_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
- struct trace_iterator *iter = m->private;
+ struct trace_iterator *iter;
int cpu;
+ if (!(file->f_mode & FMODE_READ))
+ return 0;
+
+ iter = m->private;
+
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
@@ -1976,12 +1970,24 @@ static int tracing_open(struct inode *inode, struct file *file)
struct trace_iterator *iter;
int ret = 0;
- iter = __tracing_open(inode, file);
- if (IS_ERR(iter))
- ret = PTR_ERR(iter);
- else if (trace_flags & TRACE_ITER_LATENCY_FMT)
- iter->iter_flags |= TRACE_FILE_LAT_FMT;
+ /* If this file was open for write, then erase contents */
+ if ((file->f_mode & FMODE_WRITE) &&
+ !(file->f_flags & O_APPEND)) {
+ long cpu = (long) inode->i_private;
+ if (cpu == TRACE_PIPE_ALL_CPU)
+ tracing_reset_online_cpus(&global_trace);
+ else
+ tracing_reset(&global_trace, cpu);
+ }
+
+ if (file->f_mode & FMODE_READ) {
+ iter = __tracing_open(inode, file);
+ if (IS_ERR(iter))
+ ret = PTR_ERR(iter);
+ else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ iter->iter_flags |= TRACE_FILE_LAT_FMT;
+ }
return ret;
}
@@ -2056,9 +2062,17 @@ static int show_traces_open(struct inode *inode, struct file *file)
return ret;
}
+static ssize_t
+tracing_write_stub(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
+ .write = tracing_write_stub,
.llseek = seq_lseek,
.release = tracing_release,
};
@@ -3154,7 +3168,7 @@ static int mark_printk(const char *fmt, ...)
int ret;
va_list args;
va_start(args, fmt);
- ret = trace_vprintk(0, -1, fmt, args);
+ ret = trace_vprintk(0, fmt, args);
va_end(args);
return ret;
}
@@ -3522,6 +3536,9 @@ struct dentry *tracing_init_dentry(void)
if (d_tracer)
return d_tracer;
+ if (!debugfs_initialized())
+ return NULL;
+
d_tracer = debugfs_create_dir("tracing", NULL);
if (!d_tracer && !once) {
@@ -3583,7 +3600,7 @@ static void tracing_init_debugfs_percpu(long cpu)
pr_warning("Could not create debugfs 'trace_pipe' entry\n");
/* per cpu trace */
- entry = debugfs_create_file("trace", 0444, d_cpu,
+ entry = debugfs_create_file("trace", 0644, d_cpu,
(void *) cpu, &tracing_fops);
if (!entry)
pr_warning("Could not create debugfs 'trace' entry\n");
@@ -3897,7 +3914,7 @@ static __init int tracer_init_debugfs(void)
if (!entry)
pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
- entry = debugfs_create_file("trace", 0444, d_tracer,
+ entry = debugfs_create_file("trace", 0644, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
if (!entry)
pr_warning("Could not create debugfs 'trace' entry\n");
@@ -4027,11 +4044,12 @@ trace_printk_seq(struct trace_seq *s)
trace_seq_init(s);
}
-void ftrace_dump(void)
+static void __ftrace_dump(bool disable_tracing)
{
static DEFINE_SPINLOCK(ftrace_dump_lock);
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
+ unsigned int old_userobj;
static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
@@ -4043,14 +4061,17 @@ void ftrace_dump(void)
dump_ran = 1;
- /* No turning back! */
tracing_off();
- ftrace_kill();
+
+ if (disable_tracing)
+ ftrace_kill();
for_each_tracing_cpu(cpu) {
atomic_inc(&global_trace.data[cpu]->disabled);
}
+ old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+
/* don't look at user memory in panic mode */
trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
@@ -4095,10 +4116,26 @@ void ftrace_dump(void)
else
printk(KERN_TRACE "---------------------------------\n");
+ /* Re-enable tracing if requested */
+ if (!disable_tracing) {
+ trace_flags |= old_userobj;
+
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&global_trace.data[cpu]->disabled);
+ }
+ tracing_on();
+ }
+
out:
spin_unlock_irqrestore(&ftrace_dump_lock, flags);
}
+/* By default: disable tracing after the dump */
+void ftrace_dump(void)
+{
+ __ftrace_dump(true);
+}
+
__init static int tracer_alloc_buffers(void)
{
struct trace_array_cpu *data;
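Taken together, the truncate handling in tracing_open(), the tracing_write_stub() write hook and the 0444 -> 0644 mode changes above make the 'trace' files clearable from userspace. A hypothetical session (assuming debugfs is mounted at /sys/kernel/debug):

    # echo > /sys/kernel/debug/tracing/trace                 (resets all online CPUs)
    # echo > /sys/kernel/debug/tracing/per_cpu/cpu0/trace    (resets only CPU 0)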
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 38276d1..cb0ce3f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -123,7 +123,6 @@ struct userstack_entry {
struct bprint_entry {
struct trace_entry ent;
unsigned long ip;
- int depth;
const char *fmt;
u32 buf[];
};
@@ -131,7 +130,6 @@ struct bprint_entry {
struct print_entry {
struct trace_entry ent;
unsigned long ip;
- int depth;
char buf[];
};
@@ -485,6 +483,8 @@ trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@@ -598,9 +598,9 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
extern void *head_page(struct trace_array_cpu *data);
extern long ns2usecs(cycle_t nsec);
extern int
-trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
-trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern unsigned long trace_flags;
@@ -685,6 +685,7 @@ enum trace_iterator_flags {
TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
TRACE_ITER_LATENCY_FMT = 0x40000,
TRACE_ITER_GLOBAL_CLK = 0x80000,
+ TRACE_ITER_SLEEP_TIME = 0x100000,
};
/*
@@ -777,22 +778,89 @@ enum {
TRACE_EVENT_TYPE_RAW = 2,
};
+struct ftrace_event_field {
+ struct list_head link;
+ char *name;
+ char *type;
+ int offset;
+ int size;
+};
+
struct ftrace_event_call {
- char *name;
- char *system;
- struct dentry *dir;
- int enabled;
- int (*regfunc)(void);
- void (*unregfunc)(void);
- int id;
- int (*raw_init)(void);
- int (*show_format)(struct trace_seq *s);
+ char *name;
+ char *system;
+ struct dentry *dir;
+ int enabled;
+ int (*regfunc)(void);
+ void (*unregfunc)(void);
+ int id;
+ int (*raw_init)(void);
+ int (*show_format)(struct trace_seq *s);
+ int (*define_fields)(void);
+ struct list_head fields;
+ struct filter_pred **preds;
+
+#ifdef CONFIG_EVENT_PROFILE
+ atomic_t profile_count;
+ int (*profile_enable)(struct ftrace_event_call *);
+ void (*profile_disable)(struct ftrace_event_call *);
+#endif
};
+struct event_subsystem {
+ struct list_head list;
+ const char *name;
+ struct dentry *entry;
+ struct filter_pred **preds;
+};
+
+#define events_for_each(event) \
+ for (event = __start_ftrace_events; \
+ (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+ event++)
+
+#define MAX_FILTER_PRED 8
+
+struct filter_pred;
+
+typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
+
+struct filter_pred {
+ filter_pred_fn_t fn;
+ u64 val;
+ char *str_val;
+ int str_len;
+ char *field_name;
+ int offset;
+ int not;
+ int or;
+ int compound;
+ int clear;
+};
+
+int trace_define_field(struct ftrace_event_call *call, char *type,
+ char *name, int offset, int size);
+extern void filter_free_pred(struct filter_pred *pred);
+extern void filter_print_preds(struct filter_pred **preds,
+ struct trace_seq *s);
+extern int filter_parse(char **pbuf, struct filter_pred *pred);
+extern int filter_add_pred(struct ftrace_event_call *call,
+ struct filter_pred *pred);
+extern void filter_free_preds(struct ftrace_event_call *call);
+extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern void filter_free_subsystem_preds(struct event_subsystem *system);
+extern int filter_add_subsystem_pred(struct event_subsystem *system,
+ struct filter_pred *pred);
+
void event_trace_printk(unsigned long ip, const char *fmt, ...);
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
+#define for_each_event(event) \
+ for (event = __start_ftrace_events; \
+ (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+ event++)
+
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 05b176a..b588fd8 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -18,6 +18,7 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
+#include <linux/trace_clock.h>
/*
* trace_clock_local(): the simplest and least coherent tracing clock.
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
new file mode 100644
index 0000000..22cba99
--- /dev/null
+++ b/kernel/trace/trace_event_profile.c
@@ -0,0 +1,31 @@
+/*
+ * trace event based perf counter profiling
+ *
+ * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ */
+
+#include "trace.h"
+
+int ftrace_profile_enable(int event_id)
+{
+ struct ftrace_event_call *event;
+
+ for_each_event(event) {
+ if (event->id == event_id)
+ return event->profile_enable(event);
+ }
+
+ return -EINVAL;
+}
+
+void ftrace_profile_disable(int event_id)
+{
+ struct ftrace_event_call *event;
+
+ for_each_event(event) {
+ if (event->id == event_id)
+ return event->profile_disable(event);
+ }
+}
+
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
index 0199150..fd78bee 100644
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -105,7 +105,6 @@ TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore,
TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
TRACE_STRUCT(
TRACE_FIELD(unsigned long, ip, ip)
- TRACE_FIELD(unsigned int, depth, depth)
TRACE_FIELD(char *, fmt, fmt)
TRACE_FIELD_ZERO_CHAR(buf)
),
@@ -115,7 +114,6 @@ TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore,
TRACE_STRUCT(
TRACE_FIELD(unsigned long, ip, ip)
- TRACE_FIELD(unsigned int, depth, depth)
TRACE_FIELD_ZERO_CHAR(buf)
),
TP_RAW_FMT("%08lx (%d) fmt:%p %s")
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c88227b..64ec4d2 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -19,10 +19,38 @@
static DEFINE_MUTEX(event_mutex);
-#define events_for_each(event) \
- for (event = __start_ftrace_events; \
- (unsigned long)event < (unsigned long)__stop_ftrace_events; \
- event++)
+int trace_define_field(struct ftrace_event_call *call, char *type,
+ char *name, int offset, int size)
+{
+ struct ftrace_event_field *field;
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ goto err;
+
+ field->name = kstrdup(name, GFP_KERNEL);
+ if (!field->name)
+ goto err;
+
+ field->type = kstrdup(type, GFP_KERNEL);
+ if (!field->type)
+ goto err;
+
+ field->offset = offset;
+ field->size = size;
+ list_add(&field->link, &call->fields);
+
+ return 0;
+
+err:
+ if (field) {
+ kfree(field->name);
+ kfree(field->type);
+ }
+ kfree(field);
+
+ return -ENOMEM;
+}
static void ftrace_clear_events(void)
{
@@ -90,7 +118,7 @@ static int ftrace_set_clr_event(char *buf, int set)
}
mutex_lock(&event_mutex);
- events_for_each(call) {
+ for_each_event(call) {
if (!call->name || !call->regfunc)
continue;
@@ -348,7 +376,8 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
#undef FIELD
#define FIELD(type, name) \
- #type, #name, offsetof(typeof(field), name), sizeof(field.name)
+ #type, "common_" #name, offsetof(typeof(field), name), \
+ sizeof(field.name)
static int trace_write_header(struct trace_seq *s)
{
@@ -412,6 +441,162 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
return r;
}
+static ssize_t
+event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct ftrace_event_call *call = filp->private_data;
+ struct trace_seq *s;
+ int r;
+
+ if (*ppos)
+ return 0;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+ trace_seq_printf(s, "%d\n", call->id);
+
+ r = simple_read_from_buffer(ubuf, cnt, ppos,
+ s->buffer, s->len);
+ kfree(s);
+ return r;
+}
+
+static ssize_t
+event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ftrace_event_call *call = filp->private_data;
+ struct trace_seq *s;
+ int r;
+
+ if (*ppos)
+ return 0;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+
+ filter_print_preds(call->preds, s);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+
+ kfree(s);
+
+ return r;
+}
+
+static ssize_t
+event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ftrace_event_call *call = filp->private_data;
+ char buf[64], *pbuf = buf;
+ struct filter_pred *pred;
+ int err;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ pred = kzalloc(sizeof(*pred), GFP_KERNEL);
+ if (!pred)
+ return -ENOMEM;
+
+ err = filter_parse(&pbuf, pred);
+ if (err < 0) {
+ filter_free_pred(pred);
+ return err;
+ }
+
+ if (pred->clear) {
+ filter_free_preds(call);
+ filter_free_pred(pred);
+ return cnt;
+ }
+
+ if (filter_add_pred(call, pred)) {
+ filter_free_pred(pred);
+ return -EINVAL;
+ }
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
+static ssize_t
+subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct event_subsystem *system = filp->private_data;
+ struct trace_seq *s;
+ int r;
+
+ if (*ppos)
+ return 0;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+
+ filter_print_preds(system->preds, s);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+
+ kfree(s);
+
+ return r;
+}
+
+static ssize_t
+subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct event_subsystem *system = filp->private_data;
+ char buf[64], *pbuf = buf;
+ struct filter_pred *pred;
+ int err;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ pred = kzalloc(sizeof(*pred), GFP_KERNEL);
+ if (!pred)
+ return -ENOMEM;
+
+ err = filter_parse(&pbuf, pred);
+ if (err < 0) {
+ filter_free_pred(pred);
+ return err;
+ }
+
+ if (pred->clear) {
+ filter_free_subsystem_preds(system);
+ filter_free_pred(pred);
+ return cnt;
+ }
+
+ if (filter_add_subsystem_pred(system, pred)) {
+ filter_free_subsystem_preds(system);
+ filter_free_pred(pred);
+ return -EINVAL;
+ }
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
static const struct seq_operations show_event_seq_ops = {
.start = t_start,
.next = t_next,
@@ -452,6 +637,23 @@ static const struct file_operations ftrace_event_format_fops = {
.read = event_format_read,
};
+static const struct file_operations ftrace_event_id_fops = {
+ .open = tracing_open_generic,
+ .read = event_id_read,
+};
+
+static const struct file_operations ftrace_event_filter_fops = {
+ .open = tracing_open_generic,
+ .read = event_filter_read,
+ .write = event_filter_write,
+};
+
+static const struct file_operations ftrace_subsystem_filter_fops = {
+ .open = tracing_open_generic,
+ .read = subsystem_filter_read,
+ .write = subsystem_filter_write,
+};
+
static struct dentry *event_trace_events_dir(void)
{
static struct dentry *d_tracer;
@@ -472,12 +674,6 @@ static struct dentry *event_trace_events_dir(void)
return d_events;
}
-struct event_subsystem {
- struct list_head list;
- const char *name;
- struct dentry *entry;
-};
-
static LIST_HEAD(event_subsystems);
static struct dentry *
@@ -510,6 +706,8 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
system->name = name;
list_add(&system->list, &event_subsystems);
+ system->preds = NULL;
+
return system->entry;
}
@@ -550,6 +748,28 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
"'%s/enable' entry\n", call->name);
}
+ if (call->id) {
+ entry = debugfs_create_file("id", 0444, call->dir, call,
+ &ftrace_event_id_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs '%s/id' entry\n",
+ call->name);
+ }
+
+ if (call->define_fields) {
+ ret = call->define_fields();
+ if (ret < 0) {
+ pr_warning("Could not initialize trace point"
+ " events/%s\n", call->name);
+ return ret;
+ }
+ entry = debugfs_create_file("filter", 0644, call->dir, call,
+ &ftrace_event_filter_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'%s/filter' entry\n", call->name);
+ }
+
/* A trace may not want to export its format */
if (!call->show_format)
return 0;
@@ -592,7 +812,7 @@ static __init int event_trace_init(void)
if (!d_events)
return 0;
- events_for_each(call) {
+ for_each_event(call) {
/* The linker may leave blanks */
if (!call->name)
continue;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
new file mode 100644
index 0000000..026be41
--- /dev/null
+++ b/kernel/trace/trace_events_filter.c
@@ -0,0 +1,427 @@
+/*
+ * trace_events_filter - generic event filtering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include "trace.h"
+#include "trace_output.h"
+
+static int filter_pred_64(struct filter_pred *pred, void *event)
+{
+ u64 *addr = (u64 *)(event + pred->offset);
+ u64 val = (u64)pred->val;
+ int match;
+
+ match = (val == *addr) ^ pred->not;
+
+ return match;
+}
+
+static int filter_pred_32(struct filter_pred *pred, void *event)
+{
+ u32 *addr = (u32 *)(event + pred->offset);
+ u32 val = (u32)pred->val;
+ int match;
+
+ match = (val == *addr) ^ pred->not;
+
+ return match;
+}
+
+static int filter_pred_16(struct filter_pred *pred, void *event)
+{
+ u16 *addr = (u16 *)(event + pred->offset);
+ u16 val = (u16)pred->val;
+ int match;
+
+ match = (val == *addr) ^ pred->not;
+
+ return match;
+}
+
+static int filter_pred_8(struct filter_pred *pred, void *event)
+{
+ u8 *addr = (u8 *)(event + pred->offset);
+ u8 val = (u8)pred->val;
+ int match;
+
+ match = (val == *addr) ^ pred->not;
+
+ return match;
+}
+
+static int filter_pred_string(struct filter_pred *pred, void *event)
+{
+ char *addr = (char *)(event + pred->offset);
+ int cmp, match;
+
+ cmp = strncmp(addr, pred->str_val, pred->str_len);
+
+ match = (!cmp) ^ pred->not;
+
+ return match;
+}
+
+/* return 1 if event matches, 0 otherwise (discard) */
+int filter_match_preds(struct ftrace_event_call *call, void *rec)
+{
+ int i, matched, and_failed = 0;
+ struct filter_pred *pred;
+
+ for (i = 0; i < MAX_FILTER_PRED; i++) {
+ if (call->preds[i]) {
+ pred = call->preds[i];
+ if (and_failed && !pred->or)
+ continue;
+ matched = pred->fn(pred, rec);
+ if (!matched && !pred->or) {
+ and_failed = 1;
+ continue;
+ } else if (matched && pred->or)
+ return 1;
+ } else
+ break;
+ }
+
+ if (and_failed)
+ return 0;
+
+ return 1;
+}
+
+void filter_print_preds(struct filter_pred **preds, struct trace_seq *s)
+{
+ char *field_name;
+ struct filter_pred *pred;
+ int i;
+
+ if (!preds) {
+ trace_seq_printf(s, "none\n");
+ return;
+ }
+
+ for (i = 0; i < MAX_FILTER_PRED; i++) {
+ if (preds[i]) {
+ pred = preds[i];
+ field_name = pred->field_name;
+ if (i)
+ trace_seq_printf(s, pred->or ? "|| " : "&& ");
+ trace_seq_printf(s, "%s ", field_name);
+ trace_seq_printf(s, pred->not ? "!= " : "== ");
+ if (pred->str_val)
+ trace_seq_printf(s, "%s\n", pred->str_val);
+ else
+ trace_seq_printf(s, "%llu\n", pred->val);
+ } else
+ break;
+ }
+}
+
+static struct ftrace_event_field *
+find_event_field(struct ftrace_event_call *call, char *name)
+{
+ struct ftrace_event_field *field;
+
+ list_for_each_entry(field, &call->fields, link) {
+ if (!strcmp(field->name, name))
+ return field;
+ }
+
+ return NULL;
+}
+
+void filter_free_pred(struct filter_pred *pred)
+{
+ if (!pred)
+ return;
+
+ kfree(pred->field_name);
+ kfree(pred->str_val);
+ kfree(pred);
+}
+
+void filter_free_preds(struct ftrace_event_call *call)
+{
+ int i;
+
+ if (call->preds) {
+ for (i = 0; i < MAX_FILTER_PRED; i++)
+ filter_free_pred(call->preds[i]);
+ kfree(call->preds);
+ call->preds = NULL;
+ }
+}
+
+void filter_free_subsystem_preds(struct event_subsystem *system)
+{
+ struct ftrace_event_call *call = __start_ftrace_events;
+ int i;
+
+ if (system->preds) {
+ for (i = 0; i < MAX_FILTER_PRED; i++)
+ filter_free_pred(system->preds[i]);
+ kfree(system->preds);
+ system->preds = NULL;
+ }
+
+ events_for_each(call) {
+ if (!call->name || !call->regfunc)
+ continue;
+
+ if (!strcmp(call->system, system->name))
+ filter_free_preds(call);
+ }
+}
+
+static int __filter_add_pred(struct ftrace_event_call *call,
+ struct filter_pred *pred)
+{
+ int i;
+
+ if (call->preds && !pred->compound)
+ filter_free_preds(call);
+
+ if (!call->preds) {
+ call->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
+ GFP_KERNEL);
+ if (!call->preds)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_FILTER_PRED; i++) {
+ if (!call->preds[i]) {
+ call->preds[i] = pred;
+ return 0;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static int is_string_field(const char *type)
+{
+ if (strchr(type, '[') && strstr(type, "char"))
+ return 1;
+
+ return 0;
+}
+
+int filter_add_pred(struct ftrace_event_call *call, struct filter_pred *pred)
+{
+ struct ftrace_event_field *field;
+
+ field = find_event_field(call, pred->field_name);
+ if (!field)
+ return -EINVAL;
+
+ pred->offset = field->offset;
+
+ if (is_string_field(field->type)) {
+ if (!pred->str_val)
+ return -EINVAL;
+ pred->fn = filter_pred_string;
+ pred->str_len = field->size;
+ return __filter_add_pred(call, pred);
+ } else {
+ if (pred->str_val)
+ return -EINVAL;
+ }
+
+ switch (field->size) {
+ case 8:
+ pred->fn = filter_pred_64;
+ break;
+ case 4:
+ pred->fn = filter_pred_32;
+ break;
+ case 2:
+ pred->fn = filter_pred_16;
+ break;
+ case 1:
+ pred->fn = filter_pred_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return __filter_add_pred(call, pred);
+}
+
+static struct filter_pred *copy_pred(struct filter_pred *pred)
+{
+ struct filter_pred *new_pred = kmalloc(sizeof(*pred), GFP_KERNEL);
+ if (!new_pred)
+ return NULL;
+
+ memcpy(new_pred, pred, sizeof(*pred));
+
+ if (pred->field_name) {
+ new_pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
+ if (!new_pred->field_name) {
+ kfree(new_pred);
+ return NULL;
+ }
+ }
+
+ if (pred->str_val) {
+ new_pred->str_val = kstrdup(pred->str_val, GFP_KERNEL);
+ if (!new_pred->str_val) {
+ filter_free_pred(new_pred);
+ return NULL;
+ }
+ }
+
+ return new_pred;
+}
+
+int filter_add_subsystem_pred(struct event_subsystem *system,
+ struct filter_pred *pred)
+{
+ struct ftrace_event_call *call = __start_ftrace_events;
+ struct filter_pred *event_pred;
+ int i;
+
+ if (system->preds && !pred->compound)
+ filter_free_subsystem_preds(system);
+
+ if (!system->preds) {
+ system->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
+ GFP_KERNEL);
+ if (!system->preds)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_FILTER_PRED; i++) {
+ if (!system->preds[i]) {
+ system->preds[i] = pred;
+ break;
+ }
+ }
+
+ if (i == MAX_FILTER_PRED)
+ return -EINVAL;
+
+ events_for_each(call) {
+ int err;
+
+ if (!call->name || !call->regfunc)
+ continue;
+
+ if (strcmp(call->system, system->name))
+ continue;
+
+ if (!find_event_field(call, pred->field_name))
+ continue;
+
+ event_pred = copy_pred(pred);
+ if (!event_pred)
+ goto oom;
+
+ err = filter_add_pred(call, event_pred);
+ if (err)
+ filter_free_pred(event_pred);
+ if (err == -ENOMEM)
+ goto oom;
+ }
+
+ return 0;
+
+oom:
+ system->preds[i] = NULL;
+ return -ENOMEM;
+}
+
+int filter_parse(char **pbuf, struct filter_pred *pred)
+{
+ char *tmp, *tok, *val_str = NULL;
+ int tok_n = 0;
+
+ /* field ==/!= number, or/and field ==/!= number, number */
+ while ((tok = strsep(pbuf, " \n"))) {
+ if (tok_n == 0) {
+ if (!strcmp(tok, "0")) {
+ pred->clear = 1;
+ return 0;
+ } else if (!strcmp(tok, "&&")) {
+ pred->or = 0;
+ pred->compound = 1;
+ } else if (!strcmp(tok, "||")) {
+ pred->or = 1;
+ pred->compound = 1;
+ } else
+ pred->field_name = tok;
+ tok_n = 1;
+ continue;
+ }
+ if (tok_n == 1) {
+ if (!pred->field_name)
+ pred->field_name = tok;
+ else if (!strcmp(tok, "!="))
+ pred->not = 1;
+ else if (!strcmp(tok, "=="))
+ pred->not = 0;
+ else {
+ pred->field_name = NULL;
+ return -EINVAL;
+ }
+ tok_n = 2;
+ continue;
+ }
+ if (tok_n == 2) {
+ if (pred->compound) {
+ if (!strcmp(tok, "!="))
+ pred->not = 1;
+ else if (!strcmp(tok, "=="))
+ pred->not = 0;
+ else {
+ pred->field_name = NULL;
+ return -EINVAL;
+ }
+ } else {
+ val_str = tok;
+ break; /* done */
+ }
+ tok_n = 3;
+ continue;
+ }
+ if (tok_n == 3) {
+ val_str = tok;
+ break; /* done */
+ }
+ }
+
+ pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
+ if (!pred->field_name)
+ return -ENOMEM;
+
+ pred->val = simple_strtoull(val_str, &tmp, 10);
+ if (tmp == val_str) {
+ pred->str_val = kstrdup(val_str, GFP_KERNEL);
+ if (!pred->str_val)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+
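The grammar accepted by filter_parse() above is what userspace writes into the per-event "filter" files created in event_create_dir(); a hypothetical session, with event and field names chosen only for illustration:

    # cd /sys/kernel/debug/tracing/events/sched/sched_wakeup
    # echo 'common_pid == 1234' > filter      (install a single predicate)
    # echo '&& success != 0' > filter         (AND a second, compound predicate)
    # cat filter
    common_pid == 1234
    && success != 0
    # echo 0 > filter                         (clear all predicates)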
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
index 5117c43..30743f7 100644
--- a/kernel/trace/trace_events_stage_2.h
+++ b/kernel/trace/trace_events_stage_2.h
@@ -129,3 +129,48 @@ ftrace_format_##call(struct trace_seq *s) \
}
#include <trace/trace_event_types.h>
+
+#undef __field
+#define __field(type, item) \
+ ret = trace_define_field(event_call, #type, #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item)); \
+ if (ret) \
+ return ret;
+
+#undef __array
+#define __array(type, item, len) \
+ ret = trace_define_field(event_call, #type "[" #len "]", #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item)); \
+ if (ret) \
+ return ret;
+
+#define __common_field(type, item) \
+ ret = trace_define_field(event_call, #type, "common_" #item, \
+ offsetof(typeof(field.ent), item), \
+ sizeof(field.ent.item)); \
+ if (ret) \
+ return ret;
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
+int \
+ftrace_define_fields_##call(void) \
+{ \
+ struct ftrace_raw_##call field; \
+ struct ftrace_event_call *event_call = &event_##call; \
+ int ret; \
+ \
+ __common_field(unsigned char, type); \
+ __common_field(unsigned char, flags); \
+ __common_field(unsigned char, preempt_count); \
+ __common_field(int, pid); \
+ __common_field(int, tgid); \
+ \
+ tstruct; \
+ \
+ return ret; \
+}
+
+#include <trace/trace_event_types.h>
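To see what the TRACE_EVENT() override above generates, take a hypothetical event whose TP_STRUCT__entry lists __field(int, foo) and __array(char, bar, 16). After the five common_* fields, the emitted ftrace_define_fields_<call>() reduces to roughly:

    ret = trace_define_field(event_call, "int", "foo",
                             offsetof(typeof(field), foo),
                             sizeof(field.foo));
    if (ret)
            return ret;
    ret = trace_define_field(event_call, "char[16]", "bar",
                             offsetof(typeof(field), bar),
                             sizeof(field.bar));
    if (ret)
            return ret;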
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
index ae2e323d..9d2fa78 100644
--- a/kernel/trace/trace_events_stage_3.h
+++ b/kernel/trace/trace_events_stage_3.h
@@ -109,6 +109,40 @@
#undef TP_FMT
#define TP_FMT(fmt, args...) fmt "\n", ##args
+#ifdef CONFIG_EVENT_PROFILE
+#define _TRACE_PROFILE(call, proto, args) \
+static void ftrace_profile_##call(proto) \
+{ \
+ extern void perf_tpcounter_event(int); \
+ perf_tpcounter_event(event_##call.id); \
+} \
+ \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
+{ \
+ int ret = 0; \
+ \
+ if (!atomic_inc_return(&call->profile_count)) \
+ ret = register_trace_##call(ftrace_profile_##call); \
+ \
+ return ret; \
+} \
+ \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
+{ \
+ if (atomic_add_negative(-1, &call->profile_count)) \
+ unregister_trace_##call(ftrace_profile_##call); \
+}
+
+#define _TRACE_PROFILE_INIT(call) \
+ .profile_count = ATOMIC_INIT(-1), \
+ .profile_enable = ftrace_profile_enable_##call, \
+ .profile_disable = ftrace_profile_disable_##call,
+
+#else
+#define _TRACE_PROFILE(call, proto, args)
+#define _TRACE_PROFILE_INIT(call)
+#endif
+
#define _TRACE_FORMAT(call, proto, args, fmt) \
static void ftrace_event_##call(proto) \
{ \
@@ -130,18 +164,33 @@ static void ftrace_unreg_event_##call(void) \
{ \
unregister_trace_##call(ftrace_event_##call); \
} \
-
+ \
+static struct ftrace_event_call event_##call; \
+ \
+static int ftrace_init_event_##call(void) \
+{ \
+ int id; \
+ \
+ id = register_ftrace_event(NULL); \
+ if (!id) \
+ return -ENODEV; \
+ event_##call.id = id; \
+ return 0; \
+}
#undef TRACE_FORMAT
#define TRACE_FORMAT(call, proto, args, fmt) \
_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.system = __stringify(TRACE_SYSTEM), \
+ .raw_init = ftrace_init_event_##call, \
.regfunc = ftrace_reg_event_##call, \
.unregfunc = ftrace_unreg_event_##call, \
+ _TRACE_PROFILE_INIT(call) \
}
#undef __entry
@@ -149,11 +198,13 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
\
static struct ftrace_event_call event_##call; \
\
static void ftrace_raw_event_##call(proto) \
{ \
+ struct ftrace_event_call *call = &event_##call; \
struct ring_buffer_event *event; \
struct ftrace_raw_##call *entry; \
unsigned long irq_flags; \
@@ -171,7 +222,11 @@ static void ftrace_raw_event_##call(proto) \
\
assign; \
\
- trace_current_buffer_unlock_commit(event, irq_flags, pc); \
+ if (call->preds && !filter_match_preds(call, entry)) \
+ ring_buffer_event_discard(event); \
+ \
+ trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+ \
} \
\
static int ftrace_raw_reg_event_##call(void) \
@@ -202,6 +257,7 @@ static int ftrace_raw_init_event_##call(void) \
if (!id) \
return -ENODEV; \
event_##call.id = id; \
+ INIT_LIST_HEAD(&event_##call.fields); \
return 0; \
} \
\
@@ -214,4 +270,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
.regfunc = ftrace_raw_reg_event_##call, \
.unregfunc = ftrace_raw_unreg_event_##call, \
.show_format = ftrace_format_##call, \
+ .define_fields = ftrace_define_fields_##call, \
+ _TRACE_PROFILE_INIT(call) \
}
+
+#include <trace/trace_event_types.h>
+
+#undef _TRACE_PROFILE
+#undef _TRACE_PROFILE_INIT
+
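One detail of _TRACE_PROFILE() above that is easy to miss: profile_count starts at -1 (see _TRACE_PROFILE_INIT), so the profiling probe is registered on the first enable and unregistered on the last disable, no matter how often the calls nest. The counting scheme in isolation, with illustrative probe names:

    atomic_t profile_count = ATOMIC_INIT(-1);

    /* enable: only the transition -1 -> 0 registers the probe */
    if (!atomic_inc_return(&profile_count))
            ret = register_trace_example(ftrace_profile_example);

    /* disable: only the transition 0 -> -1 unregisters it */
    if (atomic_add_negative(-1, &profile_count))
            unregister_trace_example(ftrace_profile_example);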
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6004cca..d28687e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,6 +14,11 @@
#include "trace.h"
#include "trace_output.h"
+struct fgraph_data {
+ pid_t last_pid;
+ int depth;
+};
+
#define TRACE_GRAPH_INDENT 2
/* Flag options */
@@ -52,9 +57,9 @@ static struct tracer_flags tracer_flags = {
/* Add a function return address to the trace stack on thread info.*/
int
-ftrace_push_return_trace(unsigned long ret, unsigned long long time,
- unsigned long func, int *depth)
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
{
+ unsigned long long calltime;
int index;
if (!current->ret_stack)
@@ -66,11 +71,13 @@ ftrace_push_return_trace(unsigned long ret, unsigned long long time,
return -EBUSY;
}
+ calltime = trace_clock_local();
+
index = ++current->curr_ret_stack;
barrier();
current->ret_stack[index].ret = ret;
current->ret_stack[index].func = func;
- current->ret_stack[index].calltime = time;
+ current->ret_stack[index].calltime = calltime;
*depth = index;
return 0;
@@ -231,16 +238,16 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
/* If the pid changed since the last trace, output this event */
static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
pid_t prev_pid;
pid_t *last_pid;
int ret;
- if (!last_pids_cpu)
+ if (!data)
return TRACE_TYPE_HANDLED;
- last_pid = per_cpu_ptr(last_pids_cpu, cpu);
+ last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
if (*last_pid == pid)
return TRACE_TYPE_HANDLED;
@@ -471,6 +478,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *entry,
struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
+ struct fgraph_data *data = iter->private;
struct ftrace_graph_ret *graph_ret;
struct ftrace_graph_ent *call;
unsigned long long duration;
@@ -481,6 +489,18 @@ print_graph_entry_leaf(struct trace_iterator *iter,
call = &entry->graph_ent;
duration = graph_ret->rettime - graph_ret->calltime;
+ if (data) {
+ int cpu = iter->cpu;
+ int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+ /*
+ * Comments display at + 1 to depth. Since
+ * this is a leaf function, keep the comments
+ * equal to this depth.
+ */
+ *depth = call->depth - 1;
+ }
+
/* Overhead */
ret = print_graph_overhead(duration, s);
if (!ret)
@@ -512,12 +532,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
}
static enum print_line_t
-print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
- struct trace_seq *s, pid_t pid, int cpu)
+print_graph_entry_nested(struct trace_iterator *iter,
+ struct ftrace_graph_ent_entry *entry,
+ struct trace_seq *s, int cpu)
{
- int i;
- int ret;
struct ftrace_graph_ent *call = &entry->graph_ent;
+ struct fgraph_data *data = iter->private;
+ int ret;
+ int i;
+
+ if (data) {
+ int cpu = iter->cpu;
+ int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+ *depth = call->depth;
+ }
/* No overhead */
ret = print_graph_overhead(-1, s);
@@ -554,24 +583,24 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
}
static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
- struct trace_iterator *iter)
+print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
+ int type, unsigned long addr)
{
- int ret;
- int cpu = iter->cpu;
- pid_t *last_entry = iter->private;
+ struct fgraph_data *data = iter->private;
struct trace_entry *ent = iter->ent;
- struct ftrace_graph_ent *call = &field->graph_ent;
- struct ftrace_graph_ret_entry *leaf_ret;
+ int cpu = iter->cpu;
+ int ret;
/* Pid */
- if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
+ if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
- /* Interrupt */
- ret = print_graph_irq(iter, call->func, TRACE_GRAPH_ENT, cpu, ent->pid);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
+ if (type) {
+ /* Interrupt */
+ ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
/* Absolute time */
if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
@@ -598,11 +627,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
return TRACE_TYPE_PARTIAL_LINE;
}
+ return 0;
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+ struct trace_iterator *iter)
+{
+ int cpu = iter->cpu;
+ struct ftrace_graph_ent *call = &field->graph_ent;
+ struct ftrace_graph_ret_entry *leaf_ret;
+
+ if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+ return TRACE_TYPE_PARTIAL_LINE;
+
leaf_ret = get_return_for_leaf(iter, field);
if (leaf_ret)
return print_graph_entry_leaf(iter, field, leaf_ret, s);
else
- return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
+ return print_graph_entry_nested(iter, field, s, cpu);
}
@@ -610,40 +653,27 @@ static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
struct trace_entry *ent, struct trace_iterator *iter)
{
- int i;
- int ret;
- int cpu = iter->cpu;
- pid_t *last_pid = iter->private, pid = ent->pid;
unsigned long long duration = trace->rettime - trace->calltime;
+ struct fgraph_data *data = iter->private;
+ pid_t pid = ent->pid;
+ int cpu = iter->cpu;
+ int ret;
+ int i;
- /* Pid */
- if (verif_pid(s, pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
+ if (data) {
+ int cpu = iter->cpu;
+ int *depth = &(per_cpu_ptr(data, cpu)->depth);
- /* Absolute time */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
- ret = print_graph_abs_time(iter->ts, s);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
+ /*
+ * Comments are displayed at depth + 1. This is the
+ * return from a function, so we now want the comments
+ * to display at the same level as the closing bracket.
+ */
+ *depth = trace->depth - 1;
}
- /* Cpu */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
- ret = print_graph_cpu(s, cpu);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
- }
-
- /* Proc */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
- ret = print_graph_proc(s, ent->pid);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
-
- ret = trace_seq_printf(s, " | ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ if (print_graph_prologue(iter, s, 0, 0))
+ return TRACE_TYPE_PARTIAL_LINE;
/* Overhead */
ret = print_graph_overhead(duration, s);
@@ -684,42 +714,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
}
static enum print_line_t
-print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
- struct trace_entry *ent, struct trace_iterator *iter)
+print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
+ struct trace_iterator *iter)
{
- int i;
+ unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+ struct fgraph_data *data = iter->private;
+ struct trace_event *event;
+ int depth = 0;
int ret;
- int cpu = iter->cpu;
- pid_t *last_pid = iter->private;
-
- /* Pid */
- if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
-
- /* Absolute time */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
- ret = print_graph_abs_time(iter->ts, s);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
-
- /* Cpu */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
- ret = print_graph_cpu(s, cpu);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ int i;
- /* Proc */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
- ret = print_graph_proc(s, ent->pid);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
+ if (data)
+ depth = per_cpu_ptr(data, iter->cpu)->depth;
- ret = trace_seq_printf(s, " | ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ if (print_graph_prologue(iter, s, 0, 0))
+ return TRACE_TYPE_PARTIAL_LINE;
/* No overhead */
ret = print_graph_overhead(-1, s);
@@ -734,8 +743,8 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
}
/* Indentation */
- if (trace->depth > 0)
- for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
+ if (depth > 0)
+ for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -746,9 +755,26 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_bprintf(s, trace->fmt, trace->buf);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
+ switch (iter->ent->type) {
+ case TRACE_BPRINT:
+ ret = trace_print_bprintk_msg_only(iter);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
+ break;
+ case TRACE_PRINT:
+ ret = trace_print_printk_msg_only(iter);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
+ break;
+ default:
+ event = ftrace_find_event(ent->type);
+ if (!event)
+ return TRACE_TYPE_UNHANDLED;
+
+ ret = event->trace(iter, sym_flags);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
+ }
/* Strip ending newline */
if (s->buffer[s->len - 1] == '\n') {
@@ -767,8 +793,8 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
- struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
switch (entry->type) {
case TRACE_GRAPH_ENT: {
@@ -781,14 +807,11 @@ print_graph_function(struct trace_iterator *iter)
trace_assign_type(field, entry);
return print_graph_return(&field->ret, s, entry, iter);
}
- case TRACE_BPRINT: {
- struct bprint_entry *field;
- trace_assign_type(field, entry);
- return print_graph_comment(field, s, entry, iter);
- }
default:
- return TRACE_TYPE_UNHANDLED;
+ return print_graph_comment(s, entry, iter);
}
+
+ return TRACE_TYPE_HANDLED;
}
static void print_graph_headers(struct seq_file *s)
@@ -820,19 +843,21 @@ static void print_graph_headers(struct seq_file *s)
static void graph_trace_open(struct trace_iterator *iter)
{
- /* pid on the last trace processed */
- pid_t *last_pid = alloc_percpu(pid_t);
+ /* pid and depth on the last trace processed */
+ struct fgraph_data *data = alloc_percpu(struct fgraph_data);
int cpu;
- if (!last_pid)
+ if (!data)
pr_warning("function graph tracer: not enough memory\n");
else
for_each_possible_cpu(cpu) {
- pid_t *pid = per_cpu_ptr(last_pid, cpu);
+ pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
+ int *depth = &(per_cpu_ptr(data, cpu)->depth);
*pid = -1;
+ *depth = 0;
}
- iter->private = last_pid;
+ iter->private = data;
}
static void graph_trace_close(struct trace_iterator *iter)
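A note on the function graph hunks above: the per-cpu pid_t that used to live in iter->private is replaced by a per-cpu struct fgraph_data carrying both the last printed pid and the current call depth, which is what lets print_graph_comment() indent arbitrary events without a bprint-specific field. The struct definition itself is not part of the hunks shown, so the sketch below only assumes the two fields implied by the accessors (last_pid, depth) and restates the allocation/initialization pattern from graph_trace_open().

#include <linux/percpu.h>
#include <linux/types.h>

/* Assumed shape, inferred from the accessors used in the hunks above. */
struct fgraph_data {
	pid_t	last_pid;	/* pid of the last entry printed on this cpu */
	int	depth;		/* current nesting depth, drives comment indent */
};

/* Minimal sketch of the open-time allocation done by graph_trace_open(). */
static struct fgraph_data *fgraph_data_alloc(void)
{
	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
	int cpu;

	if (!data)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(data, cpu)->last_pid = -1;	/* nothing printed yet */
		per_cpu_ptr(data, cpu)->depth = 0;
	}
	return data;
}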
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index f095916..8e37fcd 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -359,5 +359,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
int mmio_trace_printk(const char *fmt, va_list args)
{
- return trace_vprintk(0, -1, fmt, args);
+ return trace_vprintk(0, fmt, args);
}
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 9aa84bd..394f944 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -91,6 +91,7 @@ struct tracer nop_trace __read_mostly =
.name = "nop",
.init = nop_trace_init,
.reset = nop_trace_reset,
+ .wait_pipe = poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_nop,
#endif
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 6a4c9de..d72b9a6 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -19,6 +19,38 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
static int next_event_type = __TRACE_LAST_TYPE + 1;
+enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent;
+ struct bprint_entry *field;
+ int ret;
+
+ trace_assign_type(field, entry);
+
+ ret = trace_seq_bprintf(s, field->fmt, field->buf);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent;
+ struct print_entry *field;
+ int ret;
+
+ trace_assign_type(field, entry);
+
+ ret = trace_seq_printf(s, "%s", field->buf);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
/**
* trace_seq_printf - sequence printing of trace information
* @s: trace sequence descriptor
@@ -105,7 +137,7 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c)
return 1;
}
-int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
+int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
if (len > ((PAGE_SIZE - 1) - s->len))
return 0;
@@ -116,10 +148,10 @@ int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
return len;
}
-int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
+int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
unsigned char hex[HEX_CHARS];
- unsigned char *data = mem;
+ const unsigned char *data = mem;
int i, j;
#ifdef __BIG_ENDIAN
@@ -135,6 +167,19 @@ int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
return trace_seq_putmem(s, hex, j);
}
+void *trace_seq_reserve(struct trace_seq *s, size_t len)
+{
+ void *ret;
+
+ if (len > ((PAGE_SIZE - 1) - s->len))
+ return NULL;
+
+ ret = s->buffer + s->len;
+ s->len += len;
+
+ return ret;
+}
+
int trace_seq_path(struct trace_seq *s, struct path *path)
{
unsigned char *p;
@@ -449,6 +494,11 @@ int register_ftrace_event(struct trace_event *event)
mutex_lock(&trace_event_mutex);
+ if (!event) {
+ ret = next_event_type++;
+ goto out;
+ }
+
if (!event->type)
event->type = next_event_type++;
else if (event->type > __TRACE_LAST_TYPE) {
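Two smaller additions in trace_output.c are easy to miss: trace_seq_reserve() hands back a pointer into the seq buffer (or NULL when the page is full) so callers can fill bytes in place, and register_ftrace_event() now accepts a NULL event, in which case it only hands out the next free event type id. Below is a hedged sketch of how a caller might use both; the example_* names are made up for illustration.

#include <linux/string.h>

/* Copy a raw blob straight into the trace_seq buffer, avoiding a bounce copy. */
static int example_put_blob(struct trace_seq *s, const void *blob, size_t len)
{
	void *dst = trace_seq_reserve(s, len);	/* NULL when it does not fit */

	if (!dst)
		return 0;
	memcpy(dst, blob, len);
	return 1;
}

/* Reserve an event type id without registering any output callbacks. */
static int example_reserve_event_type(void)
{
	return register_ftrace_event(NULL);
}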
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 3b90e6a..e0bde39 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -15,6 +15,11 @@ struct trace_event {
trace_print_func binary;
};
+extern enum print_line_t
+trace_print_bprintk_msg_only(struct trace_iterator *iter);
+extern enum print_line_t
+trace_print_printk_msg_only(struct trace_iterator *iter);
+
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int
@@ -24,24 +29,27 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
size_t cnt);
-int trace_seq_puts(struct trace_seq *s, const char *str);
-int trace_seq_putc(struct trace_seq *s, unsigned char c);
-int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len);
-int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len);
-int trace_seq_path(struct trace_seq *s, struct path *path);
-int seq_print_userip_objs(const struct userstack_entry *entry,
- struct trace_seq *s, unsigned long sym_flags);
-int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
- unsigned long ip, unsigned long sym_flags);
-
-int trace_print_context(struct trace_iterator *iter);
-int trace_print_lat_context(struct trace_iterator *iter);
-
-struct trace_event *ftrace_find_event(int type);
-int register_ftrace_event(struct trace_event *event);
-int unregister_ftrace_event(struct trace_event *event);
-
-enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags);
+extern int trace_seq_puts(struct trace_seq *s, const char *str);
+extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
+extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
+extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+ size_t len);
+extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
+extern int trace_seq_path(struct trace_seq *s, struct path *path);
+extern int seq_print_userip_objs(const struct userstack_entry *entry,
+ struct trace_seq *s, unsigned long sym_flags);
+extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+ unsigned long ip, unsigned long sym_flags);
+
+extern int trace_print_context(struct trace_iterator *iter);
+extern int trace_print_lat_context(struct trace_iterator *iter);
+
+extern struct trace_event *ftrace_find_event(int type);
+extern int register_ftrace_event(struct trace_event *event);
+extern int unregister_ftrace_event(struct trace_event *event);
+
+extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
+ int flags);
#define MAX_MEMHEX_BYTES 8
#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 4867852..eb81556 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -112,7 +112,7 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...)
return 0;
va_start(ap, fmt);
- ret = trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap);
+ ret = trace_vbprintk(ip, fmt, ap);
va_end(ap);
return ret;
}
@@ -126,7 +126,7 @@ int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
if (!(trace_flags & TRACE_ITER_PRINTK))
return 0;
- return trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap);
+ return trace_vbprintk(ip, fmt, ap);
}
EXPORT_SYMBOL_GPL(__ftrace_vbprintk);
@@ -139,7 +139,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...)
return 0;
va_start(ap, fmt);
- ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+ ret = trace_vprintk(ip, fmt, ap);
va_end(ap);
return ret;
}
@@ -150,7 +150,7 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
if (!(trace_flags & TRACE_ITER_PRINTK))
return 0;
- return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+ return trace_vprintk(ip, fmt, ap);
}
EXPORT_SYMBOL_GPL(__ftrace_vprintk);
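For context, __trace_printk()/__trace_bprintk() and their v-variants are the backends reached from the trace_printk() macro; the hunks above only drop the old depth argument, so call sites are unchanged. A trivial, hypothetical call site:

#include <linux/kernel.h>

static void example_probe(int value)
{
	/* Ends up in trace_vbprintk()/trace_vprintk() via the macro expansion. */
	trace_printk("example_probe: value=%d\n", value);
}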
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 38856ba..08f4eb2 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -248,6 +248,28 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST 100000000
+
+static void __ftrace_dump(bool disable_tracing);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+ /* This is harmlessly racy; we only need to approximately detect a hang */
+ if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+ ftrace_graph_stop();
+ printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+ if (ftrace_dump_on_oops)
+ __ftrace_dump(false);
+ return 0;
+ }
+
+ return trace_graph_entry(trace);
+}
+
/*
 * Pretty much the same as for the function tracer from which the selftest
* has been borrowed.
@@ -259,15 +281,29 @@ trace_selftest_startup_function_graph(struct tracer *trace,
int ret;
unsigned long count;
- ret = tracer_init(trace, tr);
+ /*
+ * Simulate the init() callback, but attach a watchdog callback
+ * so we can detect and recover from possible hangs.
+ */
+ tracing_reset_online_cpus(tr);
+ ret = register_ftrace_graph(&trace_graph_return,
+ &trace_graph_entry_watchdog);
if (ret) {
warn_failed_init_tracer(trace, ret);
goto out;
}
+ tracing_start_cmdline_record();
 /* Sleep for 1/10 of a second */
msleep(100);
+ /* Have we just recovered from a hang? */
+ if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+ tracing_selftest_disabled = true;
+ ret = -1;
+ goto out;
+ }
+
tracing_stop();
/* check the trace buffer */
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 39310e3..acdebd7 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -75,7 +75,7 @@ static int stat_seq_init(struct tracer_stat_session *session)
{
struct trace_stat_list *iter_entry, *new_entry;
struct tracer_stat *ts = session->ts;
- void *prev_stat;
+ void *stat;
int ret = 0;
int i;
@@ -85,6 +85,10 @@ static int stat_seq_init(struct tracer_stat_session *session)
if (!ts->stat_cmp)
ts->stat_cmp = dummy_cmp;
+ stat = ts->stat_start();
+ if (!stat)
+ goto exit;
+
/*
* The first entry. Actually this is the second, but the first
* one (the stat_list head) is pointless.
@@ -99,14 +103,19 @@ static int stat_seq_init(struct tracer_stat_session *session)
list_add(&new_entry->list, &session->stat_list);
- new_entry->stat = ts->stat_start();
- prev_stat = new_entry->stat;
+ new_entry->stat = stat;
/*
* Iterate over the tracer stat entries and store them in a sorted
* list.
*/
for (i = 1; ; i++) {
+ stat = ts->stat_next(stat, i);
+
+ /* End of insertion */
+ if (!stat)
+ break;
+
new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
if (!new_entry) {
ret = -ENOMEM;
@@ -114,31 +123,23 @@ static int stat_seq_init(struct tracer_stat_session *session)
}
INIT_LIST_HEAD(&new_entry->list);
- new_entry->stat = ts->stat_next(prev_stat, i);
+ new_entry->stat = stat;
- /* End of insertion */
- if (!new_entry->stat)
- break;
-
- list_for_each_entry(iter_entry, &session->stat_list, list) {
+ list_for_each_entry_reverse(iter_entry, &session->stat_list,
+ list) {
 /* Insertion with a descending sort */
- if (ts->stat_cmp(new_entry->stat,
- iter_entry->stat) > 0) {
-
- list_add_tail(&new_entry->list,
- &iter_entry->list);
- break;
+ if (ts->stat_cmp(iter_entry->stat,
+ new_entry->stat) >= 0) {
- /* The current smaller value */
- } else if (list_is_last(&iter_entry->list,
- &session->stat_list)) {
list_add(&new_entry->list, &iter_entry->list);
break;
}
}
- prev_stat = new_entry->stat;
+ /* The current larger value */
+ if (list_empty(&new_entry->list))
+ list_add(&new_entry->list, &session->stat_list);
}
exit:
mutex_unlock(&session->stat_mutex);
@@ -160,7 +161,7 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
/* If we are in the beginning of the file, print the headers */
if (!*pos && session->ts->stat_headers)
- session->ts->stat_headers(s);
+ return SEQ_START_TOKEN;
return seq_list_start(&session->stat_list, *pos);
}
@@ -169,6 +170,9 @@ static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
{
struct tracer_stat_session *session = s->private;
+ if (p == SEQ_START_TOKEN)
+ return seq_list_start(&session->stat_list, *pos);
+
return seq_list_next(p, &session->stat_list, pos);
}
@@ -183,6 +187,9 @@ static int stat_seq_show(struct seq_file *s, void *v)
struct tracer_stat_session *session = s->private;
struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
+ if (v == SEQ_START_TOKEN)
+ return session->ts->stat_headers(s);
+
return session->ts->stat_show(s, l->stat);
}
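The trace_stat.c hunks switch header printing to the standard seq_file SEQ_START_TOKEN protocol: ->start() returns the token on the first call, ->next() translates it into the first real list entry, and ->show() prints the headers when it sees the token. Below is a minimal, generic sketch of that protocol mirroring the flow above; the list and the example_* names are hypothetical.

#include <linux/seq_file.h>
#include <linux/list.h>

static LIST_HEAD(example_list);	/* stands in for session->stat_list */

static void *example_start(struct seq_file *s, loff_t *pos)
{
	/* First call of a read cycle: hand back the header token. */
	if (!*pos)
		return SEQ_START_TOKEN;
	return seq_list_start(&example_list, *pos);
}

static void *example_next(struct seq_file *s, void *p, loff_t *pos)
{
	/* The token is followed by the first real list entry. */
	if (p == SEQ_START_TOKEN)
		return seq_list_start(&example_list, *pos);
	return seq_list_next(p, &example_list, pos);
}

static int example_show(struct seq_file *s, void *v)
{
	if (v == SEQ_START_TOKEN)
		return seq_printf(s, "# header line\n");
	/* otherwise print the entry behind v */
	return 0;
}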
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 9ab035b..797201e 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -196,6 +196,11 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
struct pid *pid;
struct task_struct *tsk;
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
+ seq_printf(s, "\n");
+ spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
pid = find_get_pid(cws->pid);
if (pid) {
tsk = get_pid_task(pid, PIDTYPE_PID);
@@ -208,18 +213,13 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
put_pid(pid);
}
- spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
- if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
- seq_printf(s, "\n");
- spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-
return 0;
}
static int workqueue_stat_headers(struct seq_file *s)
{
seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
- seq_printf(s, "# | | | |\n\n");
+ seq_printf(s, "# | | | |\n");
return 0;
}