author     Scott Wood <scottwood@freescale.com>   2015-02-13 22:12:06 (GMT)
committer  Scott Wood <scottwood@freescale.com>   2015-02-13 22:19:22 (GMT)
commit     6faa2909871d8937cb2f79a10e1b21ffe193fac1 (patch)
tree       f558a94f1553814cc122ab8d9e04c0ebad5262a5 /kernel/trace
parent     fcb2fb84301c673ee15ca04e7a2fc965712d49a0 (diff)
download   linux-fsl-qoriq-6faa2909871d8937cb2f79a10e1b21ffe193fac1.tar.xz
Reset to 3.12.37
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig           |  104
-rw-r--r--  kernel/trace/Makefile          |    4
-rw-r--r--  kernel/trace/blktrace.c        |   20
-rw-r--r--  kernel/trace/ftrace.c          |   31
-rw-r--r--  kernel/trace/latency_hist.c    | 1178
-rw-r--r--  kernel/trace/ring_buffer.c     |   56
-rw-r--r--  kernel/trace/trace.c           |  108
-rw-r--r--  kernel/trace/trace.h           |    6
-rw-r--r--  kernel/trace/trace_clock.c     |    9
-rw-r--r--  kernel/trace/trace_events.c    |    4
-rw-r--r--  kernel/trace/trace_irqsoff.c   |   11
-rw-r--r--  kernel/trace/trace_output.c    |   18
-rw-r--r--  kernel/trace/trace_syscalls.c  |    8
13 files changed, 125 insertions, 1432 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index bbe95b9..015f85a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -192,24 +192,6 @@ config IRQSOFF_TRACER
enabled. This option and the preempt-off timing option can be
used together or separately.)
-config INTERRUPT_OFF_HIST
- bool "Interrupts-off Latency Histogram"
- depends on IRQSOFF_TRACER
- help
- This option generates continuously updated histograms (one per cpu)
- of the duration of time periods with interrupts disabled. The
- histograms are disabled by default. To enable them, write a non-zero
- number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
-
- If PREEMPT_OFF_HIST is also selected, additional histograms (one
- per cpu) are generated that accumulate the duration of time periods
- when both interrupts and preemption are disabled. The histogram data
- will be located in the debug file system at
-
- /sys/kernel/debug/tracing/latency_hist/irqsoff
-
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
@@ -234,24 +216,6 @@ config PREEMPT_TRACER
enabled. This option and the irqs-off timing option can be
used together or separately.)
-config PREEMPT_OFF_HIST
- bool "Preemption-off Latency Histogram"
- depends on PREEMPT_TRACER
- help
- This option generates continuously updated histograms (one per cpu)
- of the duration of time periods with preemption disabled. The
- histograms are disabled by default. To enable them, write a non-zero
- number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
-
- If INTERRUPT_OFF_HIST is also selected, additional histograms (one
- per cpu) are generated that accumulate the duration of time periods
- when both interrupts and preemption are disabled. The histogram data
- will be located in the debug file system at
-
- /sys/kernel/debug/tracing/latency_hist/preemptoff
-
config SCHED_TRACER
bool "Scheduling Latency Tracer"
select GENERIC_TRACER
@@ -262,74 +226,6 @@ config SCHED_TRACER
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.
-config WAKEUP_LATENCY_HIST
- bool "Scheduling Latency Histogram"
- depends on SCHED_TRACER
- help
- This option generates continuously updated histograms (one per cpu)
- of the scheduling latency of the highest priority task.
- The histograms are disabled by default. To enable them, write a
- non-zero number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/wakeup
-
- Two different algorithms are used, one to determine the latency of
- processes that exclusively use the highest priority of the system and
- another one to determine the latency of processes that share the
- highest system priority with other processes. The former is used to
- improve hardware and system software, the latter to optimize the
- priority design of a given system. The histogram data will be
- located in the debug file system at
-
- /sys/kernel/debug/tracing/latency_hist/wakeup
-
- and
-
- /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
-
- If both Scheduling Latency Histogram and Missed Timer Offsets
- Histogram are selected, additional histogram data will be collected
- that contain, in addition to the wakeup latency, the timer latency, in
- case the wakeup was triggered by an expired timer. These histograms
- are available in the
-
- /sys/kernel/debug/tracing/latency_hist/timerandwakeup
-
- directory. They reflect the apparent interrupt and scheduling latency
- and are best suitable to determine the worst-case latency of a given
- system. To enable these histograms, write a non-zero number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
-
-config MISSED_TIMER_OFFSETS_HIST
- depends on HIGH_RES_TIMERS
- select GENERIC_TRACER
- bool "Missed Timer Offsets Histogram"
- help
- Generate a histogram of missed timer offsets in microseconds. The
- histograms are disabled by default. To enable them, write a non-zero
- number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
-
- The histogram data will be located in the debug file system at
-
- /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
-
- If both Scheduling Latency Histogram and Missed Timer Offsets
- Histogram are selected, additional histogram data will be collected
- that contain, in addition to the wakeup latency, the timer latency, in
- case the wakeup was triggered by an expired timer. These histograms
- are available in the
-
- /sys/kernel/debug/tracing/latency_hist/timerandwakeup
-
- directory. They reflect the apparent interrupt and scheduling latency
- and are best suitable to determine the worst-case latency of a given
- system. To enable these histograms, write a non-zero number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
-
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index f5e0243..d7e2068 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -34,10 +34,6 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
-obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
-obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
-obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
-obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7f727b3..e0e5f73 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -703,6 +703,7 @@ void blk_trace_shutdown(struct request_queue *q)
* blk_add_trace_rq - Add a trace for a request oriented action
* @q: queue the io is for
* @rq: the source request
+ * @nr_bytes: number of completed bytes
* @what: the action
*
* Description:
@@ -710,7 +711,7 @@ void blk_trace_shutdown(struct request_queue *q)
*
**/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
- u32 what)
+ unsigned int nr_bytes, u32 what)
{
struct blk_trace *bt = q->blk_trace;
@@ -719,11 +720,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
what |= BLK_TC_ACT(BLK_TC_PC);
- __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
+ __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
what, rq->errors, rq->cmd_len, rq->cmd);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+ __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
rq->cmd_flags, what, rq->errors, 0, NULL);
}
}
@@ -731,33 +732,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
static void blk_add_trace_rq_abort(void *ignore,
struct request_queue *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
}
static void blk_add_trace_rq_insert(void *ignore,
struct request_queue *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
}
static void blk_add_trace_rq_issue(void *ignore,
struct request_queue *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
}
static void blk_add_trace_rq_requeue(void *ignore,
struct request_queue *q,
struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
}
static void blk_add_trace_rq_complete(void *ignore,
struct request_queue *q,
- struct request *rq)
+ struct request *rq,
+ unsigned int nr_bytes)
{
- blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+ blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
}
/**
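[Reviewer note: the hunks above cover only the probe side of this interface change. The matching caller side (blk_update_request() in block/blk-core.c and the block_rq_complete tracepoint definition) is outside this diff; the sketch below is an assumption of its shape, not part of this commit.]

	/*
	 * Sketch only: request completion is assumed to report the bytes that
	 * actually finished, so partially completed requests are traced with
	 * the right size instead of the probe re-deriving blk_rq_bytes(rq).
	 */
	trace_block_rq_complete(req->q, req, nr_bytes);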
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e66411f..d2ab10b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -331,12 +331,12 @@ static void update_ftrace_function(void)
func = ftrace_ops_list_func;
}
+ update_function_graph_func();
+
/* If there's no change, then do nothing more here */
if (ftrace_trace_function == func)
return;
- update_function_graph_func();
-
/*
* If we are using the list function, it doesn't care
* about the function_trace_ops.
@@ -4252,16 +4252,11 @@ static void ftrace_init_module(struct module *mod,
ftrace_process_locs(mod, start, end);
}
-static int ftrace_module_notify_enter(struct notifier_block *self,
- unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
{
- struct module *mod = data;
-
- if (val == MODULE_STATE_COMING)
- ftrace_init_module(mod, mod->ftrace_callsites,
- mod->ftrace_callsites +
- mod->num_ftrace_callsites);
- return 0;
+ ftrace_init_module(mod, mod->ftrace_callsites,
+ mod->ftrace_callsites +
+ mod->num_ftrace_callsites);
}
static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4275,11 +4270,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
return 0;
}
#else
-static int ftrace_module_notify_enter(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
static int ftrace_module_notify_exit(struct notifier_block *self,
unsigned long val, void *data)
{
@@ -4287,11 +4277,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
}
#endif /* CONFIG_MODULES */
-struct notifier_block ftrace_module_enter_nb = {
- .notifier_call = ftrace_module_notify_enter,
- .priority = INT_MAX, /* Run before anything that can use kprobes */
-};
-
struct notifier_block ftrace_module_exit_nb = {
.notifier_call = ftrace_module_notify_exit,
.priority = INT_MIN, /* Run after anything that can remove kprobes */
@@ -4328,10 +4313,6 @@ void __init ftrace_init(void)
__start_mcount_loc,
__stop_mcount_loc);
- ret = register_module_notifier(&ftrace_module_enter_nb);
- if (ret)
- pr_warning("Failed to register trace ftrace module enter notifier\n");
-
ret = register_module_notifier(&ftrace_module_exit_nb);
if (ret)
pr_warning("Failed to register trace ftrace module exit notifier\n");
diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
deleted file mode 100644
index 66a69eb..0000000
--- a/kernel/trace/latency_hist.c
+++ /dev/null
@@ -1,1178 +0,0 @@
-/*
- * kernel/trace/latency_hist.c
- *
- * Add support for histograms of preemption-off latency and
- * interrupt-off latency and wakeup latency, it depends on
- * Real-Time Preemption Support.
- *
- * Copyright (C) 2005 MontaVista Software, Inc.
- * Yi Yang <yyang@ch.mvista.com>
- *
- * Converted to work with the new latency tracer.
- * Copyright (C) 2008 Red Hat, Inc.
- * Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/percpu.h>
-#include <linux/kallsyms.h>
-#include <linux/uaccess.h>
-#include <linux/sched.h>
-#include <linux/sched/rt.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include <asm/div64.h>
-
-#include "trace.h"
-#include <trace/events/sched.h>
-
-#define NSECS_PER_USECS 1000L
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/hist.h>
-
-enum {
- IRQSOFF_LATENCY = 0,
- PREEMPTOFF_LATENCY,
- PREEMPTIRQSOFF_LATENCY,
- WAKEUP_LATENCY,
- WAKEUP_LATENCY_SHAREDPRIO,
- MISSED_TIMER_OFFSETS,
- TIMERANDWAKEUP_LATENCY,
- MAX_LATENCY_TYPE,
-};
-
-#define MAX_ENTRY_NUM 10240
-
-struct hist_data {
- atomic_t hist_mode; /* 0 log, 1 don't log */
- long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
- long min_lat;
- long max_lat;
- unsigned long long below_hist_bound_samples;
- unsigned long long above_hist_bound_samples;
- long long accumulate_lat;
- unsigned long long total_samples;
- unsigned long long hist_array[MAX_ENTRY_NUM];
-};
-
-struct enable_data {
- int latency_type;
- int enabled;
-};
-
-static char *latency_hist_dir_root = "latency_hist";
-
-#ifdef CONFIG_INTERRUPT_OFF_HIST
-static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
-static char *irqsoff_hist_dir = "irqsoff";
-static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
-static DEFINE_PER_CPU(int, hist_irqsoff_counting);
-#endif
-
-#ifdef CONFIG_PREEMPT_OFF_HIST
-static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
-static char *preemptoff_hist_dir = "preemptoff";
-static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
-static DEFINE_PER_CPU(int, hist_preemptoff_counting);
-#endif
-
-#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
-static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
-static char *preemptirqsoff_hist_dir = "preemptirqsoff";
-static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
-static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
-#endif
-
-#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
-static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
-static struct enable_data preemptirqsoff_enabled_data = {
- .latency_type = PREEMPTIRQSOFF_LATENCY,
- .enabled = 0,
-};
-#endif
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-struct maxlatproc_data {
- char comm[FIELD_SIZEOF(struct task_struct, comm)];
- char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
- int pid;
- int current_pid;
- int prio;
- int current_prio;
- long latency;
- long timeroffset;
- cycle_t timestamp;
-};
-#endif
-
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
-static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
-static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
-static char *wakeup_latency_hist_dir = "wakeup";
-static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
-static notrace void probe_wakeup_latency_hist_start(void *v,
- struct task_struct *p, int success);
-static notrace void probe_wakeup_latency_hist_stop(void *v,
- struct task_struct *prev, struct task_struct *next);
-static notrace void probe_sched_migrate_task(void *,
- struct task_struct *task, int cpu);
-static struct enable_data wakeup_latency_enabled_data = {
- .latency_type = WAKEUP_LATENCY,
- .enabled = 0,
-};
-static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
-static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
-static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
-static DEFINE_PER_CPU(int, wakeup_sharedprio);
-static unsigned long wakeup_pid;
-#endif
-
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
-static char *missed_timer_offsets_dir = "missed_timer_offsets";
-static notrace void probe_hrtimer_interrupt(void *v, int cpu,
- long long offset, struct task_struct *curr, struct task_struct *task);
-static struct enable_data missed_timer_offsets_enabled_data = {
- .latency_type = MISSED_TIMER_OFFSETS,
- .enabled = 0,
-};
-static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
-static unsigned long missed_timer_offsets_pid;
-#endif
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
-static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
-static struct enable_data timerandwakeup_enabled_data = {
- .latency_type = TIMERANDWAKEUP_LATENCY,
- .enabled = 0,
-};
-static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
-#endif
-
-void notrace latency_hist(int latency_type, int cpu, long latency,
- long timeroffset, cycle_t stop,
- struct task_struct *p)
-{
- struct hist_data *my_hist;
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- struct maxlatproc_data *mp = NULL;
-#endif
-
- if (!cpu_possible(cpu) || latency_type < 0 ||
- latency_type >= MAX_LATENCY_TYPE)
- return;
-
- switch (latency_type) {
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- case IRQSOFF_LATENCY:
- my_hist = &per_cpu(irqsoff_hist, cpu);
- break;
-#endif
-#ifdef CONFIG_PREEMPT_OFF_HIST
- case PREEMPTOFF_LATENCY:
- my_hist = &per_cpu(preemptoff_hist, cpu);
- break;
-#endif
-#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
- case PREEMPTIRQSOFF_LATENCY:
- my_hist = &per_cpu(preemptirqsoff_hist, cpu);
- break;
-#endif
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- case WAKEUP_LATENCY:
- my_hist = &per_cpu(wakeup_latency_hist, cpu);
- mp = &per_cpu(wakeup_maxlatproc, cpu);
- break;
- case WAKEUP_LATENCY_SHAREDPRIO:
- my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
- mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
- break;
-#endif
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- case MISSED_TIMER_OFFSETS:
- my_hist = &per_cpu(missed_timer_offsets, cpu);
- mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
- break;
-#endif
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- case TIMERANDWAKEUP_LATENCY:
- my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
- mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
- break;
-#endif
-
- default:
- return;
- }
-
- latency += my_hist->offset;
-
- if (atomic_read(&my_hist->hist_mode) == 0)
- return;
-
- if (latency < 0 || latency >= MAX_ENTRY_NUM) {
- if (latency < 0)
- my_hist->below_hist_bound_samples++;
- else
- my_hist->above_hist_bound_samples++;
- } else
- my_hist->hist_array[latency]++;
-
- if (unlikely(latency > my_hist->max_lat ||
- my_hist->min_lat == LONG_MAX)) {
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- if (latency_type == WAKEUP_LATENCY ||
- latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
- latency_type == MISSED_TIMER_OFFSETS ||
- latency_type == TIMERANDWAKEUP_LATENCY) {
- strncpy(mp->comm, p->comm, sizeof(mp->comm));
- strncpy(mp->current_comm, current->comm,
- sizeof(mp->current_comm));
- mp->pid = task_pid_nr(p);
- mp->current_pid = task_pid_nr(current);
- mp->prio = p->prio;
- mp->current_prio = current->prio;
- mp->latency = latency;
- mp->timeroffset = timeroffset;
- mp->timestamp = stop;
- }
-#endif
- my_hist->max_lat = latency;
- }
- if (unlikely(latency < my_hist->min_lat))
- my_hist->min_lat = latency;
- my_hist->total_samples++;
- my_hist->accumulate_lat += latency;
-}
-
-static void *l_start(struct seq_file *m, loff_t *pos)
-{
- loff_t *index_ptr = NULL;
- loff_t index = *pos;
- struct hist_data *my_hist = m->private;
-
- if (index == 0) {
- char minstr[32], avgstr[32], maxstr[32];
-
- atomic_dec(&my_hist->hist_mode);
-
- if (likely(my_hist->total_samples)) {
- long avg = (long) div64_s64(my_hist->accumulate_lat,
- my_hist->total_samples);
- snprintf(minstr, sizeof(minstr), "%ld",
- my_hist->min_lat - my_hist->offset);
- snprintf(avgstr, sizeof(avgstr), "%ld",
- avg - my_hist->offset);
- snprintf(maxstr, sizeof(maxstr), "%ld",
- my_hist->max_lat - my_hist->offset);
- } else {
- strcpy(minstr, "<undef>");
- strcpy(avgstr, minstr);
- strcpy(maxstr, minstr);
- }
-
- seq_printf(m, "#Minimum latency: %s microseconds\n"
- "#Average latency: %s microseconds\n"
- "#Maximum latency: %s microseconds\n"
- "#Total samples: %llu\n"
- "#There are %llu samples lower than %ld"
- " microseconds.\n"
- "#There are %llu samples greater or equal"
- " than %ld microseconds.\n"
- "#usecs\t%16s\n",
- minstr, avgstr, maxstr,
- my_hist->total_samples,
- my_hist->below_hist_bound_samples,
- -my_hist->offset,
- my_hist->above_hist_bound_samples,
- MAX_ENTRY_NUM - my_hist->offset,
- "samples");
- }
- if (index < MAX_ENTRY_NUM) {
- index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
- if (index_ptr)
- *index_ptr = index;
- }
-
- return index_ptr;
-}
-
-static void *l_next(struct seq_file *m, void *p, loff_t *pos)
-{
- loff_t *index_ptr = p;
- struct hist_data *my_hist = m->private;
-
- if (++*pos >= MAX_ENTRY_NUM) {
- atomic_inc(&my_hist->hist_mode);
- return NULL;
- }
- *index_ptr = *pos;
- return index_ptr;
-}
-
-static void l_stop(struct seq_file *m, void *p)
-{
- kfree(p);
-}
-
-static int l_show(struct seq_file *m, void *p)
-{
- int index = *(loff_t *) p;
- struct hist_data *my_hist = m->private;
-
- seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
- my_hist->hist_array[index]);
- return 0;
-}
-
-static const struct seq_operations latency_hist_seq_op = {
- .start = l_start,
- .next = l_next,
- .stop = l_stop,
- .show = l_show
-};
-
-static int latency_hist_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- ret = seq_open(file, &latency_hist_seq_op);
- if (!ret) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return ret;
-}
-
-static const struct file_operations latency_hist_fops = {
- .open = latency_hist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static void clear_maxlatprocdata(struct maxlatproc_data *mp)
-{
- mp->comm[0] = mp->current_comm[0] = '\0';
- mp->prio = mp->current_prio = mp->pid = mp->current_pid =
- mp->latency = mp->timeroffset = -1;
- mp->timestamp = 0;
-}
-#endif
-
-static void hist_reset(struct hist_data *hist)
-{
- atomic_dec(&hist->hist_mode);
-
- memset(hist->hist_array, 0, sizeof(hist->hist_array));
- hist->below_hist_bound_samples = 0ULL;
- hist->above_hist_bound_samples = 0ULL;
- hist->min_lat = LONG_MAX;
- hist->max_lat = LONG_MIN;
- hist->total_samples = 0ULL;
- hist->accumulate_lat = 0LL;
-
- atomic_inc(&hist->hist_mode);
-}
-
-static ssize_t
-latency_hist_reset(struct file *file, const char __user *a,
- size_t size, loff_t *off)
-{
- int cpu;
- struct hist_data *hist = NULL;
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- struct maxlatproc_data *mp = NULL;
-#endif
- off_t latency_type = (off_t) file->private_data;
-
- for_each_online_cpu(cpu) {
-
- switch (latency_type) {
-#ifdef CONFIG_PREEMPT_OFF_HIST
- case PREEMPTOFF_LATENCY:
- hist = &per_cpu(preemptoff_hist, cpu);
- break;
-#endif
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- case IRQSOFF_LATENCY:
- hist = &per_cpu(irqsoff_hist, cpu);
- break;
-#endif
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- case PREEMPTIRQSOFF_LATENCY:
- hist = &per_cpu(preemptirqsoff_hist, cpu);
- break;
-#endif
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- case WAKEUP_LATENCY:
- hist = &per_cpu(wakeup_latency_hist, cpu);
- mp = &per_cpu(wakeup_maxlatproc, cpu);
- break;
- case WAKEUP_LATENCY_SHAREDPRIO:
- hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
- mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
- break;
-#endif
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- case MISSED_TIMER_OFFSETS:
- hist = &per_cpu(missed_timer_offsets, cpu);
- mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
- break;
-#endif
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- case TIMERANDWAKEUP_LATENCY:
- hist = &per_cpu(timerandwakeup_latency_hist, cpu);
- mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
- break;
-#endif
- }
-
- hist_reset(hist);
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- if (latency_type == WAKEUP_LATENCY ||
- latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
- latency_type == MISSED_TIMER_OFFSETS ||
- latency_type == TIMERANDWAKEUP_LATENCY)
- clear_maxlatprocdata(mp);
-#endif
- }
-
- return size;
-}
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static ssize_t
-show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-{
- char buf[64];
- int r;
- unsigned long *this_pid = file->private_data;
-
- r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t do_pid(struct file *file, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- char buf[64];
- unsigned long pid;
- unsigned long *this_pid = file->private_data;
-
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = '\0';
-
- if (kstrtoul(buf, 10, &pid))
- return -EINVAL;
-
- *this_pid = pid;
-
- return cnt;
-}
-#endif
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static ssize_t
-show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-{
- int r;
- struct maxlatproc_data *mp = file->private_data;
- int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
- unsigned long long t;
- unsigned long usecs, secs;
- char *buf;
-
- if (mp->pid == -1 || mp->current_pid == -1) {
- buf = "(none)\n";
- return simple_read_from_buffer(ubuf, cnt, ppos, buf,
- strlen(buf));
- }
-
- buf = kmalloc(strmaxlen, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- t = ns2usecs(mp->timestamp);
- usecs = do_div(t, USEC_PER_SEC);
- secs = (unsigned long) t;
- r = snprintf(buf, strmaxlen,
- "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
- MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
- mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
- secs, usecs);
- r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
- kfree(buf);
- return r;
-}
-#endif
-
-static ssize_t
-show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-{
- char buf[64];
- struct enable_data *ed = file->private_data;
- int r;
-
- r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
-{
- char buf[64];
- long enable;
- struct enable_data *ed = file->private_data;
-
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- if (kstrtoul(buf, 10, &enable))
- return -EINVAL;
-
- if ((enable && ed->enabled) || (!enable && !ed->enabled))
- return cnt;
-
- if (enable) {
- int ret;
-
- switch (ed->latency_type) {
-#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- case PREEMPTIRQSOFF_LATENCY:
- ret = register_trace_preemptirqsoff_hist(
- probe_preemptirqsoff_hist, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_preemptirqsoff_hist "
- "to trace_preemptirqsoff_hist\n");
- return ret;
- }
- break;
-#endif
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- case WAKEUP_LATENCY:
- ret = register_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_wakeup_latency_hist_start "
- "to trace_sched_wakeup\n");
- return ret;
- }
- ret = register_trace_sched_wakeup_new(
- probe_wakeup_latency_hist_start, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_wakeup_latency_hist_start "
- "to trace_sched_wakeup_new\n");
- unregister_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- return ret;
- }
- ret = register_trace_sched_switch(
- probe_wakeup_latency_hist_stop, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_wakeup_latency_hist_stop "
- "to trace_sched_switch\n");
- unregister_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_wakeup_new(
- probe_wakeup_latency_hist_start, NULL);
- return ret;
- }
- ret = register_trace_sched_migrate_task(
- probe_sched_migrate_task, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_sched_migrate_task "
- "to trace_sched_migrate_task\n");
- unregister_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_wakeup_new(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_switch(
- probe_wakeup_latency_hist_stop, NULL);
- return ret;
- }
- break;
-#endif
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- case MISSED_TIMER_OFFSETS:
- ret = register_trace_hrtimer_interrupt(
- probe_hrtimer_interrupt, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_hrtimer_interrupt "
- "to trace_hrtimer_interrupt\n");
- return ret;
- }
- break;
-#endif
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- case TIMERANDWAKEUP_LATENCY:
- if (!wakeup_latency_enabled_data.enabled ||
- !missed_timer_offsets_enabled_data.enabled)
- return -EINVAL;
- break;
-#endif
- default:
- break;
- }
- } else {
- switch (ed->latency_type) {
-#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- case PREEMPTIRQSOFF_LATENCY:
- {
- int cpu;
-
- unregister_trace_preemptirqsoff_hist(
- probe_preemptirqsoff_hist, NULL);
- for_each_online_cpu(cpu) {
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- per_cpu(hist_irqsoff_counting,
- cpu) = 0;
-#endif
-#ifdef CONFIG_PREEMPT_OFF_HIST
- per_cpu(hist_preemptoff_counting,
- cpu) = 0;
-#endif
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- per_cpu(hist_preemptirqsoff_counting,
- cpu) = 0;
-#endif
- }
- }
- break;
-#endif
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- case WAKEUP_LATENCY:
- {
- int cpu;
-
- unregister_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_wakeup_new(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_switch(
- probe_wakeup_latency_hist_stop, NULL);
- unregister_trace_sched_migrate_task(
- probe_sched_migrate_task, NULL);
-
- for_each_online_cpu(cpu) {
- per_cpu(wakeup_task, cpu) = NULL;
- per_cpu(wakeup_sharedprio, cpu) = 0;
- }
- }
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- timerandwakeup_enabled_data.enabled = 0;
-#endif
- break;
-#endif
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- case MISSED_TIMER_OFFSETS:
- unregister_trace_hrtimer_interrupt(
- probe_hrtimer_interrupt, NULL);
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- timerandwakeup_enabled_data.enabled = 0;
-#endif
- break;
-#endif
- default:
- break;
- }
- }
- ed->enabled = enable;
- return cnt;
-}
-
-static const struct file_operations latency_hist_reset_fops = {
- .open = tracing_open_generic,
- .write = latency_hist_reset,
-};
-
-static const struct file_operations enable_fops = {
- .open = tracing_open_generic,
- .read = show_enable,
- .write = do_enable,
-};
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static const struct file_operations pid_fops = {
- .open = tracing_open_generic,
- .read = show_pid,
- .write = do_pid,
-};
-
-static const struct file_operations maxlatproc_fops = {
- .open = tracing_open_generic,
- .read = show_maxlatproc,
-};
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-static notrace void probe_preemptirqsoff_hist(void *v, int reason,
- int starthist)
-{
- int cpu = raw_smp_processor_id();
- int time_set = 0;
-
- if (starthist) {
- cycle_t uninitialized_var(start);
-
- if (!preempt_count() && !irqs_disabled())
- return;
-
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- if ((reason == IRQS_OFF || reason == TRACE_START) &&
- !per_cpu(hist_irqsoff_counting, cpu)) {
- per_cpu(hist_irqsoff_counting, cpu) = 1;
- start = ftrace_now(cpu);
- time_set++;
- per_cpu(hist_irqsoff_start, cpu) = start;
- }
-#endif
-
-#ifdef CONFIG_PREEMPT_OFF_HIST
- if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
- !per_cpu(hist_preemptoff_counting, cpu)) {
- per_cpu(hist_preemptoff_counting, cpu) = 1;
- if (!(time_set++))
- start = ftrace_now(cpu);
- per_cpu(hist_preemptoff_start, cpu) = start;
- }
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- if (per_cpu(hist_irqsoff_counting, cpu) &&
- per_cpu(hist_preemptoff_counting, cpu) &&
- !per_cpu(hist_preemptirqsoff_counting, cpu)) {
- per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
- if (!time_set)
- start = ftrace_now(cpu);
- per_cpu(hist_preemptirqsoff_start, cpu) = start;
- }
-#endif
- } else {
- cycle_t uninitialized_var(stop);
-
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- if ((reason == IRQS_ON || reason == TRACE_STOP) &&
- per_cpu(hist_irqsoff_counting, cpu)) {
- cycle_t start = per_cpu(hist_irqsoff_start, cpu);
-
- stop = ftrace_now(cpu);
- time_set++;
- if (start) {
- long latency = ((long) (stop - start)) /
- NSECS_PER_USECS;
-
- latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
- stop, NULL);
- }
- per_cpu(hist_irqsoff_counting, cpu) = 0;
- }
-#endif
-
-#ifdef CONFIG_PREEMPT_OFF_HIST
- if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
- per_cpu(hist_preemptoff_counting, cpu)) {
- cycle_t start = per_cpu(hist_preemptoff_start, cpu);
-
- if (!(time_set++))
- stop = ftrace_now(cpu);
- if (start) {
- long latency = ((long) (stop - start)) /
- NSECS_PER_USECS;
-
- latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
- 0, stop, NULL);
- }
- per_cpu(hist_preemptoff_counting, cpu) = 0;
- }
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- if ((!per_cpu(hist_irqsoff_counting, cpu) ||
- !per_cpu(hist_preemptoff_counting, cpu)) &&
- per_cpu(hist_preemptirqsoff_counting, cpu)) {
- cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
-
- if (!time_set)
- stop = ftrace_now(cpu);
- if (start) {
- long latency = ((long) (stop - start)) /
- NSECS_PER_USECS;
-
- latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
- latency, 0, stop, NULL);
- }
- per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
- }
-#endif
- }
-}
-#endif
-
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
-static DEFINE_RAW_SPINLOCK(wakeup_lock);
-static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
- int cpu)
-{
- int old_cpu = task_cpu(task);
-
- if (cpu != old_cpu) {
- unsigned long flags;
- struct task_struct *cpu_wakeup_task;
-
- raw_spin_lock_irqsave(&wakeup_lock, flags);
-
- cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
- if (task == cpu_wakeup_task) {
- put_task_struct(cpu_wakeup_task);
- per_cpu(wakeup_task, old_cpu) = NULL;
- cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
- get_task_struct(cpu_wakeup_task);
- }
-
- raw_spin_unlock_irqrestore(&wakeup_lock, flags);
- }
-}
-
-static notrace void probe_wakeup_latency_hist_start(void *v,
- struct task_struct *p, int success)
-{
- unsigned long flags;
- struct task_struct *curr = current;
- int cpu = task_cpu(p);
- struct task_struct *cpu_wakeup_task;
-
- raw_spin_lock_irqsave(&wakeup_lock, flags);
-
- cpu_wakeup_task = per_cpu(wakeup_task, cpu);
-
- if (wakeup_pid) {
- if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
- p->prio == curr->prio)
- per_cpu(wakeup_sharedprio, cpu) = 1;
- if (likely(wakeup_pid != task_pid_nr(p)))
- goto out;
- } else {
- if (likely(!rt_task(p)) ||
- (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
- p->prio > curr->prio)
- goto out;
- if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
- p->prio == curr->prio)
- per_cpu(wakeup_sharedprio, cpu) = 1;
- }
-
- if (cpu_wakeup_task)
- put_task_struct(cpu_wakeup_task);
- cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
- get_task_struct(cpu_wakeup_task);
- cpu_wakeup_task->preempt_timestamp_hist =
- ftrace_now(raw_smp_processor_id());
-out:
- raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-}
-
-static notrace void probe_wakeup_latency_hist_stop(void *v,
- struct task_struct *prev, struct task_struct *next)
-{
- unsigned long flags;
- int cpu = task_cpu(next);
- long latency;
- cycle_t stop;
- struct task_struct *cpu_wakeup_task;
-
- raw_spin_lock_irqsave(&wakeup_lock, flags);
-
- cpu_wakeup_task = per_cpu(wakeup_task, cpu);
-
- if (cpu_wakeup_task == NULL)
- goto out;
-
- /* Already running? */
- if (unlikely(current == cpu_wakeup_task))
- goto out_reset;
-
- if (next != cpu_wakeup_task) {
- if (next->prio < cpu_wakeup_task->prio)
- goto out_reset;
-
- if (next->prio == cpu_wakeup_task->prio)
- per_cpu(wakeup_sharedprio, cpu) = 1;
-
- goto out;
- }
-
- if (current->prio == cpu_wakeup_task->prio)
- per_cpu(wakeup_sharedprio, cpu) = 1;
-
- /*
- * The task we are waiting for is about to be switched to.
- * Calculate latency and store it in histogram.
- */
- stop = ftrace_now(raw_smp_processor_id());
-
- latency = ((long) (stop - next->preempt_timestamp_hist)) /
- NSECS_PER_USECS;
-
- if (per_cpu(wakeup_sharedprio, cpu)) {
- latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
- next);
- per_cpu(wakeup_sharedprio, cpu) = 0;
- } else {
- latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- if (timerandwakeup_enabled_data.enabled) {
- latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
- next->timer_offset + latency, next->timer_offset,
- stop, next);
- }
-#endif
- }
-
-out_reset:
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- next->timer_offset = 0;
-#endif
- put_task_struct(cpu_wakeup_task);
- per_cpu(wakeup_task, cpu) = NULL;
-out:
- raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-}
-#endif
-
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-static notrace void probe_hrtimer_interrupt(void *v, int cpu,
- long long latency_ns, struct task_struct *curr,
- struct task_struct *task)
-{
- if (latency_ns <= 0 && task != NULL && rt_task(task) &&
- (task->prio < curr->prio ||
- (task->prio == curr->prio &&
- !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
- long latency;
- cycle_t now;
-
- if (missed_timer_offsets_pid) {
- if (likely(missed_timer_offsets_pid !=
- task_pid_nr(task)))
- return;
- }
-
- now = ftrace_now(cpu);
- latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
- latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
- task);
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- task->timer_offset = latency;
-#endif
- }
-}
-#endif
-
-static __init int latency_hist_init(void)
-{
- struct dentry *latency_hist_root = NULL;
- struct dentry *dentry;
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- struct dentry *dentry_sharedprio;
-#endif
- struct dentry *entry;
- struct dentry *enable_root;
- int i = 0;
- struct hist_data *my_hist;
- char name[64];
- char *cpufmt = "CPU%d";
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- char *cpufmt_maxlatproc = "max_latency-CPU%d";
- struct maxlatproc_data *mp = NULL;
-#endif
-
- dentry = tracing_init_dentry();
- latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
- enable_root = debugfs_create_dir("enable", latency_hist_root);
-
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(irqsoff_hist, i), &latency_hist_fops);
- my_hist = &per_cpu(irqsoff_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
- }
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
-#endif
-
-#ifdef CONFIG_PREEMPT_OFF_HIST
- dentry = debugfs_create_dir(preemptoff_hist_dir,
- latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(preemptoff_hist, i), &latency_hist_fops);
- my_hist = &per_cpu(preemptoff_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
- }
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
- latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
- my_hist = &per_cpu(preemptirqsoff_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
- }
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- entry = debugfs_create_file("preemptirqsoff", 0644,
- enable_root, (void *)&preemptirqsoff_enabled_data,
- &enable_fops);
-#endif
-
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- dentry = debugfs_create_dir(wakeup_latency_hist_dir,
- latency_hist_root);
- dentry_sharedprio = debugfs_create_dir(
- wakeup_latency_hist_dir_sharedprio, dentry);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
-
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(wakeup_latency_hist, i),
- &latency_hist_fops);
- my_hist = &per_cpu(wakeup_latency_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
-
- entry = debugfs_create_file(name, 0444, dentry_sharedprio,
- &per_cpu(wakeup_latency_hist_sharedprio, i),
- &latency_hist_fops);
- my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
-
- sprintf(name, cpufmt_maxlatproc, i);
-
- mp = &per_cpu(wakeup_maxlatproc, i);
- entry = debugfs_create_file(name, 0444, dentry, mp,
- &maxlatproc_fops);
- clear_maxlatprocdata(mp);
-
- mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
- entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
- &maxlatproc_fops);
- clear_maxlatprocdata(mp);
- }
- entry = debugfs_create_file("pid", 0644, dentry,
- (void *)&wakeup_pid, &pid_fops);
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
- entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
- (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
- entry = debugfs_create_file("wakeup", 0644,
- enable_root, (void *)&wakeup_latency_enabled_data,
- &enable_fops);
-#endif
-
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- dentry = debugfs_create_dir(missed_timer_offsets_dir,
- latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
- my_hist = &per_cpu(missed_timer_offsets, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
-
- sprintf(name, cpufmt_maxlatproc, i);
- mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
- entry = debugfs_create_file(name, 0444, dentry, mp,
- &maxlatproc_fops);
- clear_maxlatprocdata(mp);
- }
- entry = debugfs_create_file("pid", 0644, dentry,
- (void *)&missed_timer_offsets_pid, &pid_fops);
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
- entry = debugfs_create_file("missed_timer_offsets", 0644,
- enable_root, (void *)&missed_timer_offsets_enabled_data,
- &enable_fops);
-#endif
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
- latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(timerandwakeup_latency_hist, i),
- &latency_hist_fops);
- my_hist = &per_cpu(timerandwakeup_latency_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
-
- sprintf(name, cpufmt_maxlatproc, i);
- mp = &per_cpu(timerandwakeup_maxlatproc, i);
- entry = debugfs_create_file(name, 0444, dentry, mp,
- &maxlatproc_fops);
- clear_maxlatprocdata(mp);
- }
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
- entry = debugfs_create_file("timerandwakeup", 0644,
- enable_root, (void *)&timerandwakeup_enabled_data,
- &enable_fops);
-#endif
- return 0;
-}
-
-device_initcall(latency_hist_init);
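[Reviewer note: the file deleted above exposed its histograms through debugfs. The program below is a minimal user-space sketch of that now-removed interface, reconstructed from the deleted Kconfig help text and latency_hist_init() (per-CPU files are named "CPU%d", rows are "usecs<TAB>samples"); it is illustrative only and not part of this commit.]

#include <stdio.h>

#define HIST_DIR "/sys/kernel/debug/tracing/latency_hist"

int main(void)
{
	char line[128];
	FILE *f;

	/* Writing any non-zero value to the enable file starts logging. */
	f = fopen(HIST_DIR "/enable/wakeup", "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);

	/* Dump the per-CPU histogram for CPU 0: one "usecs  samples" row per bucket. */
	f = fopen(HIST_DIR "/wakeup/CPU0", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}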
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0e337ee..21ee379 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
* as data is added to any of the @buffer's cpu buffers. Otherwise
* it will wait for data to be added to a specific cpu buffer.
*/
-void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
DEFINE_WAIT(wait);
@@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
if (cpu == RING_BUFFER_ALL_CPUS)
work = &buffer->irq_work;
else {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return -ENODEV;
cpu_buffer = buffer->buffers[cpu];
work = &cpu_buffer->irq_work;
}
@@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
schedule();
finish_wait(&work->waiters, &wait);
+ return 0;
}
/**
@@ -613,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct ring_buffer_per_cpu *cpu_buffer;
struct rb_irq_work *work;
- if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
- (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
- return POLLIN | POLLRDNORM;
-
if (cpu == RING_BUFFER_ALL_CPUS)
work = &buffer->irq_work;
else {
@@ -627,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
work = &cpu_buffer->irq_work;
}
- work->waiters_pending = true;
poll_wait(filp, &work->waiters, poll_table);
+ work->waiters_pending = true;
+ /*
+ * There's a tight race between setting the waiters_pending and
+ * checking if the ring buffer is empty. Once the waiters_pending bit
+ * is set, the next event will wake the task up, but we can get stuck
+ * if there's only a single event in.
+ *
+ * FIXME: Ideally, we need a memory barrier on the writer side as well,
+ * but adding a memory barrier to all events will cause too much of a
+ * performance hit in the fast path. We only need a memory barrier when
+ * the buffer goes from empty to having content. But as this race is
+ * extremely small, and it's not a problem if another event comes in, we
+ * will fix it later.
+ */
+ smp_mb();
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
@@ -1982,7 +1995,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
/**
* rb_update_event - update event type and data
- * @event: the even to update
+ * @event: the event to update
* @type: the type of event
* @length: the size of the event field in the ring buffer
*
@@ -3355,21 +3368,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
/* Iterator usage is expected to have record disabled */
- if (list_empty(&cpu_buffer->reader_page->list)) {
- iter->head_page = rb_set_head_page(cpu_buffer);
- if (unlikely(!iter->head_page))
- return;
- iter->head = iter->head_page->read;
- } else {
- iter->head_page = cpu_buffer->reader_page;
- iter->head = cpu_buffer->reader_page->read;
- }
+ iter->head_page = cpu_buffer->reader_page;
+ iter->head = cpu_buffer->reader_page->read;
+
+ iter->cache_reader_page = iter->head_page;
+ iter->cache_read = cpu_buffer->read;
+
if (iter->head)
iter->read_stamp = cpu_buffer->read_stamp;
else
iter->read_stamp = iter->head_page->page->time_stamp;
- iter->cache_reader_page = cpu_buffer->reader_page;
- iter->cache_read = cpu_buffer->read;
}
/**
@@ -3762,12 +3770,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
return NULL;
/*
- * We repeat when a time extend is encountered.
- * Since the time extend is always attached to a data event,
- * we should never loop more than once.
- * (We never hit the following condition more than twice).
+ * We repeat when a time extend is encountered or we hit
+ * the end of the page. Since the time extend is always attached
+ * to a data event, we should never loop more than three times.
+ * Once for going to next page, once on time extend, and
+ * finally once to get the event.
+ * (We never hit the following condition more than thrice).
*/
- if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
+ if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
return NULL;
if (rb_per_cpu_empty(cpu_buffer))
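[Reviewer note: illustrative ordering for the smp_mb() added to ring_buffer_poll_wait() above; a sketch of the race that the FIXME comment describes, not part of the patch.]

	/*
	 *	poller CPU                           writer CPU
	 *	----------                           ----------
	 *	work->waiters_pending = true;        <commit event>
	 *	smp_mb();                            if (work->waiters_pending)
	 *	if (!ring_buffer_empty(buffer))              <queue irq_work wakeup>
	 *		return POLLIN | POLLRDNORM;
	 *
	 * Without the barrier, the emptiness check can be observed before the
	 * waiters_pending store; a single event committed in that window would
	 * then wake nobody until the next event arrives.
	 */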
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f9401ed..691a8ea 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -434,6 +434,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
struct print_entry *entry;
unsigned long irq_flags;
int alloc;
+ int pc;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ pc = preempt_count();
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -442,8 +448,8 @@ int __trace_puts(unsigned long ip, const char *str, int size)
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
- event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
- irq_flags, preempt_count());
+ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ irq_flags, pc);
if (!event)
return 0;
@@ -460,6 +466,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
entry->buf[size] = '\0';
__buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(buffer, irq_flags, 4, pc);
return size;
}
@@ -477,6 +484,12 @@ int __trace_bputs(unsigned long ip, const char *str)
struct bputs_entry *entry;
unsigned long irq_flags;
int size = sizeof(struct bputs_entry);
+ int pc;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ pc = preempt_count();
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -484,7 +497,7 @@ int __trace_bputs(unsigned long ip, const char *str)
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
- irq_flags, preempt_count());
+ irq_flags, pc);
if (!event)
return 0;
@@ -493,6 +506,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry->str = str;
__buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(buffer, irq_flags, 4, pc);
return 1;
}
@@ -750,7 +764,7 @@ static struct {
{ trace_clock_local, "local", 1 },
{ trace_clock_global, "global", 1 },
{ trace_clock_counter, "counter", 0 },
- { trace_clock_jiffies, "uptime", 1 },
+ { trace_clock_jiffies, "uptime", 0 },
{ trace_clock, "perf", 1 },
ARCH_TRACE_CLOCKS
};
@@ -1044,13 +1058,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
}
#endif /* CONFIG_TRACER_MAX_TRACE */
-static void default_wait_pipe(struct trace_iterator *iter)
+static int default_wait_pipe(struct trace_iterator *iter)
{
/* Iterators are static, they should be filled or empty */
if (trace_buffer_iter(iter, iter->cpu_file))
- return;
+ return 0;
- ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+ return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -1323,7 +1337,6 @@ void tracing_start(void)
arch_spin_unlock(&ftrace_max_lock);
- ftrace_start();
out:
raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
@@ -1370,7 +1383,6 @@ void tracing_stop(void)
struct ring_buffer *buffer;
unsigned long flags;
- ftrace_stop();
raw_spin_lock_irqsave(&global_trace.start_lock, flags);
if (global_trace.stop_count++)
goto out;
@@ -1417,12 +1429,12 @@ static void tracing_stop_tr(struct trace_array *tr)
void trace_stop_cmdline_recording(void);
-static void trace_save_cmdline(struct task_struct *tsk)
+static int trace_save_cmdline(struct task_struct *tsk)
{
unsigned pid, idx;
if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
- return;
+ return 0;
/*
* It's not the end of the world if we don't get
@@ -1431,7 +1443,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
* so if we miss here, then better luck next time.
*/
if (!arch_spin_trylock(&trace_cmdline_lock))
- return;
+ return 0;
idx = map_pid_to_cmdline[tsk->pid];
if (idx == NO_CMDLINE_MAP) {
@@ -1456,6 +1468,8 @@ static void trace_save_cmdline(struct task_struct *tsk)
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
arch_spin_unlock(&trace_cmdline_lock);
+
+ return 1;
}
void trace_find_cmdline(int pid, char comm[])
@@ -1497,9 +1511,8 @@ void tracing_record_cmdline(struct task_struct *tsk)
if (!__this_cpu_read(trace_cmdline_save))
return;
- __this_cpu_write(trace_cmdline_save, false);
-
- trace_save_cmdline(tsk);
+ if (trace_save_cmdline(tsk))
+ __this_cpu_write(trace_cmdline_save, false);
}
void
@@ -1509,7 +1522,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
- entry->preempt_lazy_count = preempt_lazy_count();
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
@@ -1519,10 +1531,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
- (need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
- (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0);
-
- entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+ (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
@@ -2412,17 +2421,14 @@ get_total_entries(struct trace_buffer *buf,
static void print_lat_help_header(struct seq_file *m)
{
- seq_puts(m, "# _--------=> CPU# \n");
- seq_puts(m, "# / _-------=> irqs-off \n");
- seq_puts(m, "# | / _------=> need-resched \n");
- seq_puts(m, "# || / _-----=> need-resched_lazy \n");
- seq_puts(m, "# ||| / _----=> hardirq/softirq \n");
- seq_puts(m, "# |||| / _---=> preempt-depth \n");
- seq_puts(m, "# ||||| / _--=> preempt-lazy-depth\n");
- seq_puts(m, "# |||||| / _-=> migrate-disable \n");
- seq_puts(m, "# ||||||| / delay \n");
- seq_puts(m, "# cmd pid |||||||| time | caller \n");
- seq_puts(m, "# \\ / |||||||| \\ | / \n");
+ seq_puts(m, "# _------=> CPU# \n");
+ seq_puts(m, "# / _-----=> irqs-off \n");
+ seq_puts(m, "# | / _----=> need-resched \n");
+ seq_puts(m, "# || / _---=> hardirq/softirq \n");
+ seq_puts(m, "# ||| / _--=> preempt-depth \n");
+ seq_puts(m, "# |||| / delay \n");
+ seq_puts(m, "# cmd pid ||||| time | caller \n");
+ seq_puts(m, "# \\ / ||||| \\ | / \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
@@ -2446,16 +2452,13 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
print_event_info(buf, m);
- seq_puts(m, "# _-------=> irqs-off \n");
- seq_puts(m, "# / _------=> need-resched \n");
- seq_puts(m, "# |/ _-----=> need-resched_lazy \n");
- seq_puts(m, "# ||/ _----=> hardirq/softirq \n");
- seq_puts(m, "# |||/ _---=> preempt-depth \n");
- seq_puts(m, "# ||||/ _--=> preempt-lazy-depth\n");
- seq_puts(m, "# ||||| / _-=> migrate-disable \n");
- seq_puts(m, "# |||||| / delay\n");
- seq_puts(m, "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n");
- seq_puts(m, "# | | | |||||| | |\n");
+ seq_puts(m, "# _-----=> irqs-off\n");
+ seq_puts(m, "# / _----=> need-resched\n");
+ seq_puts(m, "# | / _---=> hardirq/softirq\n");
+ seq_puts(m, "# || / _--=> preempt-depth\n");
+ seq_puts(m, "# ||| / delay\n");
+ seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
+ seq_puts(m, "# | | | |||| | |\n");
}
void
@@ -4070,17 +4073,19 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
*
* Anyway, this is really very primitive wakeup.
*/
-void poll_wait_pipe(struct trace_iterator *iter)
+int poll_wait_pipe(struct trace_iterator *iter)
{
set_current_state(TASK_INTERRUPTIBLE);
/* sleep for 100 msecs, and try again. */
schedule_timeout(HZ / 10);
+ return 0;
}
/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
struct trace_iterator *iter = filp->private_data;
+ int ret;
while (trace_empty(iter)) {
@@ -4090,10 +4095,13 @@ static int tracing_wait_pipe(struct file *filp)
mutex_unlock(&iter->mutex);
- iter->trace->wait_pipe(iter);
+ ret = iter->trace->wait_pipe(iter);
mutex_lock(&iter->mutex);
+ if (ret)
+ return ret;
+
if (signal_pending(current))
return -EINTR;
@@ -5027,8 +5035,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
goto out_unlock;
}
mutex_unlock(&trace_types_lock);
- iter->trace->wait_pipe(iter);
+ ret = iter->trace->wait_pipe(iter);
mutex_lock(&trace_types_lock);
+ if (ret) {
+ size = ret;
+ goto out_unlock;
+ }
if (signal_pending(current)) {
size = -EINTR;
goto out_unlock;
@@ -5240,8 +5252,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
goto out;
}
mutex_unlock(&trace_types_lock);
- iter->trace->wait_pipe(iter);
+ ret = iter->trace->wait_pipe(iter);
mutex_lock(&trace_types_lock);
+ if (ret)
+ goto out;
if (signal_pending(current)) {
ret = -EINTR;
goto out;
@@ -6049,7 +6063,7 @@ static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t m
int ret;
/* Paranoid: Make sure the parent is the "instances" directory */
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
if (WARN_ON_ONCE(parent != trace_instance_dir))
return -ENOENT;
@@ -6076,7 +6090,7 @@ static int instance_rmdir(struct inode *inode, struct dentry *dentry)
int ret;
/* Paranoid: Make sure the parent is the "instances" directory */
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
if (WARN_ON_ONCE(parent != trace_instance_dir))
return -ENOENT;
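The trace.c hunks above change the wait_pipe hook from a void callback into one that returns int, and each caller (tracing_wait_pipe, tracing_buffers_read, tracing_buffers_splice_read) now re-takes its lock and bails out on a non-zero return before the existing signal check. Below is a minimal user-space sketch of that caller-side pattern only; wait_for_data, consumer_lock and buffer_empty are illustrative stand-ins, not kernel symbols.

/* Sketch: propagate an error from a "wait for data" hook, as the
 * updated tracing_wait_pipe() does.  All names here are illustrative. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t consumer_lock = PTHREAD_MUTEX_INITIALIZER;
static int buffer_empty = 1;    /* stands in for trace_empty(iter) */
static int interrupted = 1;     /* pretend the sleeper was interrupted */

/* Hypothetical wait hook: 0 on success, negative errno-style on error. */
static int wait_for_data(void)
{
        return interrupted ? -EINTR : 0;
}

static int wait_pipe_loop(void)
{
        int ret;

        pthread_mutex_lock(&consumer_lock);
        while (buffer_empty) {
                /* Drop the lock while waiting, like the kernel drops
                 * iter->mutex around iter->trace->wait_pipe(iter). */
                pthread_mutex_unlock(&consumer_lock);
                ret = wait_for_data();
                pthread_mutex_lock(&consumer_lock);

                if (ret) {      /* new in this patch: bail out early */
                        pthread_mutex_unlock(&consumer_lock);
                        return ret;
                }
        }
        pthread_mutex_unlock(&consumer_lock);
        return 1;               /* data available */
}

int main(void)
{
        printf("wait_pipe_loop() = %d\n", wait_pipe_loop());
        return 0;
}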
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 109291a..7e8be3e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -117,7 +117,6 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
- * NEED_RESCHED_LAZY - lazy reschedule is requested
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
@@ -125,7 +124,6 @@ enum trace_flag_type {
TRACE_FLAG_NEED_RESCHED = 0x04,
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
- TRACE_FLAG_NEED_RESCHED_LAZY = 0x20,
};
#define TRACE_BUF_SIZE 1024
@@ -336,7 +334,7 @@ struct tracer {
void (*stop)(struct trace_array *tr);
void (*open)(struct trace_iterator *iter);
void (*pipe_open)(struct trace_iterator *iter);
- void (*wait_pipe)(struct trace_iterator *iter);
+ int (*wait_pipe)(struct trace_iterator *iter);
void (*close)(struct trace_iterator *iter);
void (*pipe_close)(struct trace_iterator *iter);
ssize_t (*read)(struct trace_iterator *iter,
@@ -551,7 +549,7 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
-void poll_wait_pipe(struct trace_iterator *iter);
+int poll_wait_pipe(struct trace_iterator *iter);
void tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *prev,
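The trace.h hunk changes the wait_pipe member of struct tracer to the int-returning prototype, so every implementation in the ops table can now report an error instead of silently retrying. A hedged sketch of that ops-table shape follows; tracer_ops, poll_wait_pipe and interruptible_wait_pipe are illustrative names, not the kernel's definitions.

/* Sketch of the ops-table pattern: wait_pipe is now an int-returning
 * callback, so a default "sleep and retry" implementation returns 0
 * while an interruptible one can return a negative errno. */
#include <errno.h>
#include <stdio.h>

struct tracer_ops {
        const char *name;
        int (*wait_pipe)(void);         /* was: void (*wait_pipe)(void) */
};

static int poll_wait_pipe(void)
{
        /* the kernel version sleeps ~100 ms here and lets the caller retry */
        return 0;
}

static int interruptible_wait_pipe(void)
{
        return -EINTR;                  /* e.g. a signal arrived while waiting */
}

static struct tracer_ops tracers[] = {
        { "poller",        poll_wait_pipe },
        { "interruptible", interruptible_wait_pipe },
};

int main(void)
{
        for (unsigned i = 0; i < sizeof(tracers) / sizeof(tracers[0]); i++)
                printf("%s -> %d\n", tracers[i].name, tracers[i].wait_pipe());
        return 0;
}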
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 26dc348..57b67b1 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
/*
* trace_jiffy_clock(): Simply use jiffies as a clock counter.
+ * Note that this use of jiffies_64 is not completely safe on
+ * 32-bit systems. But the window is tiny, and the effect if
+ * we are affected is that we will have an obviously bogus
+ * timestamp on a trace event - i.e. not life threatening.
*/
u64 notrace trace_clock_jiffies(void)
{
- u64 jiffy = jiffies - INITIAL_JIFFIES;
-
- /* Return nsecs */
- return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+ return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
/*
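The trace_clock.c hunk switches the jiffies clock to jiffies_64_to_clock_t() over the full 64-bit counter, and the added comment notes that an unlocked 64-bit read can occasionally be torn on 32-bit machines. The sketch below only illustrates the arithmetic half of the motivation: converting an elapsed tick count through a 32-bit intermediate wraps long before a 64-bit conversion does. HZ=1000 and the five-hour input are assumptions for the example, not values taken from the patch.

/* Wraparound illustration, user-space only. */
#include <stdint.h>
#include <stdio.h>

#define HZ 1000u

int main(void)
{
        uint64_t jiffies = 5u * 60u * 60u * HZ;          /* 5 hours of ticks */

        /* Old style: squeeze the value through a 32-bit microseconds count. */
        uint32_t usecs32 = (uint32_t)(jiffies * (1000000u / HZ));
        uint64_t old_ns  = (uint64_t)usecs32 * 1000ull;  /* already wrapped */

        /* Keeping the whole conversion in 64-bit math does not wrap. */
        uint64_t new_ns  = jiffies * (1000000000ull / HZ);

        printf("32-bit intermediate: %llu ns\n", (unsigned long long)old_ns);
        printf("64-bit conversion:   %llu ns\n", (unsigned long long)new_ns);
        return 0;
}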
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 9e49f3f..be15da8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -160,8 +160,6 @@ static int trace_define_common_fields(void)
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
- __common_field(unsigned short, migrate_disable);
- __common_field(unsigned short, padding);
return ret;
}
@@ -429,7 +427,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
if (dir) {
spin_lock(&dir->d_lock); /* probably unneeded */
- list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+ list_for_each_entry(child, &dir->d_subdirs, d_child) {
if (child->d_inode) /* probably unneeded */
child->d_inode->i_private = NULL;
}
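With the migrate_disable and padding fields dropped from trace_define_common_fields(), the common header shared by every event record goes back to the stock layout. As a rough, hedged illustration (field order follows the __common_field calls above; the leading type field and the exact offsets are assumptions based on the 3.12 struct trace_entry, not shown in this hunk):

/* Hedged sketch of the common event header after this patch;
 * offsets assume natural alignment on the host. */
#include <stddef.h>
#include <stdio.h>

struct trace_entry {
        unsigned short type;
        unsigned char  flags;
        unsigned char  preempt_count;
        int            pid;
};

int main(void)
{
        printf("sizeof(struct trace_entry) = %zu\n", sizeof(struct trace_entry));
        printf("offsetof(flags)            = %zu\n", offsetof(struct trace_entry, flags));
        printf("offsetof(preempt_count)    = %zu\n", offsetof(struct trace_entry, preempt_count));
        printf("offsetof(pid)              = %zu\n", offsetof(struct trace_entry, pid));
        return 0;
}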
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2f4eb37..2aefbee 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -17,7 +17,6 @@
#include <linux/fs.h>
#include "trace.h"
-#include <trace/events/hist.h>
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;
@@ -440,13 +439,11 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- trace_preemptirqsoff_hist(TRACE_START, 1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
void stop_critical_timings(void)
{
- trace_preemptirqsoff_hist(TRACE_STOP, 0);
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -456,7 +453,6 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
- trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
@@ -465,7 +461,6 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
- trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
#else /* !CONFIG_PROVE_LOCKING */
@@ -491,7 +486,6 @@ inline void print_irqtrace_events(struct task_struct *curr)
*/
void trace_hardirqs_on(void)
{
- trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -501,13 +495,11 @@ void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off);
void trace_hardirqs_on_caller(unsigned long caller_addr)
{
- trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
@@ -517,7 +509,6 @@ void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
- trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -527,14 +518,12 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
- trace_preemptirqsoff_hist(PREEMPT_ON, 0);
if (preempt_trace() && !irq_trace())
stop_critical_timing(a0, a1);
}
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
- trace_preemptirqsoff_hist(PREEMPT_ON, 1);
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
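With the histogram hooks removed, trace_irqsoff.c is back to only the start/stop_critical_timing() calls, each behind the guards visible in the hunks above: the hardirq hooks time a section only when irq tracing is enabled and preempt tracing is not active for that hook, the preempt hooks do the converse, and start/stop_critical_timings() fires when either tracer is enabled. The sketch below just tabulates those guards; preempt_trace_on and irq_trace_on are stand-ins for the kernel's preempt_trace()/irq_trace() predicates.

/* Sketch: which hooks would start/stop timing under each configuration,
 * mirroring the guard expressions in the hunks above. */
#include <stdio.h>

static int preempt_trace_on, irq_trace_on;

static int hardirq_hook_times(void)  { return !preempt_trace_on && irq_trace_on; }
static int preempt_hook_times(void)  { return  preempt_trace_on && !irq_trace_on; }
static int critical_hook_times(void) { return  preempt_trace_on || irq_trace_on; }

int main(void)
{
        for (preempt_trace_on = 0; preempt_trace_on <= 1; preempt_trace_on++)
                for (irq_trace_on = 0; irq_trace_on <= 1; irq_trace_on++)
                        printf("preempt=%d irq=%d -> hardirq:%d preempt:%d critical:%d\n",
                               preempt_trace_on, irq_trace_on,
                               hardirq_hook_times(), preempt_hook_times(),
                               critical_hook_times());
        return 0;
}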
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 46b6467..34e7cba 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -606,7 +606,6 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
char hardsoft_irq;
char need_resched;
- char need_resched_lazy;
char irqs_off;
int hardirq;
int softirq;
@@ -621,17 +620,14 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
'.';
need_resched =
(entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
- need_resched_lazy =
- (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
hardsoft_irq =
(hardirq && softirq) ? 'H' :
hardirq ? 'h' :
softirq ? 's' :
'.';
- if (!trace_seq_printf(s, "%c%c%c%c",
- irqs_off, need_resched, need_resched_lazy,
- hardsoft_irq))
+ if (!trace_seq_printf(s, "%c%c%c",
+ irqs_off, need_resched, hardsoft_irq))
return 0;
if (entry->preempt_count)
@@ -639,16 +635,6 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
else
ret = trace_seq_putc(s, '.');
- if (entry->preempt_lazy_count)
- ret = trace_seq_printf(s, "%x", entry->preempt_lazy_count);
- else
- ret = trace_seq_putc(s, '.');
-
- if (entry->migrate_disable)
- ret = trace_seq_printf(s, "%x", entry->migrate_disable);
- else
- ret = trace_seq_putc(s, '.');
-
return ret;
}
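After this change, trace_print_lat_fmt() emits three flag characters plus the preempt count, matching the shorter header columns restored in the trace.c hunks above (irqs-off, need-resched, hardirq/softirq, preempt-depth). A hedged sketch of producing such a field is below; the flag values come from the trace.h hunk above, while the 'X' case for "irqs-off not supported" is omitted for brevity, so the irqs-off handling here is simplified.

/* Hedged sketch of the per-entry latency field after this patch:
 * three flag characters followed by the preempt count (or '.'). */
#include <stdio.h>

#define TRACE_FLAG_IRQS_OFF     0x01
#define TRACE_FLAG_NEED_RESCHED 0x04
#define TRACE_FLAG_HARDIRQ      0x08
#define TRACE_FLAG_SOFTIRQ      0x10

static void print_lat_field(unsigned char flags, unsigned char preempt_count)
{
        int hardirq = flags & TRACE_FLAG_HARDIRQ;
        int softirq = flags & TRACE_FLAG_SOFTIRQ;

        char irqs_off     = (flags & TRACE_FLAG_IRQS_OFF)     ? 'd' : '.';
        char need_resched = (flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
        char hardsoft_irq = (hardirq && softirq) ? 'H' :
                            hardirq              ? 'h' :
                            softirq              ? 's' : '.';

        printf("%c%c%c", irqs_off, need_resched, hardsoft_irq);
        if (preempt_count)
                printf("%x\n", preempt_count);
        else
                printf(".\n");
}

int main(void)
{
        print_lat_field(TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ, 1);  /* "d.h1" */
        print_lat_field(0, 0);                                         /* "...." */
        return 0;
}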
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 559329d..d8ce71b 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -312,7 +312,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
int size;
syscall_nr = trace_get_syscall_nr(current, regs);
- if (syscall_nr < 0)
+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
return;
if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
return;
@@ -354,7 +354,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
int syscall_nr;
syscall_nr = trace_get_syscall_nr(current, regs);
- if (syscall_nr < 0)
+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
return;
if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
return;
@@ -557,7 +557,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
int size;
syscall_nr = trace_get_syscall_nr(current, regs);
- if (syscall_nr < 0)
+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
return;
if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
return;
@@ -631,7 +631,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
int size;
syscall_nr = trace_get_syscall_nr(current, regs);
- if (syscall_nr < 0)
+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
return;
if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
return;
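The four trace_syscalls.c hunks add the same upper-bound check: the number returned by trace_get_syscall_nr() is used to index NR_syscalls-sized bitmaps and metadata tables, so values at or above NR_syscalls must be rejected just like negative ones before any bit test. A small user-space sketch of that pattern follows; NR_syscalls, the bitmap and test_bit() here are stand-ins, not the kernel's implementations.

/* Sketch of the bounds check added above: validate the index before
 * touching an NR_syscalls-sized bitmap. */
#include <stdio.h>

#define NR_syscalls 64
static unsigned long enabled_enter_syscalls[(NR_syscalls + (8 * sizeof(unsigned long)) - 1) /
                                            (8 * sizeof(unsigned long))];

static int test_bit(int nr, const unsigned long *addr)
{
        return (addr[nr / (8 * sizeof(unsigned long))] >>
                (nr % (8 * sizeof(unsigned long)))) & 1;
}

static void syscall_enter(int syscall_nr)
{
        if (syscall_nr < 0 || syscall_nr >= NR_syscalls)    /* the added check */
                return;
        if (!test_bit(syscall_nr, enabled_enter_syscalls))
                return;
        printf("would trace syscall %d\n", syscall_nr);
}

int main(void)
{
        enabled_enter_syscalls[0] |= 1ul << 3;   /* pretend syscall 3 is enabled */
        syscall_enter(3);       /* traced */
        syscall_enter(-1);      /* rejected: negative */
        syscall_enter(4096);    /* rejected: beyond NR_syscalls */
        return 0;
}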