author     Scott Wood <scottwood@freescale.com>    2014-04-07 23:49:35 (GMT)
committer  Scott Wood <scottwood@freescale.com>    2014-04-07 23:49:35 (GMT)
commit     62b8c978ee6b8d135d9e7953221de58000dba986 (patch)
tree       683b04b2e627f6710c22c151b23c8cc9a165315e /kernel/time
parent     78fd82238d0e5716578c326404184a27ba67fd6e (diff)
download   linux-fsl-qoriq-62b8c978ee6b8d135d9e7953221de58000dba986.tar.xz
Rewind v3.13-rc3+ (78fd82238d0e5716) to v3.12
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/Kconfig          |   2
-rw-r--r--  kernel/time/alarmtimer.c     |   4
-rw-r--r--  kernel/time/clockevents.c    |   2
-rw-r--r--  kernel/time/clocksource.c    |  52
-rw-r--r--  kernel/time/ntp.c            |   3
-rw-r--r--  kernel/time/sched_clock.c    | 114
-rw-r--r--  kernel/time/tick-broadcast.c |   1
-rw-r--r--  kernel/time/tick-common.c    |  15
-rw-r--r--  kernel/time/tick-internal.h  |   2
-rw-r--r--  kernel/time/tick-sched.c     |  25
-rw-r--r--  kernel/time/timekeeping.c    |   5
-rw-r--r--  kernel/time/timer_stats.c    |   8
12 files changed, 98 insertions, 135 deletions
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 3ce6e8c..2b62fe8 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -100,7 +100,7 @@ config NO_HZ_FULL
 	# RCU_USER_QS dependency
 	depends on HAVE_CONTEXT_TRACKING
 	# VIRT_CPU_ACCOUNTING_GEN dependency
-	depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
+	depends on 64BIT
 	select NO_HZ_COMMON
 	select RCU_USER_QS
 	select RCU_NOCB_CPU
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 88c9c65..eec50fc 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -490,7 +490,7 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
 	clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
 
 	if (!alarmtimer_get_rtcdev())
-		return -EINVAL;
+		return -ENOTSUPP;
 
 	return hrtimer_get_res(baseid, tp);
 }
@@ -507,7 +507,7 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
 	struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
 
 	if (!alarmtimer_get_rtcdev())
-		return -EINVAL;
+		return -ENOTSUPP;
 
 	*tp = ktime_to_timespec(base->gettime());
 	return 0;
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 086ad60..662c579 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -619,7 +619,7 @@ static ssize_t sysfs_unbind_tick_dev(struct device *dev,
 				     const char *buf, size_t count)
 {
 	char name[CS_NAME_LEN];
-	ssize_t ret = sysfs_get_uname(buf, name, count);
+	size_t ret = sysfs_get_uname(buf, name, count);
 	struct clock_event_device *ce;
 
 	if (ret < 0)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ba3e502..50a8736 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -479,7 +479,6 @@ static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
 static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
-void clocksource_mark_unstable(struct clocksource *cs) { }
 
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
@@ -538,55 +537,40 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
 }
 
 /**
- * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
- * @mult:	cycle to nanosecond multiplier
- * @shift:	cycle to nanosecond divisor (power of two)
- * @maxadj:	maximum adjustment value to mult (~11%)
- * @mask:	bitmask for two's complement subtraction of non 64 bit counters
+ * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * @cs:	Pointer to clocksource
+ *
  */
-u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
+static u64 clocksource_max_deferment(struct clocksource *cs)
 {
 	u64 max_nsecs, max_cycles;
 
 	/*
 	 * Calculate the maximum number of cycles that we can pass to the
 	 * cyc2ns function without overflowing a 64-bit signed result. The
-	 * maximum number of cycles is equal to ULLONG_MAX/(mult+maxadj)
+	 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
 	 * which is equivalent to the below.
-	 * max_cycles < (2^63)/(mult + maxadj)
-	 * max_cycles < 2^(log2((2^63)/(mult + maxadj)))
-	 * max_cycles < 2^(log2(2^63) - log2(mult + maxadj))
-	 * max_cycles < 2^(63 - log2(mult + maxadj))
-	 * max_cycles < 1 << (63 - log2(mult + maxadj))
+	 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+	 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+	 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+	 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+	 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
 	 * Please note that we add 1 to the result of the log2 to account for
 	 * any rounding errors, ensure the above inequality is satisfied and
 	 * no overflow will occur.
 	 */
-	max_cycles = 1ULL << (63 - (ilog2(mult + maxadj) + 1));
+	max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
 
 	/*
 	 * The actual maximum number of cycles we can defer the clocksource is
-	 * determined by the minimum of max_cycles and mask.
+	 * determined by the minimum of max_cycles and cs->mask.
 	 * Note: Here we subtract the maxadj to make sure we don't sleep for
 	 * too long if there's a large negative adjustment.
 	 */
-	max_cycles = min(max_cycles, mask);
-	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
-
-	return max_nsecs;
-}
-
-/**
- * clocksource_max_deferment - Returns max time the clocksource can be deferred
- * @cs:	Pointer to clocksource
- *
- */
-static u64 clocksource_max_deferment(struct clocksource *cs)
-{
-	u64 max_nsecs;
+	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
+	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+				       cs->shift);
 
-	max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
-					  cs->mask);
 	/*
 	 * To ensure that the clocksource does not wrap whilst we are idle,
 	 * limit the time the clocksource can be deferred by 12.5%. Please
@@ -909,7 +893,7 @@ sysfs_show_current_clocksources(struct device *dev,
 	return count;
 }
 
-ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
+size_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
 {
 	size_t ret = cnt;
 
@@ -940,7 +924,7 @@ static ssize_t sysfs_override_clocksource(struct device *dev,
 					  struct device_attribute *attr,
 					  const char *buf, size_t count)
 {
-	ssize_t ret;
+	size_t ret;
 
 	mutex_lock(&clocksource_mutex);
 
@@ -968,7 +952,7 @@ static ssize_t sysfs_unbind_clocksource(struct device *dev,
 {
 	struct clocksource *cs;
 	char name[CS_NAME_LEN];
-	ssize_t ret;
+	size_t ret;
 
 	ret = sysfs_get_uname(buf, name, count);
 	if (ret < 0)
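For readers tracing the math: the restored clocksource_max_deferment() computes the same overflow bound that the removed clocks_calc_max_nsecs() helper did, just with the clocksource fields inlined. The sketch below is a hedged userspace rendition of that bound, with illustrative mult/shift/maxadj values for a roughly 1 GHz counter and a portable loop standing in for the kernel's ilog2(); cyc2ns mirrors clocksource_cyc2ns(), and none of these names are a real userspace API:

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2(): floor(log2(x)). */
static unsigned int ilog2_u32(uint32_t x)
{
	unsigned int r = 0;
	while (x >>= 1)
		r++;
	return r;
}

/* (cycles * mult) >> shift, as in clocksource_cyc2ns(). */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* Illustrative values: mult = 2^22, shift = 22 makes cyc2ns an
	 * identity for a 1 GHz counter; maxadj is ~11% of mult. */
	uint32_t mult = 4194304, shift = 22, maxadj = mult * 11 / 100;
	uint64_t mask = 0xffffffffffffffffULL;	/* 64-bit counter */

	/* The +1 on the log2 absorbs rounding so that
	 * max_cycles * (mult + maxadj) can never overflow s64. */
	uint64_t max_cycles = 1ULL << (63 - (ilog2_u32(mult + maxadj) + 1));

	if (max_cycles > mask)		/* min(max_cycles, mask) */
		max_cycles = mask;

	/* Use mult - maxadj so a large negative rate adjustment cannot
	 * push us past the wrap point; the kernel then derates the
	 * result by a further 12.5%. */
	uint64_t max_nsecs = cyc2ns(max_cycles, mult - maxadj, shift);

	printf("max_cycles=%llu max_nsecs=%llu\n",
	       (unsigned long long)max_cycles,
	       (unsigned long long)max_nsecs);
	return 0;
}
```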
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index af8d1d4..bb22151 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -475,7 +475,6 @@ static void sync_cmos_clock(struct work_struct *work)
 	 * called as close as possible to 500 ms before the new second starts.
 	 * This code is run on a timer.  If the clock is set, that timer
 	 * may not expire at the correct time.  Thus, we adjust...
-	 * We want the clock to be within a couple of ticks from the target.
 	 */
 	if (!ntp_synced()) {
 		/*
@@ -486,7 +485,7 @@ static void sync_cmos_clock(struct work_struct *work)
 	}
 
 	getnstimeofday(&now);
-	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
+	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
 		struct timespec adjust = now;
 
 		fail = -ENODEV;
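The functional effect of the one-line ntp.c change is a much narrower firing window for the RTC update: the write must now land within half a tick of the 500 ms target rather than within five ticks. A hedged userspace model of that gate follows; in_update_window() is an illustrative helper, not a kernel function, and HZ=100 is assumed:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NSEC_PER_SEC 1000000000L

/* Models the gate in sync_cmos_clock(): only program the RTC when we
 * are within `window` ns of the 500 ms point of the current second. */
static bool in_update_window(long tv_nsec, long window)
{
	return labs(tv_nsec - NSEC_PER_SEC / 2) <= window;
}

int main(void)
{
	long tick_nsec = NSEC_PER_SEC / 100;	/* HZ=100: 10 ms tick */
	long now = 508 * 1000000L;		/* 508 ms into the second */

	/* 8 ms off target: inside the old +/-50 ms window, outside the
	 * restored +/-5 ms one, so the revert would defer the update. */
	printf("v3.13-rc (tick_nsec * 5): %s\n",
	       in_update_window(now, tick_nsec * 5) ? "update" : "retry later");
	printf("v3.12    (tick_nsec / 2): %s\n",
	       in_update_window(now, tick_nsec / 2) ? "update" : "retry later");
	return 0;
}
```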
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 68b7993..0b479a6 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -8,28 +8,25 @@
 #include <linux/clocksource.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
-#include <linux/ktime.h>
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/syscore_ops.h>
-#include <linux/hrtimer.h>
+#include <linux/timer.h>
 #include <linux/sched_clock.h>
-#include <linux/seqlock.h>
-#include <linux/bitops.h>
 
 struct clock_data {
-	ktime_t wrap_kt;
 	u64 epoch_ns;
-	u64 epoch_cyc;
-	seqcount_t seq;
+	u32 epoch_cyc;
+	u32 epoch_cyc_copy;
 	unsigned long rate;
 	u32 mult;
 	u32 shift;
 	bool suspended;
 };
 
-static struct hrtimer sched_clock_timer;
+static void sched_clock_poll(unsigned long wrap_ticks);
+static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
 
 static int irqtime = -1;
 core_param(irqtime, irqtime, int, 0400);
@@ -38,46 +35,42 @@ static struct clock_data cd = {
 	.mult	= NSEC_PER_SEC / HZ,
 };
 
-static u64 __read_mostly sched_clock_mask;
+static u32 __read_mostly sched_clock_mask = 0xffffffff;
 
-static u64 notrace jiffy_sched_clock_read(void)
+static u32 notrace jiffy_sched_clock_read(void)
 {
-	/*
-	 * We don't need to use get_jiffies_64 on 32-bit arches here
-	 * because we register with BITS_PER_LONG
-	 */
-	return (u64)(jiffies - INITIAL_JIFFIES);
+	return (u32)(jiffies - INITIAL_JIFFIES);
 }
 
-static u32 __read_mostly (*read_sched_clock_32)(void);
-
-static u64 notrace read_sched_clock_32_wrapper(void)
-{
-	return read_sched_clock_32();
-}
-
-static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
 static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
 	return (cyc * mult) >> shift;
 }
 
-unsigned long long notrace sched_clock(void)
+static unsigned long long notrace sched_clock_32(void)
 {
 	u64 epoch_ns;
-	u64 epoch_cyc;
-	u64 cyc;
-	unsigned long seq;
+	u32 epoch_cyc;
+	u32 cyc;
 
 	if (cd.suspended)
 		return cd.epoch_ns;
 
+	/*
+	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
+	 * ensuring that we always write epoch_cyc, epoch_ns and
+	 * epoch_cyc_copy in strict order, and read them in strict order.
+	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+	 * the middle of an update, and we should repeat the load.
+	 */
 	do {
-		seq = read_seqcount_begin(&cd.seq);
 		epoch_cyc = cd.epoch_cyc;
+		smp_rmb();
 		epoch_ns = cd.epoch_ns;
-	} while (read_seqcount_retry(&cd.seq, seq));
+		smp_rmb();
+	} while (epoch_cyc != cd.epoch_cyc_copy);
 
 	cyc = read_sched_clock();
 	cyc = (cyc - epoch_cyc) & sched_clock_mask;
@@ -90,46 +83,49 @@ unsigned long long notrace sched_clock(void)
 static void notrace update_sched_clock(void)
 {
 	unsigned long flags;
-	u64 cyc;
+	u32 cyc;
 	u64 ns;
 
 	cyc = read_sched_clock();
 	ns = cd.epoch_ns +
 		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
 			  cd.mult, cd.shift);
-
+	/*
+	 * Write epoch_cyc and epoch_ns in a way that the update is
+	 * detectable in cyc_to_fixed_sched_clock().
+	 */
 	raw_local_irq_save(flags);
-	write_seqcount_begin(&cd.seq);
+	cd.epoch_cyc_copy = cyc;
+	smp_wmb();
 	cd.epoch_ns = ns;
+	smp_wmb();
 	cd.epoch_cyc = cyc;
-	write_seqcount_end(&cd.seq);
 	raw_local_irq_restore(flags);
 }
 
-static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
+static void sched_clock_poll(unsigned long wrap_ticks)
 {
+	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
 	update_sched_clock();
-	hrtimer_forward_now(hrt, cd.wrap_kt);
-	return HRTIMER_RESTART;
 }
 
-void __init sched_clock_register(u64 (*read)(void), int bits,
-				 unsigned long rate)
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
-	unsigned long r;
+	unsigned long r, w;
 	u64 res, wrap;
 	char r_unit;
 
 	if (cd.rate > rate)
 		return;
 
+	BUG_ON(bits > 32);
 	WARN_ON(!irqs_disabled());
 	read_sched_clock = read;
-	sched_clock_mask = CLOCKSOURCE_MASK(bits);
+	sched_clock_mask = (1ULL << bits) - 1;
 	cd.rate = rate;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);
+	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
 
 	r = rate;
 	if (r >= 4000000) {
@@ -142,14 +138,20 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 		r_unit = ' ';
 
 	/* calculate how many ns until we wrap */
-	wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
-	cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
+	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
+	do_div(wrap, NSEC_PER_MSEC);
+	w = wrap;
 
 	/* calculate the ns resolution of this counter */
 	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
-	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
-		bits, r, r_unit, res, wrap);
+	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
+		bits, r, r_unit, res, w);
 
+	/*
+	 * Start the timer to keep sched_clock() properly updated and
+	 * sets the initial epoch.
+	 */
+	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
 	update_sched_clock();
 
 	/*
@@ -164,10 +166,11 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
-void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
+unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
+
+unsigned long long notrace sched_clock(void)
 {
-	read_sched_clock_32 = read;
-	sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
+	return sched_clock_func();
 }
 
 void __init sched_clock_postinit(void)
@@ -177,22 +180,14 @@ void __init sched_clock_postinit(void)
 	 * make it the final one one.
 	 */
 	if (read_sched_clock == jiffy_sched_clock_read)
-		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
+		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
 
-	update_sched_clock();
-
-	/*
-	 * Start the timer to keep sched_clock() properly updated and
-	 * sets the initial epoch.
-	 */
-	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	sched_clock_timer.function = sched_clock_poll;
-	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+	sched_clock_poll(sched_clock_timer.data);
 }
 
 static int sched_clock_suspend(void)
 {
-	sched_clock_poll(&sched_clock_timer);
+	sched_clock_poll(sched_clock_timer.data);
 	cd.suspended = true;
 	return 0;
 }
@@ -200,6 +195,7 @@ static int sched_clock_suspend(void)
 static void sched_clock_resume(void)
 {
 	cd.epoch_cyc = read_sched_clock();
+	cd.epoch_cyc_copy = cd.epoch_cyc;
 	cd.suspended = false;
 }
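Both directions of the sched_clock.c hunks are worth reading: v3.13-rc published the (epoch_cyc, epoch_ns) pair under a seqcount, while the restored v3.12 code detects torn reads by keeping a second copy of the cycle count written on the far side of the nanosecond value. A minimal userspace sketch of the restored scheme follows, assuming GCC/Clang fence builtins as stand-ins for the kernel's smp_wmb()/smp_rmb(); update_epoch() and read_epoch() are illustrative names, not kernel functions:

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel barriers used in the revert. */
#define smp_wmb()	__atomic_thread_fence(__ATOMIC_RELEASE)
#define smp_rmb()	__atomic_thread_fence(__ATOMIC_ACQUIRE)

static struct {
	uint64_t epoch_ns;
	uint32_t epoch_cyc;
	uint32_t epoch_cyc_copy;
} cd;

/* Writer side, as in update_sched_clock(): epoch_cyc_copy is written
 * first and epoch_cyc last, with epoch_ns in between, so a reader that
 * sees the two copies equal knows epoch_ns is consistent. */
static void update_epoch(uint32_t cyc, uint64_t ns)
{
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
}

/* Reader side, as in sched_clock_32(): if the two cycle copies differ,
 * an update raced with us and the pair must be reloaded. */
static uint64_t read_epoch(uint32_t *cyc)
{
	uint64_t ns;

	do {
		*cyc = cd.epoch_cyc;
		smp_rmb();
		ns = cd.epoch_ns;
		smp_rmb();
	} while (*cyc != cd.epoch_cyc_copy);

	return ns;
}

int main(void)
{
	uint32_t cyc = 0;
	uint64_t ns;

	update_epoch(1000, 123456789ULL);
	ns = read_epoch(&cyc);
	printf("epoch: cyc=%u ns=%llu\n", cyc, (unsigned long long)ns);
	return 0;
}
```

The design trade-off is visible in the struct: the double-copy trick only covers a single 32-bit counter, which is why the v3.13-rc code had to move to a seqcount once epoch_cyc grew to 64 bits.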
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9532690..218bcb5 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -70,7 +70,6 @@ static bool tick_check_broadcast_device(struct clock_event_device *curdev,
 					struct clock_event_device *newdev)
 {
 	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
-	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
 	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
 		return false;
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 162b03a..64522ec 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,21 +33,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-
-/*
- * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
- * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
- * variable has two functions:
- *
- * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
- *    timekeeping lock all at once. Only the CPU which is assigned to do the
- *    update is handling it.
- *
- * 2) Hand off the duty in the NOHZ idle case by setting the value to
- *    TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks
- *    at it will take over and keep the time keeping alive.  The handover
- *    procedure also covers cpu hotplug.
- */
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 
 /*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 18e71f7..bc906ca 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -31,7 +31,7 @@ extern void tick_install_replacement(struct clock_event_device *dev);
 
 extern void clockevents_shutdown(struct clock_event_device *dev);
 
-extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
+extern size_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
 
 /*
  * NO_HZ / high resolution timer shared code
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index ea20f7d..3612fc7 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
 /*
  * NO HZ enabled ?
  */
-static int tick_nohz_enabled __read_mostly = 1;
-int tick_nohz_active __read_mostly;
+int tick_nohz_enabled __read_mostly = 1;
+
 /*
  * Enable / Disable tickless mode
  */
@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t now, idle;
 
-	if (!tick_nohz_active)
+	if (!tick_nohz_enabled)
 		return -1;
 
 	now = ktime_get();
@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t now, iowait;
 
-	if (!tick_nohz_active)
+	if (!tick_nohz_enabled)
 		return -1;
 
 	now = ktime_get();
@@ -711,10 +711,8 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 		return false;
 	}
 
-	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
-		ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
+	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
 		return false;
-	}
 
 	if (need_resched())
 		return false;
@@ -801,6 +799,11 @@ void tick_nohz_idle_enter(void)
 	local_irq_disable();
 
 	ts = &__get_cpu_var(tick_cpu_sched);
+	/*
+	 * set ts->inidle unconditionally. even if the system did not
+	 * switch to nohz mode the cpu frequency governers rely on the
+	 * update of the idle time accounting in tick_nohz_start_idle().
+	 */
 	ts->inidle = 1;
 
 	__tick_nohz_idle_enter(ts);
@@ -970,7 +973,7 @@ static void tick_nohz_switch_to_nohz(void)
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t next;
 
-	if (!tick_nohz_active)
+	if (!tick_nohz_enabled)
 		return;
 
 	local_irq_disable();
@@ -978,7 +981,7 @@ static void tick_nohz_switch_to_nohz(void)
 		local_irq_enable();
 		return;
 	}
-	tick_nohz_active = 1;
+
 	ts->nohz_mode = NOHZ_MODE_LOWRES;
 
 	/*
@@ -1136,10 +1139,8 @@ void tick_setup_sched_timer(void)
 	}
 
 #ifdef CONFIG_NO_HZ_COMMON
-	if (tick_nohz_enabled) {
+	if (tick_nohz_enabled)
 		ts->nohz_mode = NOHZ_MODE_HIGHRES;
-		tick_nohz_active = 1;
-	}
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 87b4f00..947ba25 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1347,7 +1347,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
 	tk->xtime_nsec -= remainder;
 	tk->xtime_nsec += 1ULL << tk->shift;
 	tk->ntp_error += remainder << tk->ntp_error_shift;
-	tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
+
 }
 #else
 #define old_vsyscall_fixup(tk)
@@ -1613,10 +1613,9 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
- * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets
- * Called from hrtimer_interrupt() or retrigger_next_event()
+ * Called from hrtimer_interupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
							ktime_t *offs_tai)
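The dropped ntp_error line is the subtle part of the timekeeping hunk: v3.13-rc3 charged the error accumulator with both the remainder removed from xtime_nsec and the whole shifted nanosecond added back, so the round-up was invisible to the NTP correction loop, whereas the restored v3.12 code charges only the remainder and lets the rounding leak into ntp_error. A toy model of that bookkeeping, with purely illustrative shift values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 8, ntp_error_shift = 4;
	uint64_t xtime_nsec = 1000;	/* fixed-point ns, scaled by 2^shift */
	int64_t  ntp_error  = 0;

	/* Round xtime_nsec up to the next whole shifted nanosecond, as
	 * old_vsyscall_fixup() does for the legacy vsyscall path. */
	uint64_t remainder = xtime_nsec & ((1ULL << shift) - 1);
	xtime_nsec -= remainder;
	xtime_nsec += 1ULL << shift;

	/* Both kernels add the remainder; only v3.13-rc3+ subtracted the
	 * matching whole unit (the line this commit removes). */
	ntp_error += remainder << ntp_error_shift;
	ntp_error -= (1ULL << shift) << ntp_error_shift;

	/* With the compensation, the books balance to the rounding delta;
	 * without it, ntp_error would read a spurious positive error. */
	printf("xtime_nsec=%llu ntp_error=%lld\n",
	       (unsigned long long)xtime_nsec, (long long)ntp_error);
	return 0;
}
```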
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 1fb08f2..0b537f2 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -298,15 +298,15 @@ static int tstats_show(struct seq_file *m, void *v)
 	period = ktime_to_timespec(time);
 	ms = period.tv_nsec / 1000000;
 
-	seq_puts(m, "Timer Stats Version: v0.3\n");
+	seq_puts(m, "Timer Stats Version: v0.2\n");
 	seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
 	if (atomic_read(&overflow_count))
-		seq_printf(m, "Overflow: %d entries\n",
-			   atomic_read(&overflow_count));
-	seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
+		seq_printf(m, "Overflow: %d entries\n",
+			   atomic_read(&overflow_count));
 
 	for (i = 0; i < nr_entries; i++) {
 		entry = entries + i;
-		if (entry->timer_flag & TIMER_STATS_FLAG_DEFERRABLE) {
+		if (entry->timer_flag & TIMER_STATS_FLAG_DEFERRABLE) {
 			seq_printf(m, "%4luD, %5d %-16s ",
 				entry->count, entry->pid, entry->comm);
 		} else {
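The tstats_show() changes revert the header to "v0.2" and drop the "Collection: active/inactive" status line. On kernels that still carry this code (it depends on CONFIG_TIMER_STATS, and the facility was removed from mainline in v4.11), the file is driven from userspace roughly as in this sketch, which assumes root privileges:

```c
#include <stdio.h>
#include <unistd.h>

/* Enable timer-stats collection, wait for some timers to fire, then
 * dump the report that tstats_show() formats. */
int main(void)
{
	FILE *f = fopen("/proc/timer_stats", "w");
	char line[256];

	if (!f) {
		perror("/proc/timer_stats");
		return 1;
	}
	fputs("1\n", f);	/* "1" starts collection, "0" stops it */
	fclose(f);

	sleep(2);		/* let some timers expire */

	f = fopen("/proc/timer_stats", "r");
	if (!f) {
		perror("/proc/timer_stats");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Timer Stats Version: v0.2" */
	fclose(f);
	return 0;
}
```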