From 571af55a31d3652ac1f758f116835a76d0335661 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Tue, 25 Aug 2015 14:42:53 +0800 Subject: time: Fix spelling in comments Signed-off-by: Zhen Lei Cc: Hanjun Guo Cc: John Stultz Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J. Wysocki Cc: Thomas Gleixner Cc: Tianhong Ding Cc: Viresh Kumar Cc: Xinwei Hu Cc: Xunlei Pang Cc: Zefan Li Link: http://lkml.kernel.org/r/1440484973-13892-1-git-send-email-thunder.leizhen@huawei.com [ Fixed yet another typo in one of the sentences fixed. ] Signed-off-by: Ingo Molnar diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 841b72f..174c594 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -479,7 +479,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs) * return half the number of nanoseconds the hardware counter can technically * cover. This is done so that we can potentially detect problems caused by * delayed timers or bad hardware, which might result in time intervals that - * are larger then what the math used can handle without overflows. + * are larger than what the math used can handle without overflows. */ u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc) { diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 457a373..435b885 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -59,7 +59,7 @@ /* * The timer bases: * - * There are more clockids then hrtimer bases. Thus, we index + * There are more clockids than hrtimer bases. Thus, we index * into the timer bases by the hrtimer_base_type enum. When trying * to reach a base using a clockid, hrtimer_clockid_to_base() * is used to convert from clockid to the proper hrtimer_base_type. diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 3739ac6..125f16a 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1674,7 +1674,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) /** * accumulate_nsecs_to_secs - Accumulates nsecs into secs * - * Helper function that accumulates a the nsecs greater then a second + * Helper function that accumulates the nsecs greater than a second * from the xtime_nsec field to the xtime_secs field. * It also calls into the NTP code to handle leapsecond processing. * @@ -1726,7 +1726,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, cycle_t interval = tk->cycle_interval << shift; u64 raw_nsecs; - /* If the offset is smaller then a shifted interval, do nothing */ + /* If the offset is smaller than a shifted interval, do nothing */ if (offset < interval) return offset; -- cgit v0.10.2 From 3ed769bdb2a2484fd7f9f7f3047413053aacbe21 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Fri, 18 Sep 2015 15:54:23 +0200 Subject: timers: Fix data race in timer_stats_account_timer() timer_stats_account_timer() reads timer->start_site, then checks it for NULL and then re-reads it again, while timer_stats_timer_clear_start_info() can concurrently reset timer->start_site to NULL. This should not lead to crashes, but can double number of entries in timer stats as start_site is used during comparison, the doubled entries will have unuseful NULL start_site. Read timer->start_site only once in timer_stats_account_timer(). The data race was found with KernelThreadSanitizer (KTSAN). 
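The buggy pattern is generic: a shared pointer is checked and then read again, leaving a window in which another CPU can clear it in between. As an illustrative aside (not part of the patch below; READ_ONCE_SKETCH and struct timer_sketch are made-up stand-ins for the kernel's READ_ONCE() and struct timer_list), the racy and the fixed patterns look like this in a minimal standalone form:

#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): force a single
 * volatile load of the shared pointer (uses the GNU C typeof extension).
 * Illustration only. */
#define READ_ONCE_SKETCH(x) (*(volatile typeof(x) *)&(x))

struct timer_sketch {
        void *start_site;       /* may be reset to NULL by another CPU */
};

static void account_racy(struct timer_sketch *t)
{
        if (!t->start_site)                     /* read #1 */
                return;
        /* ... another CPU can clear start_site right here ... */
        printf("site=%p\n", t->start_site);     /* read #2 may see NULL */
}

static void account_fixed(struct timer_sketch *t)
{
        void *site = READ_ONCE_SKETCH(t->start_site);   /* single read */

        if (!site)
                return;
        printf("site=%p\n", site);
}

int main(void)
{
        struct timer_sketch t = { .start_site = (void *)&t };

        account_racy(&t);
        account_fixed(&t);
        return 0;
}

The patch below applies the second pattern to timer->start_site.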
Signed-off-by: Dmitry Vyukov Cc: andreyknvl@google.com Cc: glider@google.com Cc: kcc@google.com Cc: ktsan@googlegroups.com Cc: john.stultz@linaro.org Link: http://lkml.kernel.org/r/1442584463-69553-1-git-send-email-dvyukov@google.com Signed-off-by: Thomas Gleixner diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 84190f0..d3f5e92 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -461,10 +461,17 @@ void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr) static void timer_stats_account_timer(struct timer_list *timer) { - if (likely(!timer->start_site)) + void *site; + + /* + * start_site can be concurrently reset by + * timer_stats_timer_clear_start_info() + */ + site = READ_ONCE(timer->start_site); + if (likely(!site)) return; - timer_stats_update_stats(timer, timer->start_pid, timer->start_site, + timer_stats_update_stats(timer, timer->start_pid, site, timer->function, timer->start_comm, timer->flags); } -- cgit v0.10.2 From 7ec88e4be461590b5a3817460c34603f76d9b3ae Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 28 Sep 2015 22:21:28 +0200 Subject: ntp/pps: use timespec64 for hardpps() There is only one user of the hardpps function in the kernel, so it makes sense to atomically change it over to using 64-bit timestamps for y2038 safety. In the hardpps implementation, we also need to change the pps_normtime structure, which is similar to struct timespec and also requires a 64-bit seconds portion. This introduces two temporary variables in pps_kc_event() to do the conversion, they will be removed again in the next step, which seemed preferable to having a larger patch changing it all at the same time. Acked-by: Richard Cochran Acked-by: David S. Miller Reviewed-by: Thomas Gleixner Signed-off-by: Arnd Bergmann Signed-off-by: John Stultz diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c index e219db1..a16cea2 100644 --- a/drivers/pps/kc.c +++ b/drivers/pps/kc.c @@ -113,10 +113,12 @@ void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts, int event) { unsigned long flags; + struct timespec64 real = timespec_to_timespec64(ts->ts_real); + struct timespec64 raw = timespec_to_timespec64(ts->ts_raw); /* Pass some events to kernel consumer if activated */ spin_lock_irqsave(&pps_kc_hardpps_lock, flags); if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode) - hardpps(&ts->ts_real, &ts->ts_raw); + hardpps(&real, &raw); spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags); } diff --git a/include/linux/timex.h b/include/linux/timex.h index 9d3f1a5..39c25db 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -152,7 +152,7 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) extern int do_adjtimex(struct timex *); -extern void hardpps(const struct timespec *, const struct timespec *); +extern void hardpps(const struct timespec64 *, const struct timespec64 *); int read_current_timer(unsigned long *timer_val); void ntp_notify_cmos_timer(void); diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index df68cb8..bd4fa62 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -99,7 +99,7 @@ static time64_t ntp_next_leap_sec = TIME64_MAX; static int pps_valid; /* signal watchdog counter */ static long pps_tf[3]; /* phase median filter */ static long pps_jitter; /* current jitter (ns) */ -static struct timespec pps_fbase; /* beginning of the last freq interval */ +static struct timespec64 pps_fbase; /* beginning of the last freq interval */ static int 
pps_shift; /* current interval duration (s) (shift) */ static int pps_intcnt; /* interval counter */ static s64 pps_freq; /* frequency offset (scaled ns/s) */ @@ -773,13 +773,13 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai) * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */ struct pps_normtime { - __kernel_time_t sec; /* seconds */ + s64 sec; /* seconds */ long nsec; /* nanoseconds */ }; /* normalize the timestamp so that nsec is in the ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */ -static inline struct pps_normtime pps_normalize_ts(struct timespec ts) +static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts) { struct pps_normtime norm = { .sec = ts.tv_sec, @@ -861,7 +861,7 @@ static long hardpps_update_freq(struct pps_normtime freq_norm) pps_errcnt++; pps_dec_freq_interval(); printk_deferred(KERN_ERR - "hardpps: PPSERROR: interval too long - %ld s\n", + "hardpps: PPSERROR: interval too long - %lld s\n", freq_norm.sec); return 0; } @@ -948,7 +948,7 @@ static void hardpps_update_phase(long error) * This code is based on David Mills's reference nanokernel * implementation. It was mostly rewritten but keeps the same idea. */ -void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) +void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts) { struct pps_normtime pts_norm, freq_norm; @@ -969,7 +969,7 @@ void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) } /* ok, now we have a base for frequency calculation */ - freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase)); + freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase)); /* check that the signal is in the range * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */ diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h index 6543050..af92447 100644 --- a/kernel/time/ntp_internal.h +++ b/kernel/time/ntp_internal.h @@ -9,5 +9,5 @@ extern ktime_t ntp_get_next_leap(void); extern int second_overflow(unsigned long secs); extern int ntp_validate_timex(struct timex *); extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *); -extern void __hardpps(const struct timespec *, const struct timespec *); +extern void __hardpps(const struct timespec64 *, const struct timespec64 *); #endif /* _LINUX_NTP_INTERNAL_H */ diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 3739ac6..177188b 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2025,7 +2025,7 @@ int do_adjtimex(struct timex *txc) /** * hardpps() - Accessor function to NTP __hardpps function */ -void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) +void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts) { unsigned long flags; -- cgit v0.10.2 From 071eee45b1650d53d21c636d344bdcebd4577ed2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 28 Sep 2015 22:21:29 +0200 Subject: ntp/pps: replace getnstime_raw_and_real with 64-bit version There is exactly one caller of getnstime_raw_and_real in the kernel, which is the pps_get_ts function. This changes the caller and the implementation to work on timespec64 types rather than timespec, to avoid the time_t overflow on 32-bit architectures. For consistency with the other new functions (ktime_get_seconds, ktime_get_real_*, ...), I'm renaming the function to ktime_get_raw_and_real_ts64. 
We still need to convert from the internal 64-bit type to 32 bit types in the caller, but this conversion is now pushed out from getnstime_raw_and_real to pps_get_ts. A follow-up patch changes the remaining pps code to completely avoid the conversion. Acked-by: Richard Cochran Acked-by: David S. Miller Reviewed-by: Thomas Gleixner Signed-off-by: Arnd Bergmann Signed-off-by: John Stultz diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index 1d2cd21..b2fbd62 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h @@ -115,7 +115,12 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt, static inline void pps_get_ts(struct pps_event_time *ts) { - getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real); + struct timespec64 raw, real; + + ktime_get_raw_and_real_ts64(&raw, &real); + + ts->ts_raw = timespec64_to_timespec(raw); + ts->ts_real = timespec64_to_timespec(real); } #else /* CONFIG_NTP_PPS */ diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index ba0ae09..ec89d84 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -263,8 +263,8 @@ extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); /* * PPS accessor */ -extern void getnstime_raw_and_real(struct timespec *ts_raw, - struct timespec *ts_real); +extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, + struct timespec64 *ts_real); /* * Persistent clock related interfaces diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 177188b..274ed5e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -849,7 +849,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds); #ifdef CONFIG_NTP_PPS /** - * getnstime_raw_and_real - get day and raw monotonic time in timespec format + * ktime_get_raw_and_real_ts64 - get day and raw monotonic time in timespec format * @ts_raw: pointer to the timespec to be set to raw monotonic time * @ts_real: pointer to the timespec to be set to the time of day * @@ -857,7 +857,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds); * same time atomically and stores the resulting timestamps in timespec * format. */ -void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) +void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real) { struct timekeeper *tk = &tk_core.timekeeper; unsigned long seq; @@ -868,7 +868,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) do { seq = read_seqcount_begin(&tk_core.seq); - *ts_raw = timespec64_to_timespec(tk->raw_time); + *ts_raw = tk->raw_time; ts_real->tv_sec = tk->xtime_sec; ts_real->tv_nsec = 0; @@ -877,10 +877,10 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) } while (read_seqcount_retry(&tk_core.seq, seq)); - timespec_add_ns(ts_raw, nsecs_raw); - timespec_add_ns(ts_real, nsecs_real); + timespec64_add_ns(ts_raw, nsecs_raw); + timespec64_add_ns(ts_real, nsecs_real); } -EXPORT_SYMBOL(getnstime_raw_and_real); +EXPORT_SYMBOL(ktime_get_raw_and_real_ts64); #endif /* CONFIG_NTP_PPS */ -- cgit v0.10.2 From 5fd96c421ff2c76ec441aa4139c3b87dfea93e3a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 28 Sep 2015 22:21:30 +0200 Subject: ntp: use timespec64 in sync_cmos_clock The sync_cmos_clock has one use of struct timespec, which we want to eventually replace with timespec64 or similar in the kernel. There is no way this one can overflow, but the conversion to timespec64 is trivial and has no other dependencies. 
Acked-by: Richard Cochran Acked-by: David S. Miller Reviewed-by: Thomas Gleixner Signed-off-by: Arnd Bergmann Signed-off-by: John Stultz diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index bd4fa62..149cc80 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -509,7 +509,7 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock); static void sync_cmos_clock(struct work_struct *work) { struct timespec64 now; - struct timespec next; + struct timespec64 next; int fail = 1; /* @@ -559,7 +559,7 @@ static void sync_cmos_clock(struct work_struct *work) next.tv_nsec -= NSEC_PER_SEC; } queue_delayed_work(system_power_efficient_wq, - &sync_cmos_work, timespec_to_jiffies(&next)); + &sync_cmos_work, timespec64_to_jiffies(&next)); } void ntp_notify_cmos_timer(void) -- cgit v0.10.2 From ade1bdffe90e59cd257cb9bd4f5abe4de5f14911 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 28 Sep 2015 22:21:31 +0200 Subject: ntp/pps: use y2038 safe types in pps_event_time The pps_event_time uses two 'timespec' structures internally, which suffer from the y2038 problem. The uses of this structure are fairly self-contained in the pps code, so this replaces them all at once. Unfortunately, this includes the sfc ethernet driver aside from the pps subsystem, so we change that one as well. Both touch the same data structure, and there probably is no good way to split the patch into smaller units. Acked-by: Richard Cochran Acked-by: David S. Miller Reviewed-by: Thomas Gleixner Signed-off-by: Arnd Bergmann Signed-off-by: John Stultz diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index ad62615..fe849db 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -646,28 +646,28 @@ static void efx_ptp_send_times(struct efx_nic *efx, struct pps_event_time *last_time) { struct pps_event_time now; - struct timespec limit; + struct timespec64 limit; struct efx_ptp_data *ptp = efx->ptp_data; - struct timespec start; + struct timespec64 start; int *mc_running = ptp->start.addr; pps_get_ts(&now); start = now.ts_real; limit = now.ts_real; - timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS); + timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS); /* Write host time for specified period or until MC is done */ - while ((timespec_compare(&now.ts_real, &limit) < 0) && + while ((timespec64_compare(&now.ts_real, &limit) < 0) && ACCESS_ONCE(*mc_running)) { - struct timespec update_time; + struct timespec64 update_time; unsigned int host_time; /* Don't update continuously to avoid saturating the PCIe bus */ update_time = now.ts_real; - timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); + timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); do { pps_get_ts(&now); - } while ((timespec_compare(&now.ts_real, &update_time) < 0) && + } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && ACCESS_ONCE(*mc_running)); /* Synchronise NIC with single word of time only */ @@ -723,7 +723,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), struct efx_ptp_data *ptp = efx->ptp_data; u32 last_sec; u32 start_sec; - struct timespec delta; + struct timespec64 delta; ktime_t mc_time; if (number_readings == 0) diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index cdad4d9..805c749 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c @@ -179,8 +179,8 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, /* check event type */ BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); - 
dev_dbg(pps->dev, "PPS event at %ld.%09ld\n", - ts->ts_real.tv_sec, ts->ts_real.tv_nsec); + dev_dbg(pps->dev, "PPS event at %lld.%09ld\n", + (s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec); timespec_to_pps_ktime(&ts_real, ts->ts_real); diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c index a16cea2..e219db1 100644 --- a/drivers/pps/kc.c +++ b/drivers/pps/kc.c @@ -113,12 +113,10 @@ void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts, int event) { unsigned long flags; - struct timespec64 real = timespec_to_timespec64(ts->ts_real); - struct timespec64 raw = timespec_to_timespec64(ts->ts_raw); /* Pass some events to kernel consumer if activated */ spin_lock_irqsave(&pps_kc_hardpps_lock, flags); if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode) - hardpps(&real, &raw); + hardpps(&ts->ts_real, &ts->ts_raw); spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags); } diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index b2fbd62..54bf148 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h @@ -48,9 +48,9 @@ struct pps_source_info { struct pps_event_time { #ifdef CONFIG_NTP_PPS - struct timespec ts_raw; + struct timespec64 ts_raw; #endif /* CONFIG_NTP_PPS */ - struct timespec ts_real; + struct timespec64 ts_real; }; /* The main struct */ @@ -105,7 +105,7 @@ extern void pps_event(struct pps_device *pps, struct pps_device *pps_lookup_dev(void const *cookie); static inline void timespec_to_pps_ktime(struct pps_ktime *kt, - struct timespec ts) + struct timespec64 ts) { kt->sec = ts.tv_sec; kt->nsec = ts.tv_nsec; @@ -115,29 +115,24 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt, static inline void pps_get_ts(struct pps_event_time *ts) { - struct timespec64 raw, real; - - ktime_get_raw_and_real_ts64(&raw, &real); - - ts->ts_raw = timespec64_to_timespec(raw); - ts->ts_real = timespec64_to_timespec(real); + ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real); } #else /* CONFIG_NTP_PPS */ static inline void pps_get_ts(struct pps_event_time *ts) { - getnstimeofday(&ts->ts_real); + ktime_get_real_ts64(&ts->ts_real); } #endif /* CONFIG_NTP_PPS */ /* Subtract known time delay from PPS event time(s) */ -static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta) +static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) { - ts->ts_real = timespec_sub(ts->ts_real, delta); + ts->ts_real = timespec64_sub(ts->ts_real, delta); #ifdef CONFIG_NTP_PPS - ts->ts_raw = timespec_sub(ts->ts_raw, delta); + ts->ts_raw = timespec64_sub(ts->ts_raw, delta); #endif } -- cgit v0.10.2 From 090e2edb4171dae4872c4eaae972dd3ccf96d4a8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 28 Sep 2015 22:21:32 +0200 Subject: net: sfc: avoid using timespec The sfc driver internally uses a time format based on 32-bit (unsigned) seconds and 32-bit nanoseconds. This means it will overflow in 2106, but the value we pass into it is a signed 32-bit tv_sec that already overflows in 2038 to a negative value. This patch changes the logic to use the lower 32 bits of the timespec64 tv_sec in efx_ptp_ns_to_s_ns, which will have the correct value beyond the overflow. While this does not change any of the register values, it lets us keep using the driver after we deprecate the use of the timespec type in the kernel. In the efx_ptp_process_times function, the change to use timespec64 is similar, in that the tv_sec portion is ignored anyway and we only care about the nanosecond portion that remains unchanged. 
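To make the overflow reasoning concrete: a signed 32-bit seconds count wraps in January 2038, while the unsigned low 32 bits of a 64-bit tv_sec stay correct until 2106, which is all the NIC's 32-bit-seconds format needs. A small standalone sketch (illustrative only; the timestamp constant is just an example date, not a value from the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 2039-01-01 00:00:00 UTC, i.e. past the 2038 boundary. */
        int64_t tv_sec = 2177452800LL;

        /* Signed 32-bit time_t: wraps in 2038 (the exact result of the
         * narrowing conversion is implementation-defined, but on the
         * usual two's-complement targets it goes negative). */
        int32_t s32 = (int32_t)tv_sec;

        /* Unsigned low 32 bits of the 64-bit tv_sec: still correct,
         * and only wraps in 2106. */
        uint32_t u32 = (uint32_t)tv_sec;

        printf("64-bit: %lld  s32: %d  u32: %u\n",
               (long long)tv_sec, s32, u32);
        return 0;
}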
Acked-by: Richard Cochran Acked-by: David S. Miller Reviewed-by: Thomas Gleixner Signed-off-by: Arnd Bergmann Signed-off-by: John Stultz diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index fe849db..c771e0a 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -401,8 +401,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) /* For Siena platforms NIC time is s and ns */ static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor) { - struct timespec ts = ns_to_timespec(ns); - *nic_major = ts.tv_sec; + struct timespec64 ts = ns_to_timespec64(ns); + *nic_major = (u32)ts.tv_sec; *nic_minor = ts.tv_nsec; } @@ -431,8 +431,8 @@ static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor, */ static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor) { - struct timespec ts = ns_to_timespec(ns); - u32 maj = ts.tv_sec; + struct timespec64 ts = ns_to_timespec64(ns); + u32 maj = (u32)ts.tv_sec; u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT + (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT); @@ -737,14 +737,14 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), */ for (i = 0; i < number_readings; i++) { s32 window, corrected; - struct timespec wait; + struct timespec64 wait; efx_ptp_read_timeset( MCDI_ARRAY_STRUCT_PTR(synch_buf, PTP_OUT_SYNCHRONIZE_TIMESET, i), &ptp->timeset[i]); - wait = ktime_to_timespec( + wait = ktime_to_timespec64( ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0)); window = ptp->timeset[i].window; corrected = window - wait.tv_nsec; @@ -803,7 +803,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), ptp->timeset[last_good].minor, 0); /* Calculate delay from NIC top of second to last_time */ - delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec; + delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec; /* Set PPS timestamp to match NIC top of second */ ptp->host_time_pps = *last_time; -- cgit v0.10.2 From cfed432d7f4114e16e0163bcfe65e96f0c304493 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Wed, 23 Sep 2015 13:19:19 +0200 Subject: clocksource: Remove return statement from void functions Signed-off-by: Guillaume Gomez Cc: John Stultz Link: http://lkml.kernel.org/r/CAAOQCfSDgmqSWDBsetau%2ByF8x0%2BDagCF_pfFw0p5xH_BKkKEog@mail.gmail.com Signed-off-by: Thomas Gleixner diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 174c594..de0ad66 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -595,16 +595,15 @@ static void __clocksource_select(bool skipcur) */ static void clocksource_select(void) { - return __clocksource_select(false); + __clocksource_select(false); } static void clocksource_select_fallback(void) { - return __clocksource_select(true); + __clocksource_select(true); } #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ - static inline void clocksource_select(void) { } static inline void clocksource_select_fallback(void) { } -- cgit v0.10.2 From 9fc4468d546b6eb55b0aa5b04b0c36238ebf57e7 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Fri, 2 Oct 2015 09:45:30 +0200 Subject: timers: Use __fls in apply_slack() In apply_slack(), find_last_bit() is applied to a bitmask consisting of precisely BITS_PER_LONG bits. Since mask is non-zero, we might as well eliminate the function call and use __fls() directly. On x86_64, this shaves 23 bytes of the only caller, mod_timer(). 
This also gets rid of Coverity CID 1192106, but that is a false positive: Coverity is not aware that mask != 0 implies that find_last_bit will not return BITS_PER_LONG. Signed-off-by: Rasmus Villemoes Cc: John Stultz Link: http://lkml.kernel.org/r/1443771931-6284-1-git-send-email-linux@rasmusvillemoes.dk Signed-off-by: Thomas Gleixner diff --git a/kernel/time/timer.c b/kernel/time/timer.c index d3f5e92..74591ba 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -874,7 +874,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires) if (mask == 0) return expires; - bit = find_last_bit(&mask, BITS_PER_LONG); + bit = __fls(mask); mask = (1UL << bit) - 1; -- cgit v0.10.2 From 6035519fcf5aa17084b41790cdc584d881d82c03 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Mon, 5 Oct 2015 18:16:57 -0700 Subject: timers, kselftest: Add 'adjtick' test to validate adjtimex() tick adjustments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Recently a kernel side NTP bug was fixed via the following commit: 2619d7e9c92d ("time: Fix timekeeping_freqadjust()'s incorrect use of abs() instead of abs64()") When the bug was reported it was difficult to detect, except by tweaking the adjtimex tick value, and noticing how quickly the adjustment took: https://lkml.org/lkml/2015/9/1/488 Thus this patch introduces a new test which manipulates the adjtimex tick value and validates that the results are what we expect. Signed-off-by: John Stultz Cc: Linus Torvalds Cc: Miroslav Lichvar Cc: Nuno Gonçalves Cc: Peter Zijlstra Cc: Prarit Bhargava Cc: Richard Cochran Cc: Shuah Khan Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1444094217-20258-1-git-send-email-john.stultz@linaro.org [ Tidied up the code and the changelog a bit. ] Signed-off-by: Ingo Molnar diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile index 89a3f44..4a1be1b 100644 --- a/tools/testing/selftests/timers/Makefile +++ b/tools/testing/selftests/timers/Makefile @@ -8,7 +8,7 @@ LDFLAGS += -lrt -lpthread TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \ inconsistency-check raw_skew threadtest rtctest -TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex change_skew \ +TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \ skew_consistency clocksource-switch leap-a-day \ leapcrash set-tai set-2038 @@ -24,6 +24,7 @@ include ../lib.mk run_destructive_tests: run_tests ./alarmtimer-suspend ./valid-adjtimex + ./adjtick ./change_skew ./skew_consistency ./clocksource-switch diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c new file mode 100644 index 0000000..9887fd5 --- /dev/null +++ b/tools/testing/selftests/timers/adjtick.c @@ -0,0 +1,221 @@ +/* adjtimex() tick adjustment test + * by: John Stultz + * (C) Copyright Linaro Limited 2015 + * Licensed under the GPLv2 + * + * To build: + * $ gcc adjtick.c -o adjtick -lrt + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include + +#ifdef KTEST +#include "../kselftest.h" +#else +static inline int ksft_exit_pass(void) +{ + exit(0); +} +static inline int ksft_exit_fail(void) +{ + exit(1); +} +#endif + +#define CLOCK_MONOTONIC_RAW 4 + +#define NSEC_PER_SEC 1000000000LL +#define USEC_PER_SEC 1000000 + +#define MILLION 1000000 + +long systick; + +long long llabs(long long val) +{ + if (val < 0) + val = -val; + return val; +} + +unsigned long long ts_to_nsec(struct timespec ts) +{ + return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; +} + +struct timespec nsec_to_ts(long long ns) +{ + struct timespec ts; + + ts.tv_sec = ns/NSEC_PER_SEC; + ts.tv_nsec = ns%NSEC_PER_SEC; + + return ts; +} + +long long diff_timespec(struct timespec start, struct timespec end) +{ + long long start_ns, end_ns; + + start_ns = ts_to_nsec(start); + end_ns = ts_to_nsec(end); + + return end_ns - start_ns; +} + +void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw) +{ + struct timespec start, mid, end; + long long diff = 0, tmp; + int i; + + clock_gettime(CLOCK_MONOTONIC, mon); + clock_gettime(CLOCK_MONOTONIC_RAW, raw); + + /* Try to get a more tightly bound pairing */ + for (i = 0; i < 3; i++) { + long long newdiff; + + clock_gettime(CLOCK_MONOTONIC, &start); + clock_gettime(CLOCK_MONOTONIC_RAW, &mid); + clock_gettime(CLOCK_MONOTONIC, &end); + + newdiff = diff_timespec(start, end); + if (diff == 0 || newdiff < diff) { + diff = newdiff; + *raw = mid; + tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2; + *mon = nsec_to_ts(tmp); + } + } +} + +long long get_ppm_drift(void) +{ + struct timespec mon_start, raw_start, mon_end, raw_end; + long long delta1, delta2, eppm; + + get_monotonic_and_raw(&mon_start, &raw_start); + + sleep(15); + + get_monotonic_and_raw(&mon_end, &raw_end); + + delta1 = diff_timespec(mon_start, mon_end); + delta2 = diff_timespec(raw_start, raw_end); + + eppm = (delta1*MILLION)/delta2 - MILLION; + + return eppm; +} + +int check_tick_adj(long tickval) +{ + long long eppm, ppm; + struct timex tx1; + + tx1.modes = ADJ_TICK; + tx1.modes |= ADJ_OFFSET; + tx1.modes |= ADJ_FREQUENCY; + tx1.modes |= ADJ_STATUS; + + tx1.status = STA_PLL; + tx1.offset = 0; + tx1.freq = 0; + tx1.tick = tickval; + + adjtimex(&tx1); + + sleep(1); + + ppm = ((long long)tickval * MILLION)/systick - MILLION; + printf("Estimating tick (act: %ld usec, %lld ppm): ", tickval, ppm); + + eppm = get_ppm_drift(); + printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm); + + tx1.modes = 0; + adjtimex(&tx1); + + if (tx1.offset || tx1.freq || tx1.tick != tickval) { + printf(" [ERROR]\n"); + printf("\tUnexpected adjtimex return values, make sure ntpd is not running.\n"); + return -1; + } + + /* + * Here we use 100ppm difference as an error bound. + * We likely should see better, but some coarse clocksources + * cannot match the HZ tick size accurately, so we have a + * internal correction factor that doesn't scale exactly + * with the adjustment, resulting in > 10ppm error during + * a 10% adjustment. 100ppm also gives us more breathing + * room for interruptions during the measurement. 
+ */ + if (llabs(eppm - ppm) > 100) { + printf(" [FAILED]\n"); + return -1; + } + printf(" [OK]\n"); + + return 0; +} + +int main(int argv, char **argc) +{ + struct timespec raw; + long tick, max, interval, err; + struct timex tx1; + + err = 0; + setbuf(stdout, NULL); + + if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) { + printf("ERR: NO CLOCK_MONOTONIC_RAW\n"); + return -1; + } + + printf("Each iteration takes about 15 seconds\n"); + + systick = sysconf(_SC_CLK_TCK); + systick = USEC_PER_SEC/sysconf(_SC_CLK_TCK); + max = systick/10; /* +/- 10% */ + interval = max/4; /* in 4 steps each side */ + + for (tick = (systick - max); tick < (systick + max); tick += interval) { + if (check_tick_adj(tick)) { + err = 1; + break; + } + } + + /* Reset things to zero */ + tx1.modes = ADJ_TICK; + tx1.modes |= ADJ_OFFSET; + tx1.modes |= ADJ_FREQUENCY; + + tx1.offset = 0; + tx1.freq = 0; + tx1.tick = systick; + + adjtimex(&tx1); + + if (err) + return ksft_exit_fail(); + + return ksft_exit_pass(); +} -- cgit v0.10.2 From 7c177d994eb9637302b79e80d331f48dfbe26368 Mon Sep 17 00:00:00 2001 From: Jason Low Date: Wed, 14 Oct 2015 12:07:53 -0700 Subject: posix_cpu_timer: Optimize fastpath_timer_check() In fastpath_timer_check(), the task_cputime() function is always called to compute the utime and stime values. However, this is not necessary if there are no per-thread timers to check for. This patch modifies the code such that we compute the task_cputime values only when there are per-thread timers set. Signed-off-by: Jason Low Reviewed-by: Oleg Nesterov Reviewed-by: Frederic Weisbecker Reviewed-by: Davidlohr Bueso Reviewed-by: George Spelvin Cc: Paul E. McKenney Cc: Steven Rostedt Cc: hideaki.kimura@hpe.com Cc: terry.rudd@hpe.com Cc: scott.norton@hpe.com Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1444849677-29330-2-git-send-email-jason.low2@hp.com Signed-off-by: Thomas Gleixner diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 892e3da..aa4b6f4 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -1117,17 +1117,12 @@ static inline int task_cputime_expired(const struct task_cputime *sample, static inline int fastpath_timer_check(struct task_struct *tsk) { struct signal_struct *sig; - cputime_t utime, stime; - - task_cputime(tsk, &utime, &stime); if (!task_cputime_zero(&tsk->cputime_expires)) { - struct task_cputime task_sample = { - .utime = utime, - .stime = stime, - .sum_exec_runtime = tsk->se.sum_exec_runtime - }; + struct task_cputime task_sample; + task_cputime(tsk, &task_sample.utime, &task_sample.stime); + task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime; if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) return 1; } -- cgit v0.10.2 From 934715a191e4be0c602d39455a7a74316f274d60 Mon Sep 17 00:00:00 2001 From: Jason Low Date: Wed, 14 Oct 2015 12:07:54 -0700 Subject: posix_cpu_timer: Check thread timers only when there are active thread timers The fastpath_timer_check() contains logic to check for if any timers are set by checking if !task_cputime_zero(). Similarly, we can do this before calling check_thread_timers(). In the case where there are only process-wide timers, this will skip all of the computations for per-thread timers when there are no per-thread timers. As suggested by George, we can put the task_cputime_zero() check in check_thread_timers(), since that is more of an optization to the function. Similarly, we move the existing check of cputimer->running to check_process_timers(). 
Signed-off-by: Jason Low Reviewed-by: Oleg Nesterov Reviewed-by: George Spelvin Cc: Paul E. McKenney Cc: Frederic Weisbecker Cc: Davidlohr Bueso Cc: Steven Rostedt Cc: hideaki.kimura@hpe.com Cc: terry.rudd@hpe.com Cc: scott.norton@hpe.com Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1444849677-29330-3-git-send-email-jason.low2@hp.com Signed-off-by: Thomas Gleixner diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index aa4b6f4..6f6e252 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -864,6 +864,13 @@ static void check_thread_timers(struct task_struct *tsk, unsigned long long expires; unsigned long soft; + /* + * If cputime_expires is zero, then there are no active + * per thread CPU timers. + */ + if (task_cputime_zero(&tsk->cputime_expires)) + return; + expires = check_timers_list(timers, firing, prof_ticks(tsk)); tsk_expires->prof_exp = expires_to_cputime(expires); @@ -962,6 +969,13 @@ static void check_process_timers(struct task_struct *tsk, unsigned long soft; /* + * If cputimer is not running, then there are no active + * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU). + */ + if (!READ_ONCE(tsk->signal->cputimer.running)) + return; + + /* * Collect the current process totals. */ thread_group_cputimer(tsk, &cputime); @@ -1169,12 +1183,8 @@ void run_posix_cpu_timers(struct task_struct *tsk) * put them on the firing list. */ check_thread_timers(tsk, &firing); - /* - * If there are any active process wide timers (POSIX 1.b, itimers, - * RLIMIT_CPU) cputimer must be running. - */ - if (READ_ONCE(tsk->signal->cputimer.running)) - check_process_timers(tsk, &firing); + + check_process_timers(tsk, &firing); /* * We must release these locks before taking any timer's lock. -- cgit v0.10.2 From d5c373eb5610686162ff50429f63f4c00c554799 Mon Sep 17 00:00:00 2001 From: Jason Low Date: Wed, 14 Oct 2015 12:07:55 -0700 Subject: posix_cpu_timer: Convert cputimer->running to bool In the next patch in this series, a new field 'checking_timer' will be added to 'struct thread_group_cputimer'. Both this and the existing 'running' integer field are just used as boolean values. To save space in the structure, we can make both of these fields booleans. This is a preparatory patch to convert the existing running integer field to a boolean. Suggested-by: George Spelvin Signed-off-by: Jason Low Reviewed: George Spelvin Cc: Oleg Nesterov Cc: Paul E. McKenney Cc: Frederic Weisbecker Cc: Davidlohr Bueso Cc: Steven Rostedt Cc: hideaki.kimura@hpe.com Cc: terry.rudd@hpe.com Cc: scott.norton@hpe.com Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1444849677-29330-4-git-send-email-jason.low2@hp.com Signed-off-by: Thomas Gleixner diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e38681f..c43b80f 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -59,7 +59,7 @@ extern struct fs_struct init_fs; .rlim = INIT_RLIMITS, \ .cputimer = { \ .cputime_atomic = INIT_CPUTIME_ATOMIC, \ - .running = 0, \ + .running = false, \ }, \ INIT_PREV_CPUTIME(sig) \ .cred_guard_mutex = \ diff --git a/include/linux/sched.h b/include/linux/sched.h index b7b9501..6c8504a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -617,15 +617,15 @@ struct task_cputime_atomic { /** * struct thread_group_cputimer - thread group interval timer counts * @cputime_atomic: atomic thread group interval timers. - * @running: non-zero when there are timers running and - * @cputime receives updates. 
+ * @running: true when there are timers running and + * @cputime_atomic receives updates. * * This structure contains the version of task_cputime, above, that is * used for thread group CPU timer calculations. */ struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; - int running; + bool running; }; #include diff --git a/kernel/fork.c b/kernel/fork.c index 2845623..6ac8942 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1101,7 +1101,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); if (cpu_limit != RLIM_INFINITY) { sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); - sig->cputimer.running = 1; + sig->cputimer.running = true; } /* The timer lists. */ diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 6f6e252..2d58153 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -249,7 +249,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) * but barriers are not required because update_gt_cputime() * can handle concurrent updates. */ - WRITE_ONCE(cputimer->running, 1); + WRITE_ONCE(cputimer->running, true); } sample_cputime_atomic(times, &cputimer->cputime_atomic); } @@ -918,7 +918,7 @@ static inline void stop_process_timers(struct signal_struct *sig) struct thread_group_cputimer *cputimer = &sig->cputimer; /* Turn off cputimer->running. This is done without locking. */ - WRITE_ONCE(cputimer->running, 0); + WRITE_ONCE(cputimer->running, false); } static u32 onecputick; -- cgit v0.10.2 From c8d75aa47dd585c9538a8205e9bb9847e12cfb84 Mon Sep 17 00:00:00 2001 From: Jason Low Date: Wed, 14 Oct 2015 12:07:56 -0700 Subject: posix_cpu_timer: Reduce unnecessary sighand lock contention It was found while running a database workload on large systems that significant time was spent trying to acquire the sighand lock. The issue was that whenever an itimer expired, many threads ended up simultaneously trying to send the signal. Most of the time, nothing happened after acquiring the sighand lock because another thread had just already sent the signal and updated the "next expire" time. The fastpath_timer_check() didn't help much since the "next expire" time was updated after the threads exit fastpath_timer_check(). This patch addresses this by having the thread_group_cputimer structure maintain a boolean to signify when a thread in the group is already checking for process wide timers, and adds extra logic in the fastpath to check the boolean. Signed-off-by: Jason Low Reviewed-by: Oleg Nesterov Reviewed-by: George Spelvin Cc: Paul E. 
McKenney Cc: Frederic Weisbecker Cc: Davidlohr Bueso Cc: Steven Rostedt Cc: hideaki.kimura@hpe.com Cc: terry.rudd@hpe.com Cc: scott.norton@hpe.com Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1444849677-29330-5-git-send-email-jason.low2@hp.com Signed-off-by: Thomas Gleixner diff --git a/include/linux/init_task.h b/include/linux/init_task.h index c43b80f..810a34f 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -60,6 +60,7 @@ extern struct fs_struct init_fs; .cputimer = { \ .cputime_atomic = INIT_CPUTIME_ATOMIC, \ .running = false, \ + .checking_timer = false, \ }, \ INIT_PREV_CPUTIME(sig) \ .cred_guard_mutex = \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 6c8504a..f87559d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -619,6 +619,8 @@ struct task_cputime_atomic { * @cputime_atomic: atomic thread group interval timers. * @running: true when there are timers running and * @cputime_atomic receives updates. + * @checking_timer: true when a thread in the group is in the + * process of checking for thread group timers. * * This structure contains the version of task_cputime, above, that is * used for thread group CPU timer calculations. @@ -626,6 +628,7 @@ struct task_cputime_atomic { struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; + bool checking_timer; }; #include diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 2d58153..f5e86d2 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -975,6 +975,12 @@ static void check_process_timers(struct task_struct *tsk, if (!READ_ONCE(tsk->signal->cputimer.running)) return; + /* + * Signify that a thread is checking for process timers. + * Write access to this field is protected by the sighand lock. + */ + sig->cputimer.checking_timer = true; + /* * Collect the current process totals. */ @@ -1029,6 +1035,8 @@ static void check_process_timers(struct task_struct *tsk, sig->cputime_expires.sched_exp = sched_expires; if (task_cputime_zero(&sig->cputime_expires)) stop_process_timers(sig); + + sig->cputimer.checking_timer = false; } /* @@ -1142,8 +1150,22 @@ static inline int fastpath_timer_check(struct task_struct *tsk) } sig = tsk->signal; - /* Check if cputimer is running. This is accessed without locking. */ - if (READ_ONCE(sig->cputimer.running)) { + /* + * Check if thread group timers expired when the cputimer is + * running and no other thread in the group is already checking + * for thread group cputimers. These fields are read without the + * sighand lock. However, this is fine because this is meant to + * be a fastpath heuristic to determine whether we should try to + * acquire the sighand lock to check/handle timers. + * + * In the worst case scenario, if 'running' or 'checking_timer' gets + * set but the current thread doesn't see the change yet, we'll wait + * until the next thread in the group gets a scheduler interrupt to + * handle the timer. This isn't an issue in practice because these + * types of delays with signals actually getting sent are expected. 
+ */ + if (READ_ONCE(sig->cputimer.running) && + !READ_ONCE(sig->cputimer.checking_timer)) { struct task_cputime group_sample; sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic); -- cgit v0.10.2 From fc686d0037d782c994e338ecb01bfef8bbafff9f Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 24 Aug 2015 15:14:30 +0200 Subject: clockevents/drivers/mtk: Fix spurious interrupt leading to crash After analysis done by Yingjoe Chen, the timer appears to have a pending interrupt when it is enabled. Fix this by acknowledging the pending interrupt when enabling the timer interrupt. Signed-off-by: Daniel Lezcano Tested-by: Yingjoe Chen diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c index 50f0641..c8badc4 100644 --- a/drivers/clocksource/mtk_timer.c +++ b/drivers/clocksource/mtk_timer.c @@ -141,14 +141,6 @@ static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void mtk_timer_global_reset(struct mtk_clock_event_device *evt) -{ - /* Disable all interrupts */ - writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG); - /* Acknowledge all interrupts */ - writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG); -} - static void mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option) { @@ -168,6 +160,12 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer) { u32 val; + /* Disable all interrupts */ + writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG); + + /* Acknowledge all spurious pending interrupts */ + writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG); + val = readl(evt->gpt_base + GPT_IRQ_EN_REG); writel(val | GPT_IRQ_ENABLE(timer), evt->gpt_base + GPT_IRQ_EN_REG); @@ -220,8 +218,6 @@ static void __init mtk_timer_init(struct device_node *node) } rate = clk_get_rate(clk); - mtk_timer_global_reset(evt); - if (request_irq(evt->dev.irq, mtk_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { pr_warn("failed to setup irq %d\n", evt->dev.irq); -- cgit v0.10.2 From f14665f63b92f958636b7e7610ec2c9726ea9335 Mon Sep 17 00:00:00 2001 From: Yingjoe Chen Date: Mon, 13 Jul 2015 17:32:46 +0800 Subject: clocksource/drivers/mediatek: Use GPT as sched clock source When cpu is in deep idle, arch timer will stop counting. Setup GPT as sched clock source so it can keep counting in idle. 
Signed-off-by: Yingjoe Chen Signed-off-by: Daniel Lezcano Acked-by: Stephen Boyd Acked-by: Matthias Brugger diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c index c8badc4..fbfc746 100644 --- a/drivers/clocksource/mtk_timer.c +++ b/drivers/clocksource/mtk_timer.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #define GPT_IRQ_EN_REG 0x00 @@ -59,6 +60,13 @@ struct mtk_clock_event_device { struct clock_event_device dev; }; +static void __iomem *gpt_sched_reg __read_mostly; + +static u64 notrace mtk_read_sched_clock(void) +{ + return readl_relaxed(gpt_sched_reg); +} + static inline struct mtk_clock_event_device *to_mtk_clk( struct clock_event_device *c) { @@ -230,6 +238,8 @@ static void __init mtk_timer_init(struct device_node *node) mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), node->name, rate, 300, 32, clocksource_mmio_readl_up); + gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC); + sched_clock_register(mtk_read_sched_clock, 32, rate); /* Configure clock event */ mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); -- cgit v0.10.2 From fdbd13105d08fed6a8201549f389e9c12021b67b Mon Sep 17 00:00:00 2001 From: Alexey Klimov Date: Thu, 10 Sep 2015 03:38:19 +0300 Subject: clocksource/drivers/em_sti: Remove unneeded memset()s Memory for cs and ced fields in struct em_sti_priv is allocated by devm_kzalloc() in the beginning of em_sti_probe() so they don't need to be zeroed one more time in em_sti_register_clocksource() and in em_sti_register_clockevent(). Signed-off-by: Alexey Klimov Signed-off-by: Daniel Lezcano diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index 7a97a34..19bb179 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c @@ -228,7 +228,6 @@ static int em_sti_register_clocksource(struct em_sti_priv *p) { struct clocksource *cs = &p->cs; - memset(cs, 0, sizeof(*cs)); cs->name = dev_name(&p->pdev->dev); cs->rating = 200; cs->read = em_sti_clocksource_read; @@ -285,7 +284,6 @@ static void em_sti_register_clockevent(struct em_sti_priv *p) { struct clock_event_device *ced = &p->ced; - memset(ced, 0, sizeof(*ced)); ced->name = dev_name(&p->pdev->dev); ced->features = CLOCK_EVT_FEAT_ONESHOT; ced->rating = 200; -- cgit v0.10.2 From 6eeb8c355fbbe66d1d52eadaa588d5651f8cb092 Mon Sep 17 00:00:00 2001 From: Alexey Klimov Date: Thu, 10 Sep 2015 03:38:20 +0300 Subject: clocksource/drivers/sh_cmt: Remove unneeded memset() in sh_cmt_setup() Memory for cmt struct is allocated by kzalloc() in sh_cmt_setup. Signed-off-by: Alexey Klimov Signed-off-by: Daniel Lezcano diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index ba73a6e..103c493 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -962,7 +962,6 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) unsigned int i; int ret; - memset(cmt, 0, sizeof(*cmt)); cmt->pdev = pdev; raw_spin_lock_init(&cmt->lock); -- cgit v0.10.2 From ba49af35be8a41af7f00a37902a2d70764078bc5 Mon Sep 17 00:00:00 2001 From: Alexey Klimov Date: Thu, 10 Sep 2015 03:38:21 +0300 Subject: clocksource/drivers/h8300_*: Remove unneeded memset()s Memory for timer16_priv, timer8_priv and tpu_priv structs is allocated by devm_kzalloc() in corresponding probe functions of drivers. No need to zero it one more time. 
Signed-off-by: Alexey Klimov Signed-off-by: Daniel Lezcano diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c index 82941c1..0e076c6 100644 --- a/drivers/clocksource/h8300_timer16.c +++ b/drivers/clocksource/h8300_timer16.c @@ -153,7 +153,6 @@ static int timer16_setup(struct timer16_priv *p, struct platform_device *pdev) int ret, irq; unsigned int ch; - memset(p, 0, sizeof(*p)); p->pdev = pdev; res[REG_CH] = platform_get_resource(p->pdev, diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c index f9b3b70..44375d8 100644 --- a/drivers/clocksource/h8300_timer8.c +++ b/drivers/clocksource/h8300_timer8.c @@ -215,7 +215,6 @@ static int timer8_setup(struct timer8_priv *p, int irq; int ret; - memset(p, 0, sizeof(*p)); p->pdev = pdev; res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c index 64195fd..5487410 100644 --- a/drivers/clocksource/h8300_tpu.c +++ b/drivers/clocksource/h8300_tpu.c @@ -123,7 +123,6 @@ static int __init tpu_setup(struct tpu_priv *p, struct platform_device *pdev) { struct resource *res[2]; - memset(p, 0, sizeof(*p)); p->pdev = pdev; res[CH_L] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_L); -- cgit v0.10.2 From 31f7987406497dc65408dee4e84e3f3cc3ffe9cd Mon Sep 17 00:00:00 2001 From: Alexey Klimov Date: Fri, 4 Sep 2015 02:49:58 +0300 Subject: clocksource/drivers/exynos_mct: Use container_of() instead of this_cpu_ptr() Since evt structure is embedded in per-CPU mevt structure it's definitely faster to use container_of() to get access to mevt if we have evt (for example as incoming function argument) instead of more expensive approach with this_cpu_ptr(&percpu_mct_tick). this_cpu_ptr() on per-CPU mevt structure leads to access to cp15 to get cpu id and arithmetic operations. Container_of() is cheaper since it's just one asm instruction. This should work if used evt pointer is correct and owned by local mevt structure. For example, before this patch set_state_shutdown() looks like: 4a4: e92d4010 push {r4, lr} 4a8: e3004000 movw r4, #0 4ac: ebfffffe bl 0 4b0: e3003000 movw r3, #0 4b4: e3404000 movt r4, #0 4b8: e3403000 movt r3, #0 4bc: e7933100 ldr r3, [r3, r0, lsl #2] 4c0: e0844003 add r4, r4, r3 4c4: e59400c0 ldr r0, [r4, #192] ; 0xc0 4c8: ebffffd4 bl 420 4cc: e3a00000 mov r0, #0 4d0: e8bd8010 pop {r4, pc} With this patch: 4a4: e92d4010 push {r4, lr} 4a8: e59000c0 ldr r0, [r0, #192] ; 0xc0 4ac: ebffffdb bl 420 4b0: e3a00000 mov r0, #0 4b4: e8bd8010 pop {r4, pc} Also, for me size of exynos_mct.o decreased from 84588 bytes to 83956. 
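The underlying mechanism is a single offset subtraction: given a pointer to a member embedded in a larger structure, container_of() recovers the enclosing object with no per-CPU lookup at all. A stripped-down userspace sketch of that pattern (container_of_sketch and the *_sketch structures are simplified stand-ins, not the kernel definitions):

#include <stdio.h>
#include <stddef.h>

/* Simplified container_of(): the kernel version adds type checking,
 * but the arithmetic is the same single subtraction. */
#define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct clock_event_device_sketch {
        int rating;
};

struct mct_clock_event_device_sketch {
        const char *name;
        struct clock_event_device_sketch evt;   /* embedded member */
};

int main(void)
{
        struct mct_clock_event_device_sketch mevt = { .name = "mct_tick0" };
        struct clock_event_device_sketch *evt = &mevt.evt;

        /* Recover the enclosing structure from the member pointer --
         * just an offset subtraction, no per-CPU data access. */
        struct mct_clock_event_device_sketch *back =
                container_of_sketch(evt, struct mct_clock_event_device_sketch, evt);

        printf("%s\n", back->name);
        return 0;
}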
Signed-off-by: Alexey Klimov Signed-off-by: Daniel Lezcano Reviewed-by: Krzysztof Kozlowski diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 029f96a..ff44082 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -382,24 +382,28 @@ static void exynos4_mct_tick_start(unsigned long cycles, static int exynos4_tick_set_next_event(unsigned long cycles, struct clock_event_device *evt) { - struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); + struct mct_clock_event_device *mevt; + mevt = container_of(evt, struct mct_clock_event_device, evt); exynos4_mct_tick_start(cycles, mevt); - return 0; } static int set_state_shutdown(struct clock_event_device *evt) { - exynos4_mct_tick_stop(this_cpu_ptr(&percpu_mct_tick)); + struct mct_clock_event_device *mevt; + + mevt = container_of(evt, struct mct_clock_event_device, evt); + exynos4_mct_tick_stop(mevt); return 0; } static int set_state_periodic(struct clock_event_device *evt) { - struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); + struct mct_clock_event_device *mevt; unsigned long cycles_per_jiffy; + mevt = container_of(evt, struct mct_clock_event_device, evt); cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) >> evt->shift); exynos4_mct_tick_stop(mevt); -- cgit v0.10.2 From f1c08c9bc8f475aa242270da30b6cc9232bd19e0 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Wed, 14 Oct 2015 11:24:17 +0200 Subject: clocksource/drivers/imx: Allow timer irq affinity change Allow the timer core to change the smp affinity of the broadcast timer irq by setting CLOCK_EVT_FEAT_DYNIRQ flag. For this to work the timer core needs to be told about the used irq. This reduces interrupt pressure and wakeups on CPU0 as well as vastly reducing the number of timer broadcast IPIs. Signed-off-by: Lucas Stach Signed-off-by: Daniel Lezcano diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c index 839aba9..99ec967 100644 --- a/drivers/clocksource/timer-imx-gpt.c +++ b/drivers/clocksource/timer-imx-gpt.c @@ -305,13 +305,14 @@ static int __init mxc_clockevent_init(struct imx_timer *imxtm) struct irqaction *act = &imxtm->act; ced->name = "mxc_timer1"; - ced->features = CLOCK_EVT_FEAT_ONESHOT; + ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; ced->set_state_shutdown = mxc_shutdown; ced->set_state_oneshot = mxc_set_oneshot; ced->tick_resume = mxc_shutdown; ced->set_next_event = imxtm->gpt->set_next_event; ced->rating = 200; ced->cpumask = cpumask_of(0); + ced->irq = imxtm->irq; clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per), 0xff, 0xfffffffe); -- cgit v0.10.2 From ccd63ce471c9b69663783e919ca4dba9967cd690 Mon Sep 17 00:00:00 2001 From: Marc Gonzalez Date: Fri, 9 Oct 2015 16:59:18 +0200 Subject: clocksource/drivers/tango_xtal: Add new timer for Tango SoCs Sigma Designs Tango platforms provide a 27 MHz crystal oscillator. Use it for clocksource, sched_clock, and delay_timer. 
Signed-off-by: Marc Gonzalez Signed-off-by: Daniel Lezcano diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index a7726db..50b68bc 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -279,6 +279,10 @@ config CLKSRC_MIPS_GIC depends on MIPS_GIC select CLKSRC_OF +config CLKSRC_TANGO_XTAL + bool + select CLKSRC_OF + config CLKSRC_PXA def_bool y if ARCH_PXA || ARCH_SA1100 select CLKSRC_OF if OF diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 5c00863..fc9348d 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o +obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o obj-$(CONFIG_H8300) += h8300_timer8.o diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c new file mode 100644 index 0000000..d297b30 --- /dev/null +++ b/drivers/clocksource/tango_xtal.c @@ -0,0 +1,66 @@ +#include +#include +#include +#include +#include +#include +#include + +static void __iomem *xtal_in_cnt; +static struct delay_timer delay_timer; + +static unsigned long notrace read_xtal_counter(void) +{ + return readl_relaxed(xtal_in_cnt); +} + +static u64 notrace read_sched_clock(void) +{ + return read_xtal_counter(); +} + +static cycle_t read_clocksource(struct clocksource *cs) +{ + return read_xtal_counter(); +} + +static struct clocksource tango_xtal = { + .name = "tango-xtal", + .rating = 350, + .read = read_clocksource, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static void __init tango_clocksource_init(struct device_node *np) +{ + struct clk *clk; + int xtal_freq, ret; + + xtal_in_cnt = of_iomap(np, 0); + if (xtal_in_cnt == NULL) { + pr_err("%s: invalid address\n", np->full_name); + return; + } + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("%s: invalid clock\n", np->full_name); + return; + } + + xtal_freq = clk_get_rate(clk); + delay_timer.freq = xtal_freq; + delay_timer.read_current_timer = read_xtal_counter; + + ret = clocksource_register_hz(&tango_xtal, xtal_freq); + if (ret != 0) { + pr_err("%s: registration failed\n", np->full_name); + return; + } + + sched_clock_register(read_sched_clock, 32, xtal_freq); + register_current_timer_delay(&delay_timer); +} + +CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init); -- cgit v0.10.2 From cb0f2538039c65f2bb64a9d427dbe9dd7d0f71a6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 19 Oct 2015 16:19:20 +0100 Subject: clocksource/drivers/armada-370-xp: Implement ARM delay timer Implement an ARM delay timer to be used for udelay() on Armada 37x platforms. This allows us to skip the delay loop calibration at boot, saving 180ms on the boot time of the kernel (which is around 10%). It also means that udelay() will be unaffected by CPU frequency changes when cpufreq is enabled on these platforms. 
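Once a delay timer is registered, the ARM core code implements udelay() essentially as a busy-wait on the free-running counter, which is why no boot-time loop calibration is needed and the delay is unaffected by cpufreq transitions. A userspace approximation of the idea (read_counter(), counter_freq and timer_udelay() are illustrative stand-ins, not the driver's TIMER0 register, clock rate or API):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Stand-in for reading a free-running hardware counter. */
static uint64_t read_counter(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static const uint64_t counter_freq = 1000000000ULL;     /* ticks per second */

static void timer_udelay(unsigned long usecs)
{
        uint64_t start = read_counter();
        uint64_t ticks = usecs * counter_freq / 1000000;

        /* Wait until the counter has advanced far enough -- no calibrated
         * instruction loop, so the delay does not depend on the CPU clock. */
        while (read_counter() - start < ticks)
                ;
}

int main(void)
{
        timer_udelay(100);      /* roughly 100 microseconds */
        printf("done\n");
        return 0;
}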
Signed-off-by: Russell King Signed-off-by: Daniel Lezcano Tested-by: Gregory CLEMENT Acked-by: Gregory CLEMENT diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 2162796f..d93ec3c 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -45,6 +45,8 @@ #include #include +#include + /* * Timer block registers. */ @@ -249,6 +251,15 @@ struct syscore_ops armada_370_xp_timer_syscore_ops = { .resume = armada_370_xp_timer_resume, }; +static unsigned long armada_370_delay_timer_read(void) +{ + return ~readl(timer_base + TIMER0_VAL_OFF); +} + +static struct delay_timer armada_370_delay_timer = { + .read_current_timer = armada_370_delay_timer_read, +}; + static void __init armada_370_xp_timer_common_init(struct device_node *np) { u32 clr = 0, set = 0; @@ -287,6 +298,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) TIMER0_RELOAD_EN | enable_mask, TIMER0_RELOAD_EN | enable_mask); + armada_370_delay_timer.freq = timer_clk; + register_current_timer_delay(&armada_370_delay_timer); + /* * Set scale and timer for sched_clock. */ -- cgit v0.10.2 From 3d45ac4b35cbdf942f2a45b2b927f2ef6a8bda48 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 19 Oct 2015 04:35:44 -0600 Subject: timers/x86/hpet: Type adjustments Standardize on bool instead of an inconsistent mixture of u8 and plain 'int'. Also use u32 or 'unsigned int' instead of 'unsigned long' when a 32-bit type suffices, generating slightly better code on x86-64. Signed-off-by: Jan Beulich Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/5624E3A002000078000AC49A@prv-mh.provo.novell.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 5fa9fb0..cc285ec 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h @@ -63,10 +63,10 @@ /* hpet memory map physical address */ extern unsigned long hpet_address; extern unsigned long force_hpet_address; -extern int boot_hpet_disable; +extern bool boot_hpet_disable; extern u8 hpet_blockid; -extern int hpet_force_user; -extern u8 hpet_msi_disable; +extern bool hpet_force_user; +extern bool hpet_msi_disable; extern int is_hpet_enabled(void); extern int hpet_enable(void); extern void hpet_disable(void); diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 9f9cc68..db9a675 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -584,7 +584,7 @@ static void __init intel_graphics_stolen(int num, int slot, int func) static void __init force_disable_hpet(int num, int slot, int func) { #ifdef CONFIG_HPET_TIMER - boot_hpet_disable = 1; + boot_hpet_disable = true; pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n"); #endif } diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 88b4da3..b8e6ff5 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -37,10 +37,10 @@ */ unsigned long hpet_address; u8 hpet_blockid; /* OS timer block num */ -u8 hpet_msi_disable; +bool hpet_msi_disable; #ifdef CONFIG_PCI_MSI -static unsigned long hpet_num_timers; +static unsigned int hpet_num_timers; #endif static void __iomem *hpet_virt_address; @@ -86,9 +86,9 @@ static inline void hpet_clear_mapping(void) /* * HPET command line enable / disable */ -int boot_hpet_disable; -int hpet_force_user; -static 
int hpet_verbose; +bool boot_hpet_disable; +bool hpet_force_user; +static bool hpet_verbose; static int __init hpet_setup(char *str) { @@ -98,11 +98,11 @@ static int __init hpet_setup(char *str) if (next) *next++ = 0; if (!strncmp("disable", str, 7)) - boot_hpet_disable = 1; + boot_hpet_disable = true; if (!strncmp("force", str, 5)) - hpet_force_user = 1; + hpet_force_user = true; if (!strncmp("verbose", str, 7)) - hpet_verbose = 1; + hpet_verbose = true; str = next; } return 1; @@ -111,7 +111,7 @@ __setup("hpet=", hpet_setup); static int __init disable_hpet(char *str) { - boot_hpet_disable = 1; + boot_hpet_disable = true; return 1; } __setup("nohpet", disable_hpet); @@ -124,7 +124,7 @@ static inline int is_hpet_capable(void) /* * HPET timer interrupt enable / disable */ -static int hpet_legacy_int_enabled; +static bool hpet_legacy_int_enabled; /** * is_hpet_enabled - check whether the hpet timer interrupt is enabled @@ -230,7 +230,7 @@ static struct clock_event_device hpet_clockevent; static void hpet_stop_counter(void) { - unsigned long cfg = hpet_readl(HPET_CFG); + u32 cfg = hpet_readl(HPET_CFG); cfg &= ~HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); } @@ -272,7 +272,7 @@ static void hpet_enable_legacy_int(void) cfg |= HPET_CFG_LEGACY; hpet_writel(cfg, HPET_CFG); - hpet_legacy_int_enabled = 1; + hpet_legacy_int_enabled = true; } static void hpet_legacy_clockevent_register(void) @@ -983,7 +983,7 @@ void hpet_disable(void) cfg = *hpet_boot_cfg; else if (hpet_legacy_int_enabled) { cfg &= ~HPET_CFG_LEGACY; - hpet_legacy_int_enabled = 0; + hpet_legacy_int_enabled = false; } cfg &= ~HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); @@ -1121,8 +1121,7 @@ EXPORT_SYMBOL_GPL(hpet_rtc_timer_init); static void hpet_disable_rtc_channel(void) { - unsigned long cfg; - cfg = hpet_readl(HPET_T1_CFG); + u32 cfg = hpet_readl(HPET_T1_CFG); cfg &= ~HPET_TN_ENABLE; hpet_writel(cfg, HPET_T1_CFG); } diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 176a0f9..cc457ff 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -524,7 +524,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU, */ static void force_disable_hpet_msi(struct pci_dev *unused) { - hpet_msi_disable = 1; + hpet_msi_disable = true; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, -- cgit v0.10.2 From 03f136a2074b2b8890da4a24df7104558ad0da48 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 14 Jul 2015 19:24:45 +0200 Subject: timeconst: Update path in comment Signed-off-by: Jason A. Donenfeld Cc: hofrat@osadl.org Link: http://lkml.kernel.org/r/1436894685-5868-1-git-send-email-Jason@zx2c4.com Signed-off-by: Thomas Gleixner diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc index c7388de..c486889 100644 --- a/kernel/time/timeconst.bc +++ b/kernel/time/timeconst.bc @@ -39,7 +39,7 @@ define fmuls(b,n,d) { } define timeconst(hz) { - print "/* Automatically generated by kernel/timeconst.bc */\n" + print "/* Automatically generated by kernel/time/timeconst.bc */\n" print "/* Time conversion constants for HZ == ", hz, " */\n" print "\n" -- cgit v0.10.2
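The HPET type cleanup earlier in this series follows two common kernel conventions: use bool for state that is only ever set, cleared and tested, and size variables to the register width (the HPET configuration registers are 32 bits, so u32 rather than unsigned long, which the changelog notes generates slightly better code on x86-64). A small illustrative fragment, with made-up accessors standing in for hpet_readl()/hpet_writel():

	#include <linux/types.h>

	#define EXAMPLE_CFG_ENABLE	0x1U

	static u32 example_cfg_reg;		/* stands in for a 32-bit hardware register */
	static bool example_int_enabled;	/* yes/no state: bool instead of int or u8 */

	static u32 example_readl(void)
	{
		return example_cfg_reg;
	}

	static void example_writel(u32 val)
	{
		example_cfg_reg = val;
	}

	static void example_stop_counter(void)
	{
		u32 cfg = example_readl();	/* u32 matches the 32-bit register */

		cfg &= ~EXAMPLE_CFG_ENABLE;
		example_writel(cfg);
		example_int_enabled = false;
	}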