From a939e817aa7e199d2fff05a67cb745be32dd5c2d Mon Sep 17 00:00:00 2001 From: John Stultz Date: Thu, 1 Mar 2012 22:11:09 -0800 Subject: time: x86: Fix race switching from vsyscall to non-vsyscall clock When switching from a vsyscall-capable to a non-vsyscall-capable clocksource, there was a small race where the last vsyscall gettimeofday before the switch might return an invalid time value, computed from the new non-vsyscall clocksource's values after the switch completed. This is due to the vsyscall code checking the vclock_mode once outside of the seqcount protected section. After it reads the vclock mode, it doesn't re-check that the clock data sampled in the seqcount critical section still matches. The fix is to sample vclock_mode inside the protected section, and as long as it isn't VCLOCK_NONE, return the calculated value. If it has changed and is now VCLOCK_NONE, fall back to the syscall gettime calculation. v2: * Cleanup checks as suggested by tglx * Also fix same issue present in gettimeofday path CC: Andy Lutomirski CC: Thomas Gleixner Signed-off-by: John Stultz diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 6bc0e72..7eeb1f6 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -70,14 +70,26 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) return ret; } +notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz) +{ + long ret; + + asm("syscall" : "=a" (ret) : + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); + return ret; +} + + notrace static inline long vgetns(void) { long v; cycles_t cycles; if (gtod->clock.vclock_mode == VCLOCK_TSC) cycles = vread_tsc(); - else + else if (gtod->clock.vclock_mode == VCLOCK_HPET) cycles = vread_hpet(); + else + return 0; v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask; return (v * gtod->clock.mult) >> gtod->clock.shift; } @@ -85,21 +97,28 @@ notrace static inline long vgetns(void) notrace static noinline int do_realtime(struct timespec *ts) { unsigned long seq, ns; + int mode; + do { seq = read_seqbegin(&gtod->lock); + mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->wall_time_sec; ts->tv_nsec = gtod->wall_time_nsec; ns = vgetns(); } while (unlikely(read_seqretry(&gtod->lock, seq))); + timespec_add_ns(ts, ns); - return 0; + return mode; } notrace static noinline int do_monotonic(struct timespec *ts) { unsigned long seq, ns, secs; + int mode; + do { seq = read_seqbegin(&gtod->lock); + mode = gtod->clock.vclock_mode; secs = gtod->wall_time_sec; ns = gtod->wall_time_nsec + vgetns(); secs += gtod->wall_to_monotonic.tv_sec; @@ -116,7 +135,7 @@ notrace static noinline int do_monotonic(struct timespec *ts) ts->tv_sec = secs; ts->tv_nsec = ns; - return 0; + return mode; } notrace static noinline int do_realtime_coarse(struct timespec *ts) @@ -156,14 +175,14 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts) notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) { + int ret = VCLOCK_NONE; + switch (clock) { case CLOCK_REALTIME: - if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) - return do_realtime(ts); + ret = do_realtime(ts); break; case CLOCK_MONOTONIC: - if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) - return do_monotonic(ts); + ret = do_monotonic(ts); break; case CLOCK_REALTIME_COARSE: return do_realtime_coarse(ts); @@ -171,32 +190,33 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) return do_monotonic_coarse(ts); } - return 
vdso_fallback_gettime(clock, ts); + if (ret == VCLOCK_NONE) + return vdso_fallback_gettime(clock, ts); + return 0; } int clock_gettime(clockid_t, struct timespec *) __attribute__((weak, alias("__vdso_clock_gettime"))); notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) { - long ret; - if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) { - if (likely(tv != NULL)) { - BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != - offsetof(struct timespec, tv_nsec) || - sizeof(*tv) != sizeof(struct timespec)); - do_realtime((struct timespec *)tv); - tv->tv_usec /= 1000; - } - if (unlikely(tz != NULL)) { - /* Avoid memcpy. Some old compilers fail to inline it */ - tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest; - tz->tz_dsttime = gtod->sys_tz.tz_dsttime; - } - return 0; + long ret = VCLOCK_NONE; + + if (likely(tv != NULL)) { + BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != + offsetof(struct timespec, tv_nsec) || + sizeof(*tv) != sizeof(struct timespec)); + ret = do_realtime((struct timespec *)tv); + tv->tv_usec /= 1000; } - asm("syscall" : "=a" (ret) : - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); - return ret; + if (unlikely(tz != NULL)) { + /* Avoid memcpy. Some old compilers fail to inline it */ + tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest; + tz->tz_dsttime = gtod->sys_tz.tz_dsttime; + } + + if (ret == VCLOCK_NONE) + return vdso_fallback_gtod(tv, tz); + return 0; } int gettimeofday(struct timeval *, struct timezone *) __attribute__((weak, alias("__vdso_gettimeofday"))); -- cgit v0.10.2 From f695cf94837de53864180400cbac42cfa370426f Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 14 Mar 2012 16:38:15 -0700 Subject: time: Fix change_clocksource locking change_clocksource() fails to grab locks or call timekeeping_update(), which leaves a race window for time inconsistencies. This adds proper locking and a call to timekeeping_update() to fix this. CC: Andy Lutomirski CC: Thomas Gleixner Signed-off-by: John Stultz diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 403c2a0..b53da5e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -448,9 +448,12 @@ EXPORT_SYMBOL(timekeeping_inject_offset); static int change_clocksource(void *data) { struct clocksource *new, *old; + unsigned long flags; new = (struct clocksource *) data; + write_seqlock_irqsave(&timekeeper.lock, flags); + timekeeping_forward_now(); if (!new->enable || new->enable(new) == 0) { old = timekeeper.clock; @@ -458,6 +461,10 @@ static int change_clocksource(void *data) if (old->disable) old->disable(old); } + timekeeping_update(true); + + write_sequnlock_irqrestore(&timekeeper.lock, flags); + return 0; } -- cgit v0.10.2 From a4ca1298d8a0472a45624fa5fb99f90f0f367187 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Feb 2012 19:46:03 +0000 Subject: time: Remove bogus comments There is no global irq lock which makes a syscall magically SMP safe. Remove the outdated comment concerning do_settimeofday() as well. Signed-off-by: Thomas Gleixner Signed-off-by: John Stultz diff --git a/kernel/time.c b/kernel/time.c index 73e416d..ba744cf 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -163,7 +163,6 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz) return error; if (tz) { - /* SMP safe, global irq locking makes it work. 
*/ sys_tz = *tz; update_vsyscall_tz(); if (firsttime) { @@ -173,12 +172,7 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz) } } if (tv) - { - /* SMP safe, again the code in arch/foo/time.c should - * globally block out interrupts when it runs. - */ return do_settimeofday(tv); - } return 0; } -- cgit v0.10.2 From 6c260d586343f7f78239d90aa9e2cfed02f74ff3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Feb 2012 19:46:04 +0000 Subject: x86: vdso: Remove bogus locking in update_vsyscall_tz() Changing the sequence count in update_vsyscall_tz() is completely pointless. The vdso code copies the data unprotected. There is no point to change this as sys_tz is nowhere protected at all. See sys_gettimeofday(). Reviewed-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Signed-off-by: John Stultz diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index b07ba93..33385c1 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -80,12 +80,7 @@ early_param("vsyscall", vsyscall_setup); void update_vsyscall_tz(void) { - unsigned long flags; - - write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); - /* sys_tz has changed */ vsyscall_gtod_data.sys_tz = sys_tz; - write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); } void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, -- cgit v0.10.2 From 2ab516575f2f273b19d95140d02c54612201e80a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Feb 2012 19:46:04 +0000 Subject: x86: vdso: Use seqcount instead of seqlock The update of the vdso data happens under xtime_lock, so adding a nested lock is pointless. Just use a seqcount to sync the readers. Reviewed-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Signed-off-by: John Stultz diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 815285b..1f00717 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -5,7 +5,7 @@ #include struct vsyscall_gtod_data { - seqlock_t lock; + seqcount_t seq; /* open coded 'struct timespec' */ time_t wall_time_sec; diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 33385c1..cdc95a7 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -52,10 +52,7 @@ #include "vsyscall_trace.h" DEFINE_VVAR(int, vgetcpu_mode); -DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = -{ - .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), -}; +DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data); static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; @@ -86,9 +83,7 @@ void update_vsyscall_tz(void) void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, struct clocksource *clock, u32 mult) { - unsigned long flags; - - write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); + write_seqcount_begin(&vsyscall_gtod_data.seq); /* copy vsyscall data */ vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; @@ -101,7 +96,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, vsyscall_gtod_data.wall_to_monotonic = *wtm; vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); - write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); + write_seqcount_end(&vsyscall_gtod_data.seq); } static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 7eeb1f6..944c5e5 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ 
b/arch/x86/vdso/vclock_gettime.c @@ -100,12 +100,12 @@ notrace static noinline int do_realtime(struct timespec *ts) int mode; do { - seq = read_seqbegin(&gtod->lock); + seq = read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->wall_time_sec; ts->tv_nsec = gtod->wall_time_nsec; ns = vgetns(); - } while (unlikely(read_seqretry(&gtod->lock, seq))); + } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); timespec_add_ns(ts, ns); return mode; @@ -117,13 +117,13 @@ notrace static noinline int do_monotonic(struct timespec *ts) int mode; do { - seq = read_seqbegin(&gtod->lock); + seq = read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; secs = gtod->wall_time_sec; ns = gtod->wall_time_nsec + vgetns(); secs += gtod->wall_to_monotonic.tv_sec; ns += gtod->wall_to_monotonic.tv_nsec; - } while (unlikely(read_seqretry(&gtod->lock, seq))); + } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec * are all guaranteed to be nonnegative. @@ -142,10 +142,10 @@ notrace static noinline int do_realtime_coarse(struct timespec *ts) { unsigned long seq; do { - seq = read_seqbegin(&gtod->lock); + seq = read_seqcount_begin(&gtod->seq); ts->tv_sec = gtod->wall_time_coarse.tv_sec; ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; - } while (unlikely(read_seqretry(&gtod->lock, seq))); + } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); return 0; } @@ -153,12 +153,12 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts) { unsigned long seq, ns, secs; do { - seq = read_seqbegin(&gtod->lock); + seq = read_seqcount_begin(&gtod->seq); secs = gtod->wall_time_coarse.tv_sec; ns = gtod->wall_time_coarse.tv_nsec; secs += gtod->wall_to_monotonic.tv_sec; ns += gtod->wall_to_monotonic.tv_nsec; - } while (unlikely(read_seqretry(&gtod->lock, seq))); + } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); /* wall_time_nsec and wall_to_monotonic.tv_nsec are * guaranteed to be between 0 and NSEC_PER_SEC. -- cgit v0.10.2 From 74a622be3db2898251cb524d1edbeaea589f2723 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Feb 2012 19:46:05 +0000 Subject: ia64: vsyscall: Use seqcount instead of seqlock The update of the vdso data happens under xtime_lock, so adding a nested lock is pointless. Just use a seqcount to sync the readers. 
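Taken together with the race fix at the top of this series, the discipline these seqcount conversions rely on can be summarized in a short sketch. This is a simplified model, not the kernel code itself: the names gtod_seq, gtod_vclock_mode, gtod_wall_time, sketch_update and sketch_do_realtime are invented for illustration. The writer, already serialized externally (by xtime_lock in the patches above), brackets its update with write_seqcount_begin()/write_seqcount_end(); the lockless reader retries until it sees a stable snapshot, and samples the clock mode inside the critical section so a racing switch to a non-vsyscall clocksource is detected and the caller can fall back to a real syscall.

#include <linux/seqlock.h>
#include <linux/time.h>

static seqcount_t gtod_seq;		/* set up with seqcount_init() */
static int gtod_vclock_mode;		/* VCLOCK_NONE, VCLOCK_TSC, ... */
static struct timespec gtod_wall_time;

/* Writer: assumed serialized externally, so a bare seqcount suffices. */
static void sketch_update(const struct timespec *ts, int mode)
{
	write_seqcount_begin(&gtod_seq);	/* count goes odd: readers retry */
	gtod_vclock_mode = mode;
	gtod_wall_time = *ts;
	write_seqcount_end(&gtod_seq);		/* count goes even: snapshot stable */
}

/* Lockless reader: returns the mode sampled inside the critical section,
 * so the caller can fall back to the syscall when it sees VCLOCK_NONE. */
static int sketch_do_realtime(struct timespec *ts)
{
	unsigned int seq;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod_seq);
		mode = gtod_vclock_mode;
		*ts = gtod_wall_time;
	} while (read_seqcount_retry(&gtod_seq, seq));

	return mode;
}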
Signed-off-by: Thomas Gleixner Cc: Tony Luck Signed-off-by: John Stultz diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index af56501..106aeb6 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -269,8 +269,8 @@ void foo(void) BLANK(); /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ - DEFINE(IA64_GTOD_LOCK_OFFSET, - offsetof (struct fsyscall_gtod_data_t, lock)); + DEFINE(IA64_GTOD_SEQ_OFFSET, + offsetof (struct fsyscall_gtod_data_t, seq); DEFINE(IA64_GTOD_WALL_TIME_OFFSET, offsetof (struct fsyscall_gtod_data_t, wall_time)); DEFINE(IA64_GTOD_MONO_TIME_OFFSET, diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 331d42b..fa77de7 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -174,7 +174,7 @@ ENTRY(fsys_set_tid_address) FSYS_RETURN END(fsys_set_tid_address) -#if IA64_GTOD_LOCK_OFFSET !=0 +#if IA64_GTOD_SEQ_OFFSET !=0 #error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t #endif #if IA64_ITC_JITTER_OFFSET !=0 diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h index 57d2ee6..146b15b 100644 --- a/arch/ia64/kernel/fsyscall_gtod_data.h +++ b/arch/ia64/kernel/fsyscall_gtod_data.h @@ -6,7 +6,7 @@ */ struct fsyscall_gtod_data_t { - seqlock_t lock; + seqcount_t seq; struct timespec wall_time; struct timespec monotonic_time; cycle_t clk_mask; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 43920de..8e991a0 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -35,9 +35,7 @@ static cycle_t itc_get_cycles(struct clocksource *cs); -struct fsyscall_gtod_data_t fsyscall_gtod_data = { - .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock), -}; +struct fsyscall_gtod_data_t fsyscall_gtod_data; struct itc_jitter_data_t itc_jitter_data; @@ -460,9 +458,7 @@ void update_vsyscall_tz(void) void update_vsyscall(struct timespec *wall, struct timespec *wtm, struct clocksource *c, u32 mult) { - unsigned long flags; - - write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags); + write_seqcount_begin(&fsyscall_gtod_data.seq); /* copy fsyscall clock data */ fsyscall_gtod_data.clk_mask = c->mask; @@ -485,6 +481,6 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm, fsyscall_gtod_data.monotonic_time.tv_sec++; } - write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags); + write_seqcount_end(&fsyscall_gtod_data.seq); } -- cgit v0.10.2 From 4a649903f91232d02284d53724b0a45728111767 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 6 Mar 2012 17:16:09 -0800 Subject: rtc: Provide flag for rtc devices that don't support UIE Richard Weinberger noticed that on some RTC hardware that doesn't support UIE mode due to coarse-grained alarms (like 1-minute resolution), the current virtualized RTC support doesn't properly error out when UIE is enabled. Instead the current code queues an alarm for the next second, but it won't fire until up to a minute later. This patch provides a generic way to flag this sort of hardware and fixes the issue on the mpc5121 where Richard noticed the problem. 
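From userspace the new flag is visible through the RTC character device: enabling update interrupts on such hardware now fails immediately with EINVAL instead of arming an alarm that fires up to a minute late. A minimal demonstration (illustrative only; it assumes the device node is /dev/rtc0):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}
	/* With uie_unsupported set, this now reports EINVAL up front. */
	if (ioctl(fd, RTC_UIE_ON, 0) < 0)
		perror("RTC_UIE_ON");
	else
		ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}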
CC: stable@vger.kernel.org Reported-by: Richard Weinberger Tested-by: Richard Weinberger Signed-off-by: John Stultz diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index dc87eda..eb415bd 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -458,6 +458,11 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) if (rtc->uie_rtctimer.enabled == enabled) goto out; + if (rtc->uie_unsupported) { + err = -EINVAL; + goto out; + } + if (enabled) { struct rtc_time tm; ktime_t now, onesec; diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c index 9d3cacc..613447a 100644 --- a/drivers/rtc/rtc-mpc5121.c +++ b/drivers/rtc/rtc-mpc5121.c @@ -360,6 +360,8 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op) &mpc5200_rtc_ops, THIS_MODULE); } + rtc->rtc->uie_unsupported = 1; + if (IS_ERR(rtc->rtc)) { err = PTR_ERR(rtc->rtc); goto out_free_irq; diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 93f4d03..fcabfb4 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -202,7 +202,8 @@ struct rtc_device struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ int pie_enabled; struct work_struct irqwork; - + /* Some hardware can't support UIE mode */ + int uie_unsupported; #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL struct work_struct uie_task; -- cgit v0.10.2 From 57779dc2b3b75bee05ef5d1ada47f615f7a13932 Mon Sep 17 00:00:00 2001 From: Alok Kataria Date: Tue, 21 Feb 2012 18:19:55 -0800 Subject: x86, tsc: Skip refined tsc calibration on systems with reliable TSC While running the latest Linux as guest under VMware in highly over-committed situations, we have seen cases where the refined TSC algorithm fails to get a valid tsc_start value in tsc_refine_calibration_work from multiple attempts. As a result the kernel keeps on scheduling the tsc_irqwork task for later. Subsequently, after several attempts, when it gets a valid start value it goes through the refined calibration and either bails out or uses the new results. Given that the kernel originally read the TSC frequency from the platform, which is the best it can get, I don't think there is much value in refining it. So for systems which get the TSC frequency from the platform we should skip the refined tsc algorithm. We can use the TSC_RELIABLE cpu cap flag to detect this; right now it is set only on VMware and for Moorestown/Penwell, both of which have their own TSC calibration methods. Signed-off-by: Alok N Kataria Cc: John Stultz Cc: Dirk Brandewie Cc: Alan Cox Cc: stable@kernel.org [jstultz: Reworked to simply not schedule the refining work, rather than scheduling the work and bombing out later] Signed-off-by: John Stultz diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index a62c201..6fcfcb3 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -932,6 +932,16 @@ static int __init init_tsc_clocksource(void) clocksource_tsc.rating = 0; clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; } + + /* + * Trust the results of the earlier calibration on systems + * exporting a reliable TSC. 
+ */ + if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { + clocksource_register_khz(&clocksource_tsc, tsc_khz); + return 0; + } + schedule_delayed_work(&tsc_irqwork, 0); return 0; } -- cgit v0.10.2 From 6b43ae8a619d17c4935c3320d2ef9e92bdeed05d Mon Sep 17 00:00:00 2001 From: John Stultz Date: Thu, 15 Mar 2012 13:04:03 -0700 Subject: ntp: Fix leap-second hrtimer livelock Since commit 7dffa3c673fbcf835cd7be80bb4aec8ad3f51168 the ntp subsystem has used an hrtimer for triggering the leapsecond adjustment. However, this can cause a potential livelock. Thomas diagnosed this as the following pattern:
CPU 0                                      CPU 1
do_adjtimex()
  spin_lock_irq(&ntp_lock);
  process_adjtimex_modes();                timer_interrupt()
    process_adj_status();                    do_timer()
      ntp_start_leap_timer();                  write_lock(&xtime_lock);
        hrtimer_start();                       update_wall_time();
          hrtimer_reprogram();                   ntp_tick_length()
            tick_program_event()                   spin_lock(&ntp_lock);
              clockevents_program_event()
                ktime_get()
                  seq = read_seqbegin(&xtime_lock);
This patch tries to avoid the problem by reverting to not using an hrtimer to inject leapseconds, and instead handling the leapsecond processing in the second_overflow() function. The downside to this change is that on systems that support highres timers, the leap second processing will occur on a HZ tick boundary (ie: ~1-10ms, depending on HZ) after the leap second instead of possibly sooner (~34us in my tests w/ x86_64 lapic). This patch applies on top of tip/timers/core. CC: Sasha Levin CC: Thomas Gleixner Reported-by: Sasha Levin Diagnosed-by: Thomas Gleixner Tested-by: Sasha Levin Signed-off-by: John Stultz diff --git a/include/linux/timex.h b/include/linux/timex.h index b75e186..99bc88b 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -252,7 +252,7 @@ extern void ntp_clear(void); /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */ extern u64 ntp_tick_length(void); -extern void second_overflow(void); +extern int second_overflow(unsigned long secs); extern int do_adjtimex(struct timex *); extern void hardpps(const struct timespec *, const struct timespec *); diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 6e039b1..3d17ebd 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -34,8 +34,6 @@ unsigned long tick_nsec; static u64 tick_length; static u64 tick_length_base; -static struct hrtimer leap_timer; - #define MAX_TICKADJ 500LL /* usecs */ #define MAX_TICKADJ_SCALED \ (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ) @@ -381,70 +379,63 @@ u64 ntp_tick_length(void) /* - * Leap second processing. If in leap-insert state at the end of the - * day, the system clock is set back one second; if in leap-delete - * state, the system clock is set ahead one second. + * this routine handles the overflow of the microsecond field + * + * The tricky bits of code to handle the accurate clock support + * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. + * They were originally developed for SUN and DEC kernels. + * All the kudos should go to Dave for this stuff. + * + * Also handles leap second processing, and returns leap offset */ -static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) +int second_overflow(unsigned long secs) { - enum hrtimer_restart res = HRTIMER_NORESTART; - unsigned long flags; + s64 delta; int leap = 0; + unsigned long flags; spin_lock_irqsave(&ntp_lock, flags); + + /* + * Leap second processing. 
If in leap-insert state at the end of the + * day, the system clock is set back one second; if in leap-delete + * state, the system clock is set ahead one second. + */ switch (time_state) { case TIME_OK: + if (time_status & STA_INS) + time_state = TIME_INS; + else if (time_status & STA_DEL) + time_state = TIME_DEL; break; case TIME_INS: - leap = -1; - time_state = TIME_OOP; - printk(KERN_NOTICE - "Clock: inserting leap second 23:59:60 UTC\n"); - hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC); - res = HRTIMER_RESTART; + if (secs % 86400 == 0) { + leap = -1; + time_state = TIME_OOP; + printk(KERN_NOTICE + "Clock: inserting leap second 23:59:60 UTC\n"); + } break; case TIME_DEL: - leap = 1; - time_tai--; - time_state = TIME_WAIT; - printk(KERN_NOTICE - "Clock: deleting leap second 23:59:59 UTC\n"); + if ((secs + 1) % 86400 == 0) { + leap = 1; + time_tai--; + time_state = TIME_WAIT; + printk(KERN_NOTICE + "Clock: deleting leap second 23:59:59 UTC\n"); + } break; case TIME_OOP: time_tai++; time_state = TIME_WAIT; - /* fall through */ + break; + case TIME_WAIT: if (!(time_status & (STA_INS | STA_DEL))) time_state = TIME_OK; break; } - spin_unlock_irqrestore(&ntp_lock, flags); - - /* - * We have to call this outside of the ntp_lock to keep - * the proper locking hierarchy - */ - if (leap) - timekeeping_leap_insert(leap); - - return res; -} - -/* - * this routine handles the overflow of the microsecond field - * - * The tricky bits of code to handle the accurate clock support - * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. - * They were originally developed for SUN and DEC kernels. - * All the kudos should go to Dave for this stuff. - */ -void second_overflow(void) -{ - s64 delta; - unsigned long flags; - spin_lock_irqsave(&ntp_lock, flags); /* Bump the maxerror field */ time_maxerror += MAXFREQ / NSEC_PER_USEC; @@ -481,8 +472,13 @@ void second_overflow(void) tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT; time_adjust = 0; + + + out: spin_unlock_irqrestore(&ntp_lock, flags); + + return leap; } #ifdef CONFIG_GENERIC_CMOS_UPDATE @@ -544,27 +540,6 @@ static void notify_cmos_timer(void) static inline void notify_cmos_timer(void) { } #endif -/* - * Start the leap seconds timer: - */ -static inline void ntp_start_leap_timer(struct timespec *ts) -{ - long now = ts->tv_sec; - - if (time_status & STA_INS) { - time_state = TIME_INS; - now += 86400 - now % 86400; - hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS); - - return; - } - - if (time_status & STA_DEL) { - time_state = TIME_DEL; - now += 86400 - (now + 1) % 86400; - hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS); - } -} /* * Propagate a new txc->status value into the NTP state: @@ -589,22 +564,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts) time_status &= STA_RONLY; time_status |= txc->status & ~STA_RONLY; - switch (time_state) { - case TIME_OK: - ntp_start_leap_timer(ts); - break; - case TIME_INS: - case TIME_DEL: - time_state = TIME_OK; - ntp_start_leap_timer(ts); - case TIME_WAIT: - if (!(time_status & (STA_INS | STA_DEL))) - time_state = TIME_OK; - break; - case TIME_OOP: - hrtimer_restart(&leap_timer); - break; - } } /* * Called with the xtime lock held, so we can access and modify @@ -686,9 +645,6 @@ int do_adjtimex(struct timex *txc) (txc->tick < 900000/USER_HZ || txc->tick > 1100000/USER_HZ)) return -EINVAL; - - if (txc->modes & ADJ_STATUS && time_state != TIME_OK) - hrtimer_cancel(&leap_timer); } if (txc->modes & 
ADJ_SETOFFSET) { @@ -1010,6 +966,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup); void __init ntp_init(void) { ntp_clear(); - hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); - leap_timer.function = ntp_leap_second; } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index b53da5e..5d76e09 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -184,18 +184,6 @@ static void timekeeping_update(bool clearntp) } -void timekeeping_leap_insert(int leapsecond) -{ - unsigned long flags; - - write_seqlock_irqsave(&timekeeper.lock, flags); - timekeeper.xtime.tv_sec += leapsecond; - timekeeper.wall_to_monotonic.tv_sec -= leapsecond; - timekeeping_update(false); - write_sequnlock_irqrestore(&timekeeper.lock, flags); - -} - /** * timekeeping_forward_now - update clock to the current time * @@ -969,9 +957,11 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) timekeeper.xtime_nsec += timekeeper.xtime_interval << shift; while (timekeeper.xtime_nsec >= nsecps) { + int leap; timekeeper.xtime_nsec -= nsecps; timekeeper.xtime.tv_sec++; - second_overflow(); + leap = second_overflow(timekeeper.xtime.tv_sec); + timekeeper.xtime.tv_sec += leap; } /* Accumulate raw time */ @@ -1082,9 +1072,11 @@ static void update_wall_time(void) * xtime.tv_nsec isn't larger then NSEC_PER_SEC */ if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) { + int leap; timekeeper.xtime.tv_nsec -= NSEC_PER_SEC; timekeeper.xtime.tv_sec++; - second_overflow(); + leap = second_overflow(timekeeper.xtime.tv_sec); + timekeeper.xtime.tv_sec += leap; } timekeeping_update(false); -- cgit v0.10.2 From ad30dfa94c5cc23931c822922a50bd163ab293a5 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Fri, 23 Mar 2012 15:52:25 -0700 Subject: alarmtimer: Make sure we initialize the rtctimer jonghwan Choi reported seeing warnings with the alarmtimer code at suspend/resume time, and pointed out that the rtctimer isn't being properly initialized. This patch corrects this issue. Reported-by: jonghwan Choi Signed-off-by: John Stultz diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 8a46f5d..c165488 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -46,9 +46,10 @@ static struct alarm_base { static ktime_t freezer_delta; static DEFINE_SPINLOCK(freezer_delta_lock); +static struct rtc_timer rtctimer; + #ifdef CONFIG_RTC_CLASS /* rtc timer and device for setting alarm wakeups at suspend */ -static struct rtc_timer rtctimer; static struct rtc_device *rtcdev; static DEFINE_SPINLOCK(rtcdev_lock); @@ -783,6 +784,8 @@ static int __init alarmtimer_init(void) .nsleep = alarm_timer_nsleep, }; + rtc_timer_init(&rtctimer, NULL, NULL); + posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock); posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock); -- cgit v0.10.2 From e919cfd42da54d400e7e0385f22cae3672dcf874 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Thu, 22 Mar 2012 19:14:46 -0700 Subject: time: Avoid scary backtraces when warning of > 11% adj Folks have been getting a number of warnings about time adjustments > 11%. The WARN_ON leaves a big useless backtrace, so this patch replaces it with a printk_once(). I'm still working to narrow down the cause of the > 11% adjustment. 
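The swap being made here is worth seeing side by side. The snippet below is purely illustrative (report_big_adjustment and its arguments are made up, not the timekeeping code): both variants fire only once per boot, but WARN_ONCE dumps a full register and stack trace while printk_once emits just the message.

#include <linux/kernel.h>

static void report_big_adjustment(long adj, long max)
{
	/* Before: one-shot warning, but with a scary backtrace. */
	WARN_ONCE(adj > max, "adjusting clock by %ld (max %ld)\n", adj, max);

	/* After: same one-shot behaviour, message only. */
	if (unlikely(adj > max))
		printk_once(KERN_WARNING
			    "adjusting clock by %ld (max %ld)\n", adj, max);
}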
Signed-off-by: John Stultz diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 5d76e09..16a175b 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -869,13 +869,15 @@ static void timekeeping_adjust(s64 offset) } else /* No adjustment needed */ return; - WARN_ONCE(timekeeper.clock->maxadj && - (timekeeper.mult + adj > timekeeper.clock->mult + - timekeeper.clock->maxadj), - "Adjusting %s more then 11%% (%ld vs %ld)\n", + if (unlikely(timekeeper.clock->maxadj && + (timekeeper.mult + adj > + timekeeper.clock->mult + timekeeper.clock->maxadj))) { + printk_once(KERN_WARNING + "Adjusting %s more than 11%% (%ld vs %ld)\n", timekeeper.clock->name, (long)timekeeper.mult + adj, (long)timekeeper.clock->mult + timekeeper.clock->maxadj); + } /* * So the following can be confusing. * -- cgit v0.10.2 From 335dd85895abeca1957d5eaa3013dfe8dc60c7d7 Mon Sep 17 00:00:00 2001 From: Cesar Eduardo Barros Date: Sat, 11 Feb 2012 17:54:59 -0200 Subject: time: remove no_sync_cmos_clock Commit 9863c90f682fba34cdc26c3437e8c00da6c83fa4 (x86, vmware: Remove deprecated VMI kernel support) removed the only place which set no_sync_cmos_clock. Since that commit, this variable is never set. Signed-off-by: Cesar Eduardo Barros Signed-off-by: John Stultz diff --git a/include/linux/time.h b/include/linux/time.h index b306178..97734e9 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -116,7 +116,6 @@ static inline struct timespec timespec_sub(struct timespec lhs, extern void read_persistent_clock(struct timespec *ts); extern void read_boot_clock(struct timespec *ts); extern int update_persistent_clock(struct timespec now); -extern int no_sync_cmos_clock __read_mostly; void timekeeping_init(void); extern int timekeeping_suspended; diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 3d17ebd..f03fd83 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -483,9 +483,6 @@ out: #ifdef CONFIG_GENERIC_CMOS_UPDATE -/* Disable the cmos update - used by virtualization and embedded */ -int no_sync_cmos_clock __read_mostly; - static void sync_cmos_clock(struct work_struct *work); static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock); @@ -532,8 +529,7 @@ static void sync_cmos_clock(struct work_struct *work) static void notify_cmos_timer(void) { - if (!no_sync_cmos_clock) - schedule_delayed_work(&sync_cmos_work, 0); + schedule_delayed_work(&sync_cmos_work, 0); } #else -- cgit v0.10.2 From 88b28adf6fcdd6d10a1cfc7765bb200d7366a265 Mon Sep 17 00:00:00 2001 From: Jim Cromie Date: Wed, 14 Mar 2012 21:28:56 -0600 Subject: kernel-time: fix s/then/than/ spelling errors Use than for comparisons, like more than. 
CC: John Stultz Signed-off-by: Jim Cromie Signed-off-by: John Stultz diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index a45ca16..c958338 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -500,7 +500,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs) { u64 ret; /* - * We won't try to correct for more then 11% adjustments (110,000 ppm), + * We won't try to correct for more than 11% adjustments (110,000 ppm), */ ret = (u64)cs->mult * 11; do_div(ret,100); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 16a175b..51b9856 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -822,7 +822,7 @@ static void timekeeping_adjust(s64 offset) int adj; /* - * The point of this is to check if the error is greater then half + * The point of this is to check if the error is greater than half * an interval. * * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs. @@ -830,7 +830,7 @@ static void timekeeping_adjust(s64 offset) * Note we subtract one in the shift, so that error is really error*2. * This "saves" dividing(shifting) interval twice, but keeps the * (error > interval) comparison as still measuring if error is - * larger then half an interval. + * larger than half an interval. * * Note: It does not "save" on aggravation when reading the code. */ @@ -838,7 +838,7 @@ static void timekeeping_adjust(s64 offset) if (error > interval) { /* * We now divide error by 4(via shift), which checks if - * the error is greater then twice the interval. + * the error is greater than twice the interval. * If it is greater, we need a bigadjust, if its smaller, * we can adjust by 1. */ @@ -949,7 +949,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; u64 raw_nsecs; - /* If the offset is smaller then a shifted interval, do nothing */ + /* If the offset is smaller than a shifted interval, do nothing */ if (offset < timekeeper.cycle_interval<<shift) return offset; while (offset >= timekeeper.cycle_interval) { @@ -1071,7 +1071,7 @@ static void update_wall_time(void) /* * Finally, make sure that after the rounding - * xtime.tv_nsec isn't larger then NSEC_PER_SEC + * xtime.tv_nsec isn't larger than NSEC_PER_SEC */ if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) { int leap; timekeeper.xtime.tv_nsec -= NSEC_PER_SEC; timekeeper.xtime.tv_sec++; -- cgit v0.10.2 From 91ec87d57fc38c529034e853687dfb7756de5406 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 22 Mar 2012 21:15:51 -0700 Subject: x86-64: Simplify and optimize vdso clock_gettime monotonic variants We used to store the wall-to-monotonic offset and the realtime base. It's faster to precompute the monotonic base. This is about a 3% speedup on Sandy Bridge for CLOCK_MONOTONIC. It's much more impressive for CLOCK_MONOTONIC_COARSE. 
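The heart of the optimization is moving one timespec addition from every reader into the comparatively rare update path. A simplified model (the struct and function names here are invented; the real fields are the monotonic_time_* members added in the diff below):

#include <linux/time.h>

struct vgtod_sketch {
	struct timespec wall_time;
	struct timespec monotonic_time;	/* precomputed: wall + wall_to_monotonic */
};

/* Update path (rare): pay for one timespec_add here... */
static void sketch_update(struct vgtod_sketch *d,
			  const struct timespec *wall,
			  const struct timespec *wtm)
{
	d->wall_time = *wall;
	d->monotonic_time = timespec_add(*wall, *wtm);
}

/* ...so every CLOCK_MONOTONIC reader (hot path) just copies the
 * precomputed base instead of summing and renormalizing two timespecs. */
static void sketch_read_monotonic(const struct vgtod_sketch *d,
				  struct timespec *ts)
{
	*ts = d->monotonic_time;
}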
Signed-off-by: Andy Lutomirski Signed-off-by: John Stultz diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 1f00717..8b38be2 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -7,11 +7,6 @@ struct vsyscall_gtod_data { seqcount_t seq; - /* open coded 'struct timespec' */ - time_t wall_time_sec; - u32 wall_time_nsec; - - struct timezone sys_tz; struct { /* extract of a clocksource struct */ int vclock_mode; cycle_t cycle_last; @@ -19,8 +14,16 @@ struct vsyscall_gtod_data { u32 mult; u32 shift; } clock; - struct timespec wall_to_monotonic; + + /* open coded 'struct timespec' */ + time_t wall_time_sec; + u32 wall_time_nsec; + u32 monotonic_time_nsec; + time_t monotonic_time_sec; + + struct timezone sys_tz; struct timespec wall_time_coarse; + struct timespec monotonic_time_coarse; }; extern struct vsyscall_gtod_data vsyscall_gtod_data; diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index cdc95a7..4285f1f 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -84,6 +84,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, struct clocksource *clock, u32 mult) { write_seqcount_begin(&vsyscall_gtod_data.seq); + struct timespec monotonic; /* copy vsyscall data */ vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; @@ -91,10 +92,17 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, vsyscall_gtod_data.clock.mask = clock->mask; vsyscall_gtod_data.clock.mult = mult; vsyscall_gtod_data.clock.shift = clock->shift; + vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; - vsyscall_gtod_data.wall_to_monotonic = *wtm; + + monotonic = timespec_add(*wall_time, *wtm); + vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec; + vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec; + vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); + vsyscall_gtod_data.monotonic_time_coarse = + timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm); write_seqcount_end(&vsyscall_gtod_data.seq); } diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 944c5e5..6eea70b8 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -113,27 +113,17 @@ notrace static noinline int do_realtime(struct timespec *ts) notrace static noinline int do_monotonic(struct timespec *ts) { - unsigned long seq, ns, secs; + unsigned long seq, ns; int mode; do { seq = read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; - secs = gtod->wall_time_sec; - ns = gtod->wall_time_nsec + vgetns(); - secs += gtod->wall_to_monotonic.tv_sec; - ns += gtod->wall_to_monotonic.tv_nsec; + ts->tv_sec = gtod->monotonic_time_sec; + ts->tv_nsec = gtod->monotonic_time_nsec; + ns = vgetns(); } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); - - /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec - * are all guaranteed to be nonnegative. 
- */ - while (ns >= NSEC_PER_SEC) { - ns -= NSEC_PER_SEC; - ++secs; - } - ts->tv_sec = secs; - ts->tv_nsec = ns; + timespec_add_ns(ts, ns); return mode; } @@ -151,25 +141,13 @@ notrace static noinline int do_realtime_coarse(struct timespec *ts) notrace static noinline int do_monotonic_coarse(struct timespec *ts) { - unsigned long seq, ns, secs; + unsigned long seq; do { seq = read_seqcount_begin(&gtod->seq); - secs = gtod->wall_time_coarse.tv_sec; - ns = gtod->wall_time_coarse.tv_nsec; - secs += gtod->wall_to_monotonic.tv_sec; - ns += gtod->wall_to_monotonic.tv_nsec; + ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; + ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); - /* wall_time_nsec and wall_to_monotonic.tv_nsec are - * guaranteed to be between 0 and NSEC_PER_SEC. - */ - if (ns >= NSEC_PER_SEC) { - ns -= NSEC_PER_SEC; - ++secs; - } - ts->tv_sec = secs; - ts->tv_nsec = ns; - return 0; } -- cgit v0.10.2 From 5f293474c4c6c4dc2baaf2dfd486748b5986de76 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 22 Mar 2012 21:15:52 -0700 Subject: x86-64: Inline vdso clock_gettime helpers This is about a 3% speedup on Sandy Bridge. Signed-off-by: Andy Lutomirski Signed-off-by: John Stultz diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 6eea70b8..885eff4 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -94,7 +94,8 @@ notrace static inline long vgetns(void) return (v * gtod->clock.mult) >> gtod->clock.shift; } -notrace static noinline int do_realtime(struct timespec *ts) +/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */ +notrace static int __always_inline do_realtime(struct timespec *ts) { unsigned long seq, ns; int mode; @@ -111,7 +112,7 @@ notrace static noinline int do_realtime(struct timespec *ts) return mode; } -notrace static noinline int do_monotonic(struct timespec *ts) +notrace static int do_monotonic(struct timespec *ts) { unsigned long seq, ns; int mode; @@ -128,7 +129,7 @@ notrace static noinline int do_monotonic(struct timespec *ts) return mode; } -notrace static noinline int do_realtime_coarse(struct timespec *ts) +notrace static int do_realtime_coarse(struct timespec *ts) { unsigned long seq; do { @@ -139,7 +140,7 @@ notrace static noinline int do_realtime_coarse(struct timespec *ts) return 0; } -notrace static noinline int do_monotonic_coarse(struct timespec *ts) +notrace static int do_monotonic_coarse(struct timespec *ts) { unsigned long seq; do { -- cgit v0.10.2 From 68fe7b23d559763a2e19e5fc1cf7036e4aaecb10 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 24 Mar 2012 09:29:22 +0100 Subject: x86: vdso: Put declaration before code Sigh, warnings are there for a reason. 
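The warning in question comes from -Wdeclaration-after-statement, which the kernel build enables: the previous patch introduced a declaration after a statement, which is invalid C90. A standalone illustration, compiled with gcc -Wdeclaration-after-statement:

#include <stdio.h>

int main(void)
{
	printf("statement first\n");
	int x = 1;	/* warning: ISO C90 forbids mixed declarations and code */
	return x - 1;
}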
Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: John Stultz diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 4285f1f..d5c6986 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -83,9 +83,10 @@ void update_vsyscall_tz(void) void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, struct clocksource *clock, u32 mult) { - write_seqcount_begin(&vsyscall_gtod_data.seq); struct timespec monotonic; + write_seqcount_begin(&vsyscall_gtod_data.seq); + /* copy vsyscall data */ vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; -- cgit v0.10.2 From c5e14e763046b11dd8bf57b5dc9f3ab444af8e60 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 24 Mar 2012 12:46:23 +0100 Subject: alarmtimer: Don't call rtc_timer_init() when CONFIG_RTC_CLASS=n rtc_timer_init() is not available when CONFIG_RTC_CLASS=n. Provide a proper wrapper in the RTC section of alarmtimer.c Reported-by: Ingo Molnar Signed-off-by: Thomas Gleixner Cc: John Stultz diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index c165488..8a538c5 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -46,10 +46,9 @@ static struct alarm_base { static ktime_t freezer_delta; static DEFINE_SPINLOCK(freezer_delta_lock); -static struct rtc_timer rtctimer; - #ifdef CONFIG_RTC_CLASS /* rtc timer and device for setting alarm wakeups at suspend */ +static struct rtc_timer rtctimer; static struct rtc_device *rtcdev; static DEFINE_SPINLOCK(rtcdev_lock); @@ -97,6 +96,11 @@ static int alarmtimer_rtc_add_device(struct device *dev, return 0; } +static inline void alarmtimer_rtc_timer_init(void) +{ + rtc_timer_init(&rtctimer, NULL, NULL); +} + static struct class_interface alarmtimer_rtc_interface = { .add_dev = &alarmtimer_rtc_add_device, }; @@ -118,6 +122,7 @@ static inline struct rtc_device *alarmtimer_get_rtcdev(void) #define rtcdev (NULL) static inline int alarmtimer_rtc_interface_setup(void) { return 0; } static inline void alarmtimer_rtc_interface_remove(void) { } +static inline void alarmtimer_rtc_timer_init(void) { } #endif /** @@ -784,7 +789,7 @@ static int __init alarmtimer_init(void) .nsleep = alarm_timer_nsleep, }; - rtc_timer_init(&rtctimer, NULL, NULL); + alarmtimer_rtc_timer_init(); posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock); posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock); -- cgit v0.10.2 From 646783a389828e76e813f50791f7999429c821bc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 27 Mar 2012 15:06:15 +0200 Subject: ia64: vsyscall: Add missing parenthesis commit 74a622b (ia64: vsyscall: Use seqcount instead of seqlock) broke the ia64 build. Reported-by: Tony Luck Signed-off-by: Thomas Gleixner diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 106aeb6..a48bd9a 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -270,7 +270,7 @@ void foo(void) /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ DEFINE(IA64_GTOD_SEQ_OFFSET, - offsetof (struct fsyscall_gtod_data_t, seq); + offsetof (struct fsyscall_gtod_data_t, seq)); DEFINE(IA64_GTOD_WALL_TIME_OFFSET, offsetof (struct fsyscall_gtod_data_t, wall_time)); DEFINE(IA64_GTOD_MONO_TIME_OFFSET, -- cgit v0.10.2
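As background for the ia64 asm-offsets changes in this series: asm-offsets.c exists so assembly such as fsys.S can address C struct members by numeric offset; the build compiles the file and scrapes each DEFINE() into a generated header, which is why an unbalanced parenthesis there breaks the build outright. A minimal sketch of the idiom with a hypothetical struct (the real entries are the IA64_GTOD_* ones above):

#include <linux/kbuild.h>
#include <linux/stddef.h>

struct demo_gtod {
	unsigned int seq;
	long wall_sec;
};

void foo(void)
{
	/* Each DEFINE() becomes an asm marker the build turns into a
	 * #define in the generated asm-offsets.h for assembly to use. */
	DEFINE(DEMO_GTOD_SEQ_OFFSET, offsetof(struct demo_gtod, seq));
	DEFINE(DEMO_GTOD_WALL_SEC_OFFSET, offsetof(struct demo_gtod, wall_sec));
}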