author    | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2013-10-17 10:38:17 (GMT)
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2013-10-22 07:16:40 (GMT)
commit    | 8c071b0f19dfa230335d22ce56a8fab5bd20cedc (patch)
tree      | ffd0d6272cd4e3f33b3069b22cb52457030a7f7a /arch
parent    | 9784bd4f1a6ea736ad9bf241f5a965e0a2913a5e (diff)
download  | linux-fsl-qoriq-8c071b0f19dfa230335d22ce56a8fab5bd20cedc.tar.xz
s390/time: correct use of store clock fast
The result of the store-clock-fast (STCKF) instruction is slightly fuzzy: the
value stored on one CPU can be smaller than the value stored on another CPU,
even though the stores were performed in the opposite order. This can cause
deltas of get_tod_clock() values to become negative when they should not be.
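
To make the problem concrete, here is a minimal sketch, not part of the patch (the function name tod_delta_example is hypothetical), of how the pre-patch STCKF-backed get_tod_clock() could produce a bogus delta when the two reads happen on different CPUs:

    #include <asm/timex.h>

    /*
     * Illustrative sketch only: with the pre-patch STCKF-backed
     * get_tod_clock(), a delta between timestamps stored on two
     * different CPUs can wrap to a huge unsigned value even though
     * the second store logically happened after the first.
     */
    static unsigned long long tod_delta_example(void)
    {
            unsigned long long t0, t1;

            t0 = get_tod_clock();   /* e.g. stored on CPU 0 */
            /* ... execution migrates and continues on CPU 1 ... */
            t1 = get_tod_clock();   /* e.g. stored on CPU 1, logically later */

            /* may wrap to a huge value if t1 < t0 despite the store order */
            return t1 - t0;
    }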
We need to be more careful with store-clock-fast; this patch partially
reverts git commit e4b7b4238e666682555461fa52eecd74652f36bb "time:
always use stckf instead of stck if available". The get_tod_clock()
function now uses the store-clock-extended (STCKE) instruction.
get_tod_clock_fast() can be used if the fuzziness of store-clock-fast
is acceptable e.g. for wait loops local to a CPU.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
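
In practice the patch establishes a simple rule: use get_tod_clock() (STCKE) whenever the value may be compared with timestamps taken on other CPUs, and get_tod_clock_fast() (STCKF) only for CPU-local uses such as busy-wait loops. A hedged sketch of that rule, with the helper names taken from the patched asm/timex.h and the surrounding function purely hypothetical:

    #include <asm/timex.h>
    #include <asm/processor.h>

    /*
     * Sketch of the usage rule this patch establishes; the function is
     * hypothetical, the helpers come from the patched asm/timex.h.
     */
    static void tod_usage_example(void)
    {
            unsigned long long stamp, end;

            /* timestamp that may be compared with stamps from other CPUs */
            stamp = get_tod_clock();

            /* CPU-local busy wait: the fast, fuzzier variant is enough */
            /* (1 microsecond corresponds to 1 << 12 TOD clock units)   */
            end = get_tod_clock_fast() + (100ULL << 12);
            while (get_tod_clock_fast() < end)
                    cpu_relax();

            (void)stamp;
    }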
Diffstat (limited to 'arch')
-rw-r--r-- | arch/s390/include/asm/timex.h | 28
-rw-r--r-- | arch/s390/kernel/debug.c      |  2
-rw-r--r-- | arch/s390/kvm/interrupt.c     |  6
-rw-r--r-- | arch/s390/lib/delay.c         | 14
4 files changed, 25 insertions, 25 deletions
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8ad8af9..819b94d 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -71,30 +71,30 @@ static inline void local_tick_enable(unsigned long long comp)
 
 typedef unsigned long long cycles_t;
 
-static inline unsigned long long get_tod_clock(void)
-{
-        unsigned long long clk;
-
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-        asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
-#else
-        asm volatile("stck %0" : "=Q" (clk) : : "cc");
-#endif
-        return clk;
-}
-
 static inline void get_tod_clock_ext(char *clk)
 {
         asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
-static inline unsigned long long get_tod_clock_xt(void)
+static inline unsigned long long get_tod_clock(void)
 {
         unsigned char clk[16];
         get_tod_clock_ext(clk);
         return *((unsigned long long *)&clk[1]);
 }
 
+static inline unsigned long long get_tod_clock_fast(void)
+{
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+        unsigned long long clk;
+
+        asm volatile("stckf %0" : "=Q" (clk) : : "cc");
+        return clk;
+#else
+        return get_tod_clock();
+#endif
+}
+
 static inline cycles_t get_cycles(void)
 {
         return (cycles_t) get_tod_clock() >> 2;
@@ -125,7 +125,7 @@ extern u64 sched_clock_base_cc;
  */
 static inline unsigned long long get_tod_clock_monotonic(void)
 {
-        return get_tod_clock_xt() - sched_clock_base_cc;
+        return get_tod_clock() - sched_clock_base_cc;
 }
 
 /**
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index f1279dc..17d62fe 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -867,7 +867,7 @@ static inline void
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
                    int exception)
 {
-        active->id.stck = get_tod_clock();
+        active->id.stck = get_tod_clock_fast();
         active->id.fields.cpuid = smp_processor_id();
         active->caller = __builtin_return_address(0);
         active->id.fields.exception = exception;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 7f35cb3..7f1f7ac 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
         }
 
         if ((!rc) && (vcpu->arch.sie_block->ckc <
-                get_tod_clock() + vcpu->arch.sie_block->epoch)) {
+                get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
                 if ((!psw_extint_disabled(vcpu)) &&
                         (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                         rc = 1;
@@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                 goto no_timer;
         }
 
-        now = get_tod_clock() + vcpu->arch.sie_block->epoch;
+        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
         if (vcpu->arch.sie_block->ckc < now) {
                 __unset_cpu_idle(vcpu);
                 return 0;
@@ -515,7 +515,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
         }
 
         if ((vcpu->arch.sie_block->ckc <
-                get_tod_clock() + vcpu->arch.sie_block->epoch))
+                get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                 __try_deliver_ckc_interrupt(vcpu);
 
         if (atomic_read(&fi->active)) {
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 57c87d7..a9f3d00 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -44,7 +44,7 @@ static void __udelay_disabled(unsigned long long usecs)
         do {
                 set_clock_comparator(end);
                 vtime_stop_cpu();
-        } while (get_tod_clock() < end);
+        } while (get_tod_clock_fast() < end);
         lockdep_on();
         __ctl_load(cr0, 0, 0);
         __ctl_load(cr6, 6, 6);
@@ -55,7 +55,7 @@ static void __udelay_enabled(unsigned long long usecs)
 {
         u64 clock_saved, end;
 
-        end = get_tod_clock() + (usecs << 12);
+        end = get_tod_clock_fast() + (usecs << 12);
         do {
                 clock_saved = 0;
                 if (end < S390_lowcore.clock_comparator) {
@@ -65,7 +65,7 @@ static void __udelay_enabled(unsigned long long usecs)
                 vtime_stop_cpu();
                 if (clock_saved)
                         local_tick_enable(clock_saved);
-        } while (get_tod_clock() < end);
+        } while (get_tod_clock_fast() < end);
 }
 
 /*
@@ -109,8 +109,8 @@ void udelay_simple(unsigned long long usecs)
 {
         u64 end;
 
-        end = get_tod_clock() + (usecs << 12);
-        while (get_tod_clock() < end)
+        end = get_tod_clock_fast() + (usecs << 12);
+        while (get_tod_clock_fast() < end)
                 cpu_relax();
 }
 
@@ -120,10 +120,10 @@ void __ndelay(unsigned long long nsecs)
 
         nsecs <<= 9;
         do_div(nsecs, 125);
-        end = get_tod_clock() + nsecs;
+        end = get_tod_clock_fast() + nsecs;
         if (nsecs & ~0xfffUL)
                 __udelay(nsecs >> 12);
-        while (get_tod_clock() < end)
+        while (get_tod_clock_fast() < end)
                 barrier();
 }
 EXPORT_SYMBOL(__ndelay);
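
A note on the &clk[1] extraction in the new get_tod_clock(): STCKE stores a 16-byte extended TOD value, and (as an assumption about the z/Architecture extended TOD format, not something stated in the patch) the classic 64-bit TOD clock occupies bytes 1..8 of that buffer, after a leading epoch-index byte, with additional low-order clock bits and the programmable field in the remaining bytes. A sketch mirroring what the patched helper does (the function name is hypothetical):

    #include <asm/timex.h>

    /*
     * Sketch only: store the 16-byte extended TOD value with STCKE and
     * pick out the classic 64-bit clock starting at byte offset 1
     * (assumed layout: byte 0 epoch index, bytes 1..8 TOD clock,
     * bytes 9..15 extra clock bits and programmable field).
     */
    static unsigned long long tod_from_stcke_example(void)
    {
            unsigned char clk[16];

            get_tod_clock_ext(clk);                  /* STCKE */
            return *((unsigned long long *)&clk[1]); /* bytes 1..8 */
    }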