| author | Kirill Tkhai <tkhai@yandex.ru> | 2014-04-16 20:45:24 (GMT) |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2014-05-02 05:15:16 (GMT) |
| commit | 49b6c01f4c1de3b5e5427ac5aba80f9f6d27837a (patch) | |
| tree | 5c4a5823c94b6bc48b94edb56d673be33f849a22 /arch/sparc | |
| parent | aa3449ee9c87d9b7660dd1493248abcc57769e31 (diff) | |
| download | linux-49b6c01f4c1de3b5e5427ac5aba80f9f6d27837a.tar.xz | |
sparc64: Make itc_sync_lock raw
One more place where we must not be preempted or interrupted in RT.
Always actually disable interrupts during the synchronization cycle.
Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
| -rw-r--r-- | arch/sparc/kernel/smp_64.c | 6 |
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 9781048..745a363 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -149,7 +149,7 @@ void cpu_panic(void)
 #define NUM_ROUNDS	64	/* magic value */
 #define NUM_ITERS	5	/* likewise */
 
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 static unsigned long go[SLAVE + 1];
 
 #define DEBUG_TICK_SYNC	0
@@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
 	go[MASTER] = 0;
 	membar_safe("#StoreLoad");
 
-	spin_lock_irqsave(&itc_sync_lock, flags);
+	raw_spin_lock_irqsave(&itc_sync_lock, flags);
 	{
 		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
 			while (!go[MASTER])
@@ -268,7 +268,7 @@ static void smp_synchronize_one_tick(int cpu)
 			membar_safe("#StoreLoad");
 		}
 	}
-	spin_unlock_irqrestore(&itc_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
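To illustrate why the conversion matters, here is a minimal, self-contained sketch of the raw-spinlock pattern the patch switches to; the lock name timer_sync_lock and the function sync_critical_section() are illustrative placeholders, not identifiers from smp_64.c. On a PREEMPT_RT kernel a plain spinlock_t becomes a sleeping lock and spin_lock_irqsave() no longer disables interrupts, whereas a raw_spinlock_t keeps spinning with interrupts and preemption disabled on every configuration, which is what the tick-synchronization loop requires.

```c
/*
 * Sketch of the raw-spinlock pattern adopted by this patch.
 * timer_sync_lock and sync_critical_section() are illustrative
 * names, not taken from arch/sparc/kernel/smp_64.c.
 */
#include <linux/spinlock.h>

/* A raw spinlock remains a real spinning lock even on PREEMPT_RT. */
static DEFINE_RAW_SPINLOCK(timer_sync_lock);

static void sync_critical_section(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables local interrupts and
	 * preemption on all kernel configurations, including RT.
	 * On an RT kernel, spin_lock_irqsave() on a spinlock_t maps
	 * to a sleeping lock and leaves interrupts enabled, which
	 * would break a timing-sensitive section like this one.
	 */
	raw_spin_lock_irqsave(&timer_sync_lock, flags);

	/* ... time-critical work that must not be interrupted ... */

	raw_spin_unlock_irqrestore(&timer_sync_lock, flags);
}
```

Because a raw spinlock disables interrupts even on RT, sections guarded this way are normally kept short and bounded, which holds here: the synchronization loop runs a fixed NUM_ROUNDS*NUM_ITERS iterations.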