From 3003eba313dd0e0502dd71548c36fe7c19801ce5 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Apr 2011 21:41:54 -0400 Subject: lockdep: Print a nicer description for irq lock inversions Locking order inversion due to interrupts is a subtle problem. When an irq lock inversion is discovered by lockdep, it currently reports something like: [ INFO: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected ] ... and then prints out the locks that are involved, as back traces. Judging by lkml feedback, developers were routinely confused by what a HARDIRQ-safe -> HARDIRQ-unsafe issue is all about, and sometimes even blew it off as a bug in lockdep. It is not obvious when lockdep prints this message about a lock that is never taken in interrupt context. After explaining the problems that lockdep is reporting, I decided to add a description of the problem in visual form. Now the following is shown: --- other info that might help us debug this: Possible interrupt unsafe locking scenario: CPU0 CPU1 ---- ---- lock(lockA); local_irq_disable(); lock(&rq->lock); lock(lockA); <Interrupt> lock(&rq->lock); *** DEADLOCK *** --- The above is the case when the unsafe lock is taken while holding a lock taken in irq context. But when a lock is taken that also grabs an unsafe lock, the call chain is shown: --- other info that might help us debug this: Chain exists of: &rq->lock --> lockA --> lockC Possible interrupt unsafe locking scenario: CPU0 CPU1 ---- ---- lock(lockC); local_irq_disable(); lock(&rq->lock); lock(lockA); <Interrupt> lock(&rq->lock); *** DEADLOCK *** Signed-off-by: Steven Rostedt Acked-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Andrew Morton Link: http://lkml.kernel.org/r/20110421014259.132728798@goodmis.org Signed-off-by: Ingo Molnar diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 53a6895..7b2ffee 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -490,6 +490,18 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) usage[i] = '\0'; } +static int __print_lock_name(struct lock_class *class) +{ + char str[KSYM_NAME_LEN]; + const char *name; + + name = class->name; + if (!name) + name = __get_key_name(class->key, str); + + return printk("%s", name); +} + static void print_lock_name(struct lock_class *class) { char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; @@ -1325,6 +1337,62 @@ print_shortest_lock_dependencies(struct lock_list *leaf, return; } +static void +print_irq_lock_scenario(struct lock_list *safe_entry, + struct lock_list *unsafe_entry, + struct held_lock *prev, + struct held_lock *next) +{ + struct lock_class *safe_class = safe_entry->class; + struct lock_class *unsafe_class = unsafe_entry->class; + struct lock_class *middle_class = hlock_class(prev); + + if (middle_class == safe_class) + middle_class = hlock_class(next); + + /* + * A direct locking problem where unsafe_class lock is taken + * directly by safe_class lock, then all we need to show + * is the deadlock scenario, as it is obvious that the + * unsafe lock is taken under the safe lock. + * + * But if there is a chain instead, where the safe lock takes + * an intermediate lock (middle_class) where this lock is + * not the same as the safe lock, then the lock chain is + * used to describe the problem. Otherwise we would need + * to show a different CPU case for each link in the chain + * from the safe_class lock to the unsafe_class lock.
+ */ + if (middle_class != unsafe_class) { + printk("Chain exists of:\n "); + __print_lock_name(safe_class); + printk(" --> "); + __print_lock_name(middle_class); + printk(" --> "); + __print_lock_name(unsafe_class); + printk("\n\n"); + } + + printk(" Possible interrupt unsafe locking scenario:\n\n"); + printk(" CPU0 CPU1\n"); + printk(" ---- ----\n"); + printk(" lock("); + __print_lock_name(unsafe_class); + printk(");\n"); + printk(" local_irq_disable();\n"); + printk(" lock("); + __print_lock_name(safe_class); + printk(");\n"); + printk(" lock("); + __print_lock_name(middle_class); + printk(");\n"); + printk(" <Interrupt>\n"); + printk(" lock("); + __print_lock_name(safe_class); + printk(");\n"); + printk("\n *** DEADLOCK ***\n\n"); +} + static int print_bad_irq_dependency(struct task_struct *curr, struct lock_list *prev_root, @@ -1376,6 +1444,8 @@ print_bad_irq_dependency(struct task_struct *curr, print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); printk("\nother info that might help us debug this:\n\n"); + print_irq_lock_scenario(backwards_entry, forwards_entry, prev, next); + lockdep_print_held_locks(curr); printk("\nthe dependencies between %s-irq-safe lock", irqclass); -- cgit v0.10.2 From f4185812aa046ecb97e8817e10148cacdd7a6baa Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Apr 2011 21:41:55 -0400 Subject: lockdep: Print a nicer description for normal deadlocks The lockdep output can be pretty cryptic; having nicer output can save a lot of head scratching. When a normal deadlock scenario is detected by lockdep (lock A -> lock B and there exists a place where lock B -> lock A) we now get the following new output: other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(lockB); lock(lockA); lock(lockB); lock(lockA); *** DEADLOCK *** In cases where there's a deeper chain, it shows the partial chain that can cause the issue: Chain exists of: lockC --> lockA --> lockB Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(lockB); lock(lockA); lock(lockB); lock(lockC); *** DEADLOCK *** Signed-off-by: Steven Rostedt Acked-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Andrew Morton Link: http://lkml.kernel.org/r/20110421014259.380621789@goodmis.org Signed-off-by: Ingo Molnar diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 7b2ffee..73cebd7 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -1065,6 +1065,56 @@ print_circular_bug_entry(struct lock_list *target, int depth) return 0; } +static void +print_circular_lock_scenario(struct held_lock *src, + struct held_lock *tgt, + struct lock_list *prt) +{ + struct lock_class *source = hlock_class(src); + struct lock_class *target = hlock_class(tgt); + struct lock_class *parent = prt->class; + + /* + * A direct locking problem where unsafe_class lock is taken + * directly by safe_class lock, then all we need to show + * is the deadlock scenario, as it is obvious that the + * unsafe lock is taken under the safe lock. + * + * But if there is a chain instead, where the safe lock takes + * an intermediate lock (middle_class) where this lock is + * not the same as the safe lock, then the lock chain is + * used to describe the problem. Otherwise we would need + * to show a different CPU case for each link in the chain + * from the safe_class lock to the unsafe_class lock.
+ */ + if (parent != source) { + printk("Chain exists of:\n "); + __print_lock_name(source); + printk(" --> "); + __print_lock_name(parent); + printk(" --> "); + __print_lock_name(target); + printk("\n\n"); + } + + printk(" Possible unsafe locking scenario:\n\n"); + printk(" CPU0 CPU1\n"); + printk(" ---- ----\n"); + printk(" lock("); + __print_lock_name(target); + printk(");\n"); + printk(" lock("); + __print_lock_name(parent); + printk(");\n"); + printk(" lock("); + __print_lock_name(target); + printk(");\n"); + printk(" lock("); + __print_lock_name(source); + printk(");\n"); + printk("\n *** DEADLOCK ***\n\n"); +} + /* * When a circular dependency is detected, print the * header first: @@ -1108,6 +1158,7 @@ static noinline int print_circular_bug(struct lock_list *this, { struct task_struct *curr = current; struct lock_list *parent; + struct lock_list *first_parent; int depth; if (!debug_locks_off_graph_unlock() || debug_locks_silent) @@ -1121,6 +1172,7 @@ static noinline int print_circular_bug(struct lock_list *this, print_circular_bug_header(target, depth, check_src, check_tgt); parent = get_lock_parent(target); + first_parent = parent; while (parent) { print_circular_bug_entry(parent, --depth); @@ -1128,6 +1180,9 @@ static noinline int print_circular_bug(struct lock_list *this, } printk("\nother info that might help us debug this:\n\n"); + print_circular_lock_scenario(check_src, check_tgt, + first_parent); + lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); -- cgit v0.10.2 From 48702ecf308e53f176c1f6fdc193d622ded54ac0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Apr 2011 21:41:56 -0400 Subject: lockdep: Print a nicer description for simple deadlocks Lockdep output can be pretty cryptic; having nicer output can save a lot of head scratching.
When a simple deadlock scenario is detected by lockdep (lock A -> lock A) we now get the following new output: other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(&(lock)->rlock); lock(&(lock)->rlock); *** DEADLOCK *** Signed-off-by: Steven Rostedt Acked-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Andrew Morton Link: http://lkml.kernel.org/r/20110421014259.643930104@goodmis.org Signed-off-by: Ingo Molnar diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 73cebd7..c4cc5d1 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -1664,6 +1664,26 @@ static inline void inc_chains(void) #endif +static void +print_deadlock_scenario(struct held_lock *nxt, + struct held_lock *prv) +{ + struct lock_class *next = hlock_class(nxt); + struct lock_class *prev = hlock_class(prv); + + printk(" Possible unsafe locking scenario:\n\n"); + printk(" CPU0\n"); + printk(" ----\n"); + printk(" lock("); + __print_lock_name(prev); + printk(");\n"); + printk(" lock("); + __print_lock_name(next); + printk(");\n"); + printk("\n *** DEADLOCK ***\n\n"); + printk(" May be due to missing lock nesting notation\n\n"); +} + static int print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, struct held_lock *next) @@ -1682,6 +1702,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, print_lock(prev); printk("\nother info that might help us debug this:\n"); + print_deadlock_scenario(next, prev); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); -- cgit v0.10.2 From dad3d7435e1d8c254d6877dc06852dc00c5da812 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Apr 2011 21:41:57 -0400 Subject: lockdep: Print a nicer description for irq inversion bugs Irq inversion and irq dependency bugs are only subtly different. The difference lies in where the interrupt occurred. For irq dependency: irq_disable lock(A) lock(B) unlock(B) unlock(A) irq_enable lock(B) unlock(B) <interrupt> lock(A) The interrupt comes in after it has been established that lock A can be held when taking an irq unsafe lock. Lockdep detects the problem when taking lock A in interrupt context. With the irq inversion, the irq happens before that ordering is established, and lockdep detects the problem with the taking of lock B: <interrupt> lock(A) irq_disable lock(A) lock(B) unlock(B) unlock(A) irq_enable lock(B) unlock(B) Since the problem with the locking logic for both of these issues is in actuality the same, they both should report the same scenario.
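To make the two cases concrete, here is a minimal kernel-style C sketch of the orderings described above; lock_a, lock_b and the handler are hypothetical stand-ins for illustration, not code from this patch:

	#include <linux/spinlock.h>
	#include <linux/interrupt.h>

	static DEFINE_SPINLOCK(lock_a);	/* hypothetical: also taken from hardirq context */
	static DEFINE_SPINLOCK(lock_b);	/* hypothetical: only ever taken with irqs enabled */

	static void establish_a_then_b(void)
	{
		spin_lock_irq(&lock_a);		/* records the A -> B dependency */
		spin_lock(&lock_b);
		spin_unlock(&lock_b);
		spin_unlock_irq(&lock_a);
	}

	static void take_b_with_irqs_on(void)
	{
		spin_lock(&lock_b);		/* an interrupt can arrive here */
		spin_unlock(&lock_b);
	}

	static irqreturn_t demo_handler(int irq, void *dev_id)
	{
		spin_lock(&lock_a);		/* makes lock_a HARDIRQ-safe */
		spin_unlock(&lock_a);
		return IRQ_HANDLED;
	}

In the dependency case the handler fires after establish_a_then_b() has recorded A -> B, so lockdep complains at the spin_lock(&lock_a) in the handler. In the inversion case the handler fires first, so lock_a is already known to be HARDIRQ-safe, and lockdep complains when lock_b — now sitting below lock_a in the dependency chain — is taken with interrupts enabled in take_b_with_irqs_on().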
This patch implements that and prints this: other info that might help us debug this: Chain exists of: &rq->lock --> lockA --> lockC Possible interrupt unsafe locking scenario: CPU0 CPU1 ---- ---- lock(lockC); local_irq_disable(); lock(&rq->lock); lock(lockA); <Interrupt> lock(&rq->lock); *** DEADLOCK *** Signed-off-by: Steven Rostedt Acked-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Andrew Morton Link: http://lkml.kernel.org/r/20110421014259.910720381@goodmis.org Signed-off-by: Ingo Molnar diff --git a/kernel/lockdep.c b/kernel/lockdep.c index c4cc5d1..0b497dd 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -1395,15 +1395,15 @@ print_shortest_lock_dependencies(struct lock_list *leaf, static void print_irq_lock_scenario(struct lock_list *safe_entry, struct lock_list *unsafe_entry, - struct held_lock *prev, - struct held_lock *next) + struct lock_class *prev_class, + struct lock_class *next_class) { struct lock_class *safe_class = safe_entry->class; struct lock_class *unsafe_class = unsafe_entry->class; - struct lock_class *middle_class = hlock_class(prev); + struct lock_class *middle_class = prev_class; if (middle_class == safe_class) - middle_class = hlock_class(next); + middle_class = next_class; /* * A direct locking problem where unsafe_class lock is taken @@ -1499,7 +1499,8 @@ print_bad_irq_dependency(struct task_struct *curr, print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); printk("\nother info that might help us debug this:\n\n"); - print_irq_lock_scenario(backwards_entry, forwards_entry, prev, next); + print_irq_lock_scenario(backwards_entry, forwards_entry, + hlock_class(prev), hlock_class(next)); lockdep_print_held_locks(curr); @@ -2219,6 +2220,10 @@ print_irq_inversion_bug(struct task_struct *curr, struct held_lock *this, int forwards, const char *irqclass) { + struct lock_list *entry = other; + struct lock_list *middle = NULL; + int depth; + if (!debug_locks_off_graph_unlock() || debug_locks_silent) return 0; @@ -2237,6 +2242,25 @@ print_irq_inversion_bug(struct task_struct *curr, printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); printk("\nother info that might help us debug this:\n"); + + /* Find a middle lock (if one exists) */ + depth = get_lock_depth(other); + do { + if (depth == 0 && (entry != root)) { + printk("lockdep:%s bad path found in chain graph\n", __func__); + break; + } + middle = entry; + entry = get_lock_parent(entry); + depth--; + } while (entry && entry != root && (depth >= 0)); + if (forwards) + print_irq_lock_scenario(root, other, + middle ? middle->class : root->class, other->class); + else + print_irq_lock_scenario(other, root, + middle ? middle->class : other->class, root->class); + lockdep_print_held_locks(curr); printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); -- cgit v0.10.2 From 6be8c3935b914dfbc24b27c91c2b6d583645e61a Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Apr 2011 21:41:58 -0400 Subject: lockdep: Replace "Bad BFS generated tree" message with something less cryptic The message of "Bad BFS generated tree" is a bit confusing. Replace it with a more sane error message. Thanks to Peter Zijlstra for helping me come up with a better message.
Signed-off-by: Steven Rostedt Acked-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Andrew Morton Link: http://lkml.kernel.org/r/20110421014300.135521252@goodmis.org Signed-off-by: Ingo Molnar diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 0b497dd..270cfa4 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -1381,7 +1381,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf, printk("\n"); if (depth == 0 && (entry != root)) { - printk("lockdep:%s bad BFS generated tree\n", __func__); + printk("lockdep:%s bad path found in chain graph\n", __func__); break; } -- cgit v0.10.2 From 282b5c2f6f663c008444321fd8fcdd374596046b Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Apr 2011 21:41:59 -0400 Subject: lockdep: Print a nicer description for simple irq lock inversions Lockdep output can be pretty cryptic; having nicer output can save a lot of head scratching. When a simple irq inversion scenario is detected by lockdep (lock A taken in interrupt context but also in thread context without disabling interrupts) we now get the following (hopefully more informative) output: other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(lockA); <Interrupt> lock(lockA); *** DEADLOCK *** Signed-off-by: Steven Rostedt Acked-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Andrew Morton Link: http://lkml.kernel.org/r/20110421014300.436140880@goodmis.org Signed-off-by: Ingo Molnar diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 270cfa4..27c609f 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2158,6 +2158,24 @@ static void check_chain_key(struct task_struct *curr) #endif } +static void +print_usage_bug_scenario(struct held_lock *lock) +{ + struct lock_class *class = hlock_class(lock); + + printk(" Possible unsafe locking scenario:\n\n"); + printk(" CPU0\n"); + printk(" ----\n"); + printk(" lock("); + __print_lock_name(class); + printk(");\n"); + printk(" <Interrupt>\n"); + printk(" lock("); + __print_lock_name(class); + printk(");\n"); + printk("\n *** DEADLOCK ***\n\n"); +} + static int print_usage_bug(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) @@ -2186,6 +2204,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, print_irqtrace_events(curr); printk("\nother info that might help us debug this:\n"); + print_usage_bug_scenario(this); + lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); -- cgit v0.10.2 From e0944ee63f7249802be74454cef81c97630ae1cd Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Apr 2011 21:42:00 -0400 Subject: lockdep: Remove cmpxchg to update nr_chain_hlocks For some reason nr_chain_hlocks is updated with cmpxchg, but this is performed inside of the lockdep global "graph_lock()", which also makes simple modification of this variable atomic. Remove the cmpxchg logic for updating nr_chain_hlocks and simplify the code.
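To illustrate why the cmpxchg was redundant, here is a minimal sketch of the pattern, with hypothetical names standing in for lockdep's graph_lock() and nr_chain_hlocks; since every writer serializes on the same lock, a plain read-modify-write cannot race with another writer:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_graph_lock);	/* stand-in for lockdep's graph_lock() */
	static int demo_nr_chain_hlocks;		/* stand-in for nr_chain_hlocks */

	/* Reserve 'depth' slots; returns the base index, or -1 if the table is full. */
	static int demo_reserve_hlocks(int depth, int max)
	{
		int base = -1;

		spin_lock(&demo_graph_lock);
		if (demo_nr_chain_hlocks + depth <= max) {
			base = demo_nr_chain_hlocks;
			demo_nr_chain_hlocks += depth;	/* no cmpxchg needed under the lock */
		}
		spin_unlock(&demo_graph_lock);

		return base;
	}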
Signed-off-by: Steven Rostedt Acked-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Andrew Morton Link: http://lkml.kernel.org/r/20110421014300.727863282@goodmis.org Signed-off-by: Ingo Molnar diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 27c609f..63437d0 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -1973,7 +1973,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, struct list_head *hash_head = chainhashentry(chain_key); struct lock_chain *chain; struct held_lock *hlock_curr, *hlock_next; - int i, j, n, cn; + int i, j; if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return 0; @@ -2033,15 +2033,9 @@ cache_hit: } i++; chain->depth = curr->lockdep_depth + 1 - i; - cn = nr_chain_hlocks; - while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) { - n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth); - if (n == cn) - break; - cn = n; - } - if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { - chain->base = cn; + if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { + chain->base = nr_chain_hlocks; + nr_chain_hlocks += chain->depth; for (j = 0; j < chain->depth - 1; j++, i++) { int lock_id = curr->held_locks[i].class_idx - 1; chain_hlocks[chain->base + j] = lock_id; -- cgit v0.10.2 From e11feaa1192a079ba8e88a12121e9b12d55d4239 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 27 Apr 2011 14:27:24 -0400 Subject: watchdog, hung_task_timeout: Add Kconfig configurable default This patch allows the default value for sysctl_hung_task_timeout_secs to be set at build time. The feature carries virtually no overhead, so it makes sense to keep it enabled. On heavily loaded systems, though, it can end up triggering stack traces when there is no bug other than the system being underprovisioned. We use this patch to keep the hung task facility available but disabled at boot-time. The default of 120 seconds is preserved. As a note, commit e162b39a may have accidentally reverted commit fb822db4, which raised the default from 120 seconds to 480 seconds. Signed-off-by: Jeff Mahoney Acked-by: Mandeep Singh Baines Link: http://lkml.kernel.org/r/4DB8600C.8080000@suse.com Signed-off-by: Ingo Molnar diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 53ead17..ea64012 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -33,7 +33,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT; /* * Zero means infinite timeout - no checking done: */ -unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; +unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT; unsigned long __read_mostly sysctl_hung_task_warnings = 10; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index c768bcd..debbb05 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -238,6 +238,21 @@ config DETECT_HUNG_TASK enabled then all held locks will also be reported. This feature has negligible overhead. +config DEFAULT_HUNG_TASK_TIMEOUT + int "Default timeout for hung task detection (in seconds)" + depends on DETECT_HUNG_TASK + default 120 + help + This option controls the default timeout (in seconds) used + to determine when a task has become non-responsive and should + be considered hung. + + It can be adjusted at runtime via the kernel.hung_task_timeout_secs + sysctl or by writing a value to /proc/sys/kernel/hung_task_timeout_secs. + + A timeout of 0 disables the check. The default is two minutes. + Keeping the default should be fine in most cases.
config BOOTPARAM_HUNG_TASK_PANIC bool "Panic (Reboot) On Hung Tasks" depends on DETECT_HUNG_TASK -- cgit v0.10.2 From 5db1256a5131d3b133946fa02ac9770a784e6eb2 Mon Sep 17 00:00:00 2001 From: Milton Miller Date: Thu, 12 May 2011 04:13:54 -0500 Subject: seqlock: Don't smp_rmb in seqlock reader spin loop Move the smp_rmb after the cpu_relax loop in read_seqbegin and add ACCESS_ONCE to make sure the test and return are consistent. A multi-threaded core in the lab didn't like the update from 2.6.35 to 2.6.36, to the point it would hang during boot when multiple threads were active. Bisection showed af5ab277ded04bd9bc6b048c5a2f0e7d70ef0867 (clockevents: Remove the per cpu tick skew) as the culprit and it is supported with stack traces showing xtime_lock waits including tick_do_update_jiffies64 and/or update_vsyscall. Experimentation showed the combination of cpu_relax and smp_rmb was significantly slowing the progress of other threads sharing the core, and this patch is effective in avoiding the hang. A theory is that the rmb affects the whole core while the cpu_relax causes a resource rebalance flush; together they cause an interference cadence that is unbroken when the seqlock reader has interrupts disabled. At first I was confused why the refactor in 3c22cd5709e8143444a6d08682a87f4c57902df3 (kernel: optimise seqlock) didn't affect this patch's application, but after some study it turned out that commit affected seqcount, not seqlock. The new seqcount was not factored back into the seqlock; I defer that to the future. While the removal of the timer interrupt offset created contention for the xtime lock while a cpu does the additional work to update the system clock, the seqlock implementation with the tight rmb spin loop goes back much further, and is just waiting for the right trigger. Signed-off-by: Milton Miller Cc: Linus Torvalds Cc: Andi Kleen Cc: Nick Piggin Cc: Benjamin Herrenschmidt Cc: Anton Blanchard Cc: Paul McKenney Acked-by: Eric Dumazet Link: http://lkml.kernel.org/r/%3Cseqlock-rmb%40mdm.bga.com%3E Signed-off-by: Thomas Gleixner diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index e98cd2e..06d6964 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl) unsigned ret; repeat: - ret = sl->sequence; - smp_rmb(); + ret = ACCESS_ONCE(sl->sequence); if (unlikely(ret & 1)) { cpu_relax(); goto repeat; } + smp_rmb(); return ret; } -- cgit v0.10.2
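For reference, here is a minimal sketch of the reader pattern the patch above tunes, with a hypothetical datum guarded by the seqlock; after this change, read_seqbegin() spins on the raw sequence value and only issues its smp_rmb() once an even (writer-free) sequence has been observed:

	#include <linux/seqlock.h>

	static DEFINE_SEQLOCK(demo_lock);
	static u64 demo_ns;	/* hypothetical datum guarded by demo_lock */

	static u64 demo_read(void)
	{
		unsigned seq;
		u64 ns;

		do {
			seq = read_seqbegin(&demo_lock);	/* spins while the sequence is odd */
			ns = demo_ns;				/* tentative read of the data */
		} while (read_seqretry(&demo_lock, seq));	/* retry if a writer intervened */

		return ns;
	}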