From e34191fad8e5d9fe4e76f6d03b5e29e3eae7535a Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Mon, 29 Sep 2014 06:14:23 -0700
Subject: locktorture: Support rwlocks

Add a "rw_lock" torture test to stress kernel rwlocks and their irq
variant. Reader critical regions are 5x longer than writer ones, so a
similar ratio of lock acquisitions shows up in the statistics. In the
case of massive contention, both readers and writers hold the lock for
1/10 of a second.

Signed-off-by: Davidlohr Bueso
Signed-off-by: Paul E. McKenney

diff --git a/Documentation/locking/locktorture.txt b/Documentation/locking/locktorture.txt
index be71501..619f2bb 100644
--- a/Documentation/locking/locktorture.txt
+++ b/Documentation/locking/locktorture.txt
@@ -45,6 +45,11 @@ torture_type	  Type of lock to torture. By default, only spinlocks will
 		     o "spin_lock_irq": spin_lock_irq() and
 		       spin_unlock_irq() pairs.
 
+		     o "rw_lock": read/write lock() and unlock() rwlock pairs.
+
+		     o "rw_lock_irq": read/write lock_irq() and unlock_irq()
+		       rwlock pairs.
+
 		     o "mutex_lock": mutex_lock() and mutex_unlock() pairs.
 
 		     o "rwsem_lock": read/write down() and up() semaphore pairs.
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 540d5df..0762b25 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -229,6 +230,110 @@ static struct lock_torture_ops spin_lock_irq_ops = {
 	.name		= "spin_lock_irq"
 };
 
+static DEFINE_RWLOCK(torture_rwlock);
+
+static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
+{
+	write_lock(&torture_rwlock);
+	return 0;
+}
+
+static void torture_rwlock_write_delay(struct torture_random_state *trsp)
+{
+	const unsigned long shortdelay_us = 2;
+	const unsigned long longdelay_ms = 100;
+
+	/* We want a short delay mostly to emulate likely code, and
+	 * we want a long delay occasionally to force massive contention.
+	 */
+	if (!(torture_random(trsp) %
+	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+		mdelay(longdelay_ms);
+	else
+		udelay(shortdelay_us);
+}
+
+static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
+{
+	write_unlock(&torture_rwlock);
+}
+
+static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
+{
+	read_lock(&torture_rwlock);
+	return 0;
+}
+
+static void torture_rwlock_read_delay(struct torture_random_state *trsp)
+{
+	const unsigned long shortdelay_us = 10;
+	const unsigned long longdelay_ms = 100;
+
+	/* We want a short delay mostly to emulate likely code, and
+	 * we want a long delay occasionally to force massive contention.
+	 */
+	if (!(torture_random(trsp) %
+	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
+		mdelay(longdelay_ms);
+	else
+		udelay(shortdelay_us);
+}
+
+static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
+{
+	read_unlock(&torture_rwlock);
+}
+
+static struct lock_torture_ops rw_lock_ops = {
+	.writelock	= torture_rwlock_write_lock,
+	.write_delay	= torture_rwlock_write_delay,
+	.writeunlock	= torture_rwlock_write_unlock,
+	.readlock	= torture_rwlock_read_lock,
+	.read_delay	= torture_rwlock_read_delay,
+	.readunlock	= torture_rwlock_read_unlock,
+	.name		= "rw_lock"
+};
+
+static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&torture_rwlock, flags);
+	cxt.cur_ops->flags = flags;
+	return 0;
+}
+
+static void torture_rwlock_write_unlock_irq(void)
+__releases(torture_rwlock)
+{
+	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&torture_rwlock, flags);
+	cxt.cur_ops->flags = flags;
+	return 0;
+}
+
+static void torture_rwlock_read_unlock_irq(void)
+__releases(torture_rwlock)
+{
+	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static struct lock_torture_ops rw_lock_irq_ops = {
+	.writelock	= torture_rwlock_write_lock_irq,
+	.write_delay	= torture_rwlock_write_delay,
+	.writeunlock	= torture_rwlock_write_unlock_irq,
+	.readlock	= torture_rwlock_read_lock_irq,
+	.read_delay	= torture_rwlock_read_delay,
+	.readunlock	= torture_rwlock_read_unlock_irq,
+	.name		= "rw_lock_irq"
+};
+
 static DEFINE_MUTEX(torture_mutex);
 
 static int torture_mutex_lock(void) __acquires(torture_mutex)
@@ -535,8 +640,11 @@ static int __init lock_torture_init(void)
 	int i, j;
 	int firsterr = 0;
 	static struct lock_torture_ops *torture_ops[] = {
-		&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
-		&mutex_lock_ops, &rwsem_lock_ops,
+		&lock_busted_ops,
+		&spin_lock_ops, &spin_lock_irq_ops,
+		&rw_lock_ops, &rw_lock_irq_ops,
+		&mutex_lock_ops,
+		&rwsem_lock_ops,
 	};
 
 	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
@@ -571,7 +679,8 @@ static int __init lock_torture_init(void)
 		cxt.debug_lock = true;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
-	if (strncmp(torture_type, "spin", 4) == 0)
+	if ((strncmp(torture_type, "spin", 4) == 0) ||
+	    (strncmp(torture_type, "rw_lock", 7) == 0))
 		cxt.debug_lock = true;
 #endif
 
diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFLIST b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
index 6108137..6910b73 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
@@ -1,3 +1,4 @@
 LOCK01
 LOCK02
 LOCK03
+LOCK04
\ No newline at end of file
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK04 b/tools/testing/selftests/rcutorture/configs/lock/LOCK04
new file mode 100644
index 0000000..1d1da14
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK04
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot
new file mode 100644
index 0000000..48c04fe
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot
@@ -0,0 +1 @@
+locktorture.torture_type=rw_lock
--
cgit v0.10.2
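The ops table added above is what lets a single torture harness drive many
lock types: each primitive supplies lock, delay, and unlock callbacks, and
lock_torture_init() picks a table by matching torture_type. The following
userspace sketch of the same pattern is illustrative only -- pthread_rwlock_t
stands in for the kernel's rwlock_t, usleep() for udelay(), and every name in
it is invented for the example; only the 2us-writer/10us-reader delay ratio is
taken from the patch.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t torture_rwlock = PTHREAD_RWLOCK_INITIALIZER;

struct lock_torture_ops {
	int  (*writelock)(void);
	void (*write_delay)(void);
	void (*writeunlock)(void);
	int  (*readlock)(void);
	void (*read_delay)(void);
	void (*readunlock)(void);
	const char *name;
};

static int  sketch_write_lock(void)   { return pthread_rwlock_wrlock(&torture_rwlock); }
static void sketch_write_delay(void)  { usleep(2); }	/* short writer hold */
static void sketch_write_unlock(void) { pthread_rwlock_unlock(&torture_rwlock); }
static int  sketch_read_lock(void)    { return pthread_rwlock_rdlock(&torture_rwlock); }
static void sketch_read_delay(void)   { usleep(10); }	/* readers hold 5x longer */
static void sketch_read_unlock(void)  { pthread_rwlock_unlock(&torture_rwlock); }

static struct lock_torture_ops rw_lock_sketch_ops = {
	.writelock   = sketch_write_lock,
	.write_delay = sketch_write_delay,
	.writeunlock = sketch_write_unlock,
	.readlock    = sketch_read_lock,
	.read_delay  = sketch_read_delay,
	.readunlock  = sketch_read_unlock,
	.name        = "rw_lock (userspace sketch)",
};

int main(void)
{
	struct lock_torture_ops *ops = &rw_lock_sketch_ops;

	/* One writer pass and one reader pass through the same table;
	 * the real module runs these in per-kthread stress loops instead.
	 */
	ops->writelock();
	ops->write_delay();
	ops->writeunlock();

	ops->readlock();
	ops->read_delay();
	ops->readunlock();

	printf("%s: single pass OK\n", ops->name);
	return 0;
}

Swapping in a different ops table (mutex, rwsem, spinlock) changes nothing in
the driver loop, which is exactly why the patch only has to register
rw_lock_ops and rw_lock_irq_ops in torture_ops[].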
From 219f800f99db6f4e43a582cb9e0d98931f13c012 Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Mon, 29 Sep 2014 06:14:24 -0700
Subject: locktorture: Fix __acquire annotation for spinlock irq

It is quite easy to get the names mixed up -- 'torture_spinlock_irq' is
not actually a valid spinlock name.

Signed-off-by: Davidlohr Bueso
Signed-off-by: Paul E. McKenney

diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 0762b25..9e9cd11 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -205,7 +205,7 @@ static struct lock_torture_ops spin_lock_ops = {
 };
 
 static int torture_spin_lock_write_lock_irq(void)
-__acquires(torture_spinlock_irq)
+__acquires(torture_spinlock)
 {
 	unsigned long flags;
 
--
cgit v0.10.2

From a1229491006a3d55cc0d7e6d496be39915ccefdd Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Mon, 29 Sep 2014 06:14:25 -0700
Subject: locktorture: Cannot hold read and write lock

Holding both the read and the write side of the same lock is never
legal, so trigger an error if the torture kthreads ever observe both
held at once.

Signed-off-by: Davidlohr Bueso
Signed-off-by: Paul E. McKenney

diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 9e9cd11..b05dc46 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -453,14 +453,19 @@ static int lock_torture_writer(void *arg)
 	do {
 		if ((torture_random(&rand) & 0xfffff) == 0)
 			schedule_timeout_uninterruptible(1);
+
 		cxt.cur_ops->writelock();
 		if (WARN_ON_ONCE(lock_is_write_held))
 			lwsp->n_lock_fail++;
 		lock_is_write_held = 1;
+		if (WARN_ON_ONCE(lock_is_read_held))
+			lwsp->n_lock_fail++; /* rare, but... */
+
 		lwsp->n_lock_acquired++;
 		cxt.cur_ops->write_delay(&rand);
 		lock_is_write_held = 0;
 		cxt.cur_ops->writeunlock();
+
 		stutter_wait("lock_torture_writer");
 	} while (!torture_must_stop());
 	torture_kthread_stopping("lock_torture_writer");
@@ -482,12 +487,17 @@ static int lock_torture_reader(void *arg)
 	do {
 		if ((torture_random(&rand) & 0xfffff) == 0)
 			schedule_timeout_uninterruptible(1);
+
 		cxt.cur_ops->readlock();
 		lock_is_read_held = 1;
+		if (WARN_ON_ONCE(lock_is_write_held))
+			lrsp->n_lock_fail++; /* rare, but... */
+
 		lrsp->n_lock_acquired++;
 		cxt.cur_ops->read_delay(&rand);
 		lock_is_read_held = 0;
 		cxt.cur_ops->readunlock();
+
 		stutter_wait("lock_torture_reader");
 	} while (!torture_must_stop());
 	torture_kthread_stopping("lock_torture_reader");
--
cgit v0.10.2
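The annotation fix two patches up is purely a static-analysis matter:
__acquires() and __releases() expand to nothing in a normal compiler build
and are only interpreted by sparse, which tracks the named lock's context
and flags imbalanced acquire/release pairs. That is why a misnamed lock in
__acquires() breaks the checker but never the build. A sketch of the
definitions, essentially as they appear in include/linux/compiler.h of this
vintage (quoted from memory, lightly trimmed):

/* Sparse lock-context annotations; no-ops for ordinary compilers. */
#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))	/* x: 0 -> 1 */
# define __releases(x)	__attribute__((context(x, 1, 0)))	/* x: 1 -> 0 */
#else
# define __acquires(x)
# define __releases(x)
#endif

Because the argument is just a context key, acquiring under one name
('torture_spinlock_irq') while releasing under another ('torture_spinlock')
reads to sparse as one lock never released and another never acquired.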
From c98fed9fc6a7449affd941d8a8e9fcb0c72977d6 Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Mon, 29 Sep 2014 06:14:26 -0700
Subject: locktorture: Cleanup header usage

Remove some unnecessary headers and explicitly include rwsem.h.

Signed-off-by: Davidlohr Bueso
Signed-off-by: Paul E. McKenney

diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index b05dc46..ec8cce2 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -20,32 +20,20 @@
  * Author: Paul E. McKenney
  * Based on kernel/rcu/torture.c.
  */
-#include
 #include
-#include
 #include
 #include
-#include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
-#include
-#include
 #include
-#include
-#include
-#include
-#include
-#include
 #include
-#include
 #include
-#include
-#include
 #include
 
 MODULE_LICENSE("GPL");
--
cgit v0.10.2

From 789cbbeca4eb7141cbd748ee93772471101b507b Mon Sep 17 00:00:00 2001
From: Joe Lawrence
Date: Sun, 5 Oct 2014 13:24:21 -0400
Subject: workqueue: Add quiescent state between work items

Similar to the stop_machine deadlock scenario on !PREEMPT kernels
addressed in b22ce2785d97 ("workqueue: cond_resched() after processing
each work item"), kworker threads requeueing back-to-back with zero
jiffy delay can stall RCU. The cond_resched() call introduced in that
fix will yield only if there are other higher-priority tasks to run,
so force a quiescent RCU state between work items.

Signed-off-by: Joe Lawrence
Link: https://lkml.kernel.org/r/20140926105227.01325697@jlaw-desktop.mno.stratus.com
Link: https://lkml.kernel.org/r/20140929115445.40221d8e@jlaw-desktop.mno.stratus.com
Fixes: b22ce2785d97 ("workqueue: cond_resched() after processing each work item")
Cc:
Acked-by: Tejun Heo
Signed-off-by: Paul E. McKenney

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5dbe22a..345bec9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2043,8 +2043,10 @@ __acquires(&pool->lock)
 	 * kernels, where a requeueing work item waiting for something to
 	 * happen could deadlock with stop_machine as such work item could
 	 * indefinitely requeue itself while all other CPUs are trapped in
-	 * stop_machine.
+	 * stop_machine. At the same time, report a quiescent RCU state so
+	 * the same condition doesn't freeze RCU.
 	 */
+	rcu_note_voluntary_context_switch(current);
 	cond_resched();
 
 	spin_lock_irq(&pool->lock);
--
cgit v0.10.2

From 3e28e377204badfc3c4119ff2abda473127ee0ff Mon Sep 17 00:00:00 2001
From: Joe Lawrence
Date: Sun, 5 Oct 2014 13:24:22 -0400
Subject: workqueue: Use cond_resched_rcu_qs macro

Tidy up by using cond_resched_rcu_qs() in place of the open-coded
cond_resched() plus quiescent-state report. Splitting the change this
way allows easy backporting to -stable for kernel versions that lack
cond_resched_rcu_qs().

Signed-off-by: Joe Lawrence
Acked-by: Tejun Heo
Signed-off-by: Paul E. McKenney

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 345bec9..09b685d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2046,8 +2046,7 @@ __acquires(&pool->lock)
 	 * stop_machine. At the same time, report a quiescent RCU state so
 	 * the same condition doesn't freeze RCU.
 	 */
-	rcu_note_voluntary_context_switch(current);
-	cond_resched();
+	cond_resched_rcu_qs();
 
 	spin_lock_irq(&pool->lock);
--
cgit v0.10.2
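The two workqueue patches read as one logical change split for -stable:
the first open-codes the quiescent-state report, the second folds it into
the helper. The substitution is behavior-preserving because
cond_resched_rcu_qs(), added to include/linux/rcupdate.h around this time,
is essentially a macro wrapping exactly the two calls the first patch
introduced (sketched here from the contemporary definition):

/* Report a voluntary context switch to RCU, then offer to reschedule --
 * precisely what the first workqueue patch open-coded in process_one_work().
 */
#define cond_resched_rcu_qs() \
do { \
	rcu_note_voluntary_context_switch(current); \
	cond_resched(); \
} while (0)

The ordering matters for neither correctness nor the deadlock fix: the RCU
note is unconditional, so even when cond_resched() finds no higher-priority
task to yield to, RCU still sees a quiescent state between work items.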