author    Thomas Gleixner <tglx@linutronix.de>        2009-07-26 17:39:56 (GMT)
committer Emil Medve <Emilian.Medve@Freescale.com>    2013-04-30 08:17:16 (GMT)
commit    eff404912cbe53ecb8ca1fd31b019183249ec8e1 (patch)
tree      230dd71c7854fd9ba6b4ecd9de8cd1a3530686b8
parent    b24bd2c86c14ffd2e7a99e51184bf95b369d8b3d (diff)
rt: Add the preempt-rt lock replacement APIs
Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex
based locking functions for preempt-rt.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
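For illustration only (hypothetical driver code, not part of this patch):
lock users are unchanged at the source level. With CONFIG_PREEMPT_RT_FULL=y,
spin_lock() below maps to migrate_disable() plus rt_spin_lock(), i.e. a
sleeping, priority-inheriting rt_mutex acquisition instead of a spinning one:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_counter;

static void example_update(void)
{
	/*
	 * On RT: preemptible critical section with migration disabled;
	 * on !RT: the usual raw spinning lock with preemption off.
	 */
	spin_lock(&example_lock);
	example_counter++;
	spin_unlock(&example_lock);
}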
-rw-r--r--  include/linux/rwlock_rt.h         123
-rw-r--r--  include/linux/spinlock.h           12
-rw-r--r--  include/linux/spinlock_api_smp.h    4
-rw-r--r--  include/linux/spinlock_rt.h       158
-rw-r--r--  kernel/Makefile                     9
-rw-r--r--  kernel/rt.c                       442
-rw-r--r--  kernel/spinlock.c                   7
-rw-r--r--  lib/spinlock_debug.c                5

8 files changed, 756 insertions, 4 deletions
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644
index 0000000..853ee36
--- /dev/null
+++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,123 @@
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#define rwlock_init(rwl) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_init(&(rwl)->lock); \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+} while (0)
+
+extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags);
+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
+
+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags) \
+ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ migrate_disable(); \
+ flags = rt_read_lock_irqsave(lock); \
+ } while (0)
+
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ migrate_disable(); \
+ flags = rt_write_lock_irqsave(lock); \
+ } while (0)
+
+#define read_lock(lock) \
+ do { \
+ migrate_disable(); \
+ rt_read_lock(lock); \
+ } while (0)
+
+#define read_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+ migrate_disable(); \
+ rt_read_lock(lock); \
+ } while (0)
+
+#define read_lock_irq(lock) read_lock(lock)
+
+#define write_lock(lock) \
+ do { \
+ migrate_disable(); \
+ rt_write_lock(lock); \
+ } while (0)
+
+#define write_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+ migrate_disable(); \
+ rt_write_lock(lock); \
+ } while (0)
+
+#define write_lock_irq(lock) write_lock(lock)
+
+#define read_unlock(lock) \
+ do { \
+ rt_read_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#define read_unlock_bh(lock) \
+ do { \
+ rt_read_unlock(lock); \
+ migrate_enable(); \
+ local_bh_enable(); \
+ } while (0)
+
+#define read_unlock_irq(lock) read_unlock(lock)
+
+#define write_unlock(lock) \
+ do { \
+ rt_write_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#define write_unlock_bh(lock) \
+ do { \
+ rt_write_unlock(lock); \
+ migrate_enable(); \
+ local_bh_enable(); \
+ } while (0)
+
+#define write_unlock_irq(lock) write_unlock(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ rt_read_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ rt_write_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#endif
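For illustration only (hypothetical code, not part of this patch): two
consequences of the mapping above. First, rt_read_lock() only takes the
underlying rt_mutex when current does not already own it, so read-side
recursion by the owning task works. Second, the *_irqsave variants do not
disable hardware interrupts on RT; flags is merely zeroed on lock and
ignored on unlock:

static DEFINE_RWLOCK(example_rwlock);

static void example_inner(void)
{
	unsigned long flags;

	read_lock_irqsave(&example_rwlock, flags);	/* flags = 0, IRQs stay on */
	/* ... */
	read_unlock_irqrestore(&example_rwlock, flags);	/* flags is ignored */
}

static void example_outer(void)
{
	read_lock(&example_rwlock);
	example_inner();	/* recursive read lock, no self-deadlock */
	read_unlock(&example_rwlock);
}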
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ce..0c11a7c 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -254,7 +254,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
-#include <linux/rwlock.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_rt.h>
+#else
+# include <linux/rwlock.h>
+#endif
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -265,6 +269,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_rt.h>
+#else /* PREEMPT_RT_FULL */
+
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -394,4 +402,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 51df117..3f68f50 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
return 0;
}
-#include <linux/rwlock_api_smp.h>
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_api_smp.h>
+#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
new file mode 100644
index 0000000..a9ea392
--- /dev/null
+++ b/include/linux/spinlock_rt.h
@@ -0,0 +1,158 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#include <linux/bug.h>
+
+extern void
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
+
+#define spin_lock_init(slock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, #slock, &__key); \
+} while (0)
+
+extern void __lockfunc rt_spin_lock(spinlock_t *lock);
+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
+
+/*
+ * lockdep-less calls, for derived types like rwlock
+ * (for trylock they can use rt_mutex_trylock() directly):
+ */
+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+
+#define spin_lock_local(lock) rt_spin_lock(lock)
+#define spin_unlock_local(lock) rt_spin_unlock(lock)
+
+#define spin_lock(lock) \
+ do { \
+ migrate_disable(); \
+ rt_spin_lock(lock); \
+ } while (0)
+
+#define spin_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+ migrate_disable(); \
+ rt_spin_lock(lock); \
+ } while (0)
+
+#define spin_lock_irq(lock) spin_lock(lock)
+
+#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
+
+#ifdef CONFIG_LOCKDEP
+# define spin_lock_nested(lock, subclass) \
+ do { \
+ migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+#else
+# define spin_lock_nested(lock, subclass) spin_lock(lock)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ spin_lock(lock); \
+ } while (0)
+#endif
+
+#define spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ spin_lock(lock); \
+ } while (0)
+
+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
+{
+ unsigned long flags = 0;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ flags = rt_spin_lock_trace_flags(lock);
+#else
+ spin_lock(lock); /* lock_local */
+#endif
+ return flags;
+}
+
+/* FIXME: we need rt_spin_lock_nest_lock */
+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
+
+#define spin_unlock(lock) \
+ do { \
+ rt_spin_unlock(lock); \
+ migrate_enable(); \
+ } while (0)
+
+#define spin_unlock_bh(lock) \
+ do { \
+ rt_spin_unlock(lock); \
+ migrate_enable(); \
+ local_bh_enable(); \
+ } while (0)
+
+#define spin_unlock_irq(lock) spin_unlock(lock)
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ spin_unlock(lock); \
+ } while (0)
+
+#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_irq(lock) spin_trylock(lock)
+
+#define spin_trylock_irqsave(lock, flags) \
+ rt_spin_trylock_irqsave(lock, &(flags))
+
+#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
+
+#ifdef CONFIG_GENERIC_LOCKBREAK
+# define spin_is_contended(lock) ((lock)->break_lock)
+#else
+# define spin_is_contended(lock) (((void)(lock), 0))
+#endif
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return !rt_mutex_is_locked(&lock->lock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return rt_mutex_is_locked(&lock->lock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ BUG_ON(!spin_is_locked(lock));
+}
+
+#define atomic_dec_and_lock(atomic, lock) \
+ atomic_dec_and_spin_lock(atomic, lock)
+
+#endif
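For illustration only (hypothetical code, not part of this patch): since
spin_lock_irq() and spin_lock_irqsave() no longer disable hardware
interrupts here, state that is genuinely shared with hard interrupt context
must keep using raw_spinlock_t, which retains the spinning, IRQ-disabling
semantics on RT as well:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_raw_lock);

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	/* raw_spin_lock() stays a real spinning lock, even on RT */
	raw_spin_lock(&example_raw_lock);
	/* ... touch state shared with thread context ... */
	raw_spin_unlock(&example_raw_lock);
	return IRQ_HANDLED;
}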
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c072b6..0e12fc2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,8 +7,8 @@ obj-y = fork.o exec_domain.o panic.o printk.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
rcupdate.o extable.o params.o posix-timers.o \
- kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
- hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
+ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
+ hrtimer.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o cred.o \
async.o range.o groups.o lglock.o smpboot.o
@@ -32,7 +32,11 @@ obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+obj-y += mutex.o
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+obj-y += rwsem.o
+endif
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -44,6 +48,7 @@ endif
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += smp.o
ifneq ($(CONFIG_SMP),y)
diff --git a/kernel/rt.c b/kernel/rt.c
new file mode 100644
index 0000000..92a16e1
--- /dev/null
+++ b/kernel/rt.c
@@ -0,0 +1,442 @@
+/*
+ * kernel/rt.c
+ *
+ * Real-Time Preemption Support
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * historic credit for proving that Linux spinlocks can be implemented via
+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
+ * and others) who prototyped it on 2.4 and did lots of comparative
+ * research and analysis; TimeSys, for proving that you can implement a
+ * fully preemptible kernel via the use of IRQ threading and mutexes;
+ * Bill Huey for persuasively arguing on lkml that the mutex model is the
+ * right one; and to MontaVista, who ported pmutexes to 2.6.
+ *
+ * This code is a from-scratch implementation and is not based on pmutexes,
+ * but the idea of converting spinlocks to mutexes is used here too.
+ *
+ * lock debugging, locking tree, deadlock detection:
+ *
+ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ * Released under the General Public License (GPL).
+ *
+ * Includes portions of the generic R/W semaphore implementation from:
+ *
+ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
+ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
+ * - Derived also from comments by Linus
+ *
+ * Pending ownership of locks and ownership stealing:
+ *
+ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
+ *
+ * (also by Steven Rostedt)
+ * - Converted single pi_lock to individual task locks.
+ *
+ * By Esben Nielsen:
+ * Doing priority inheritance with the help of the scheduler.
+ *
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ * - major rework based on Esben Nielsen's initial patch
+ * - replaced thread_info references by task_struct refs
+ * - removed task->pending_owner dependency
+ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
+ * in the scheduler return path as discussed with Steven Rostedt
+ *
+ * Copyright (C) 2006, Kihon Technologies Inc.
+ * Steven Rostedt <rostedt@goodmis.org>
+ * - debugged and patched Thomas Gleixner's rework.
+ * - added back the cmpxchg to the rework.
+ * - turned atomic require back on for SMP.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/plist.h>
+#include <linux/fs.h>
+#include <linux/futex.h>
+#include <linux/hrtimer.h>
+
+#include "rtmutex_common.h"
+
+/*
+ * struct mutex functions
+ */
+void __mutex_do_init(struct mutex *mutex, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
+ lockdep_init_map(&mutex->dep_map, name, key, 0);
+#endif
+ mutex->lock.save_state = 0;
+}
+EXPORT_SYMBOL(__mutex_do_init);
+
+void __lockfunc _mutex_lock(struct mutex *lock)
+{
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock);
+
+int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible);
+
+int __lockfunc _mutex_lock_killable(struct mutex *lock)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ ret = rt_mutex_lock_killable(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
+{
+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+ rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock_nested);
+
+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+{
+ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
+ rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock_nest_lock);
+
+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
+{
+ int ret;
+
+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
+
+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ ret = rt_mutex_lock_killable(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable_nested);
+#endif
+
+int __lockfunc _mutex_trylock(struct mutex *lock)
+{
+ int ret = rt_mutex_trylock(&lock->lock);
+
+ if (ret)
+ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_trylock);
+
+void __lockfunc _mutex_unlock(struct mutex *lock)
+{
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ rt_mutex_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_unlock);
+
+/*
+ * rwlock_t functions
+ */
+int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+{
+ int ret = rt_mutex_trylock(&rwlock->lock);
+
+ migrate_disable();
+ if (ret)
+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+ else
+ migrate_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+{
+	int ret;
+
+	*flags = 0;
+	/*
+	 * rt_write_trylock() already disables migration on success and
+	 * rebalances it on failure; disabling it here as well would
+	 * leak a migrate_disable() on every successful trylock.
+	 */
+	ret = rt_write_trylock(rwlock);
+	return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock_irqsave);
+
+int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+{
+ struct rt_mutex *lock = &rwlock->lock;
+ int ret = 1;
+
+ /*
+ * recursive read locks succeed when current owns the lock,
+ * but not when read_depth == 0 which means that the lock is
+ * write locked.
+ */
+ migrate_disable();
+ if (rt_mutex_owner(lock) != current)
+ ret = rt_mutex_trylock(lock);
+ else if (!rwlock->read_depth)
+ ret = 0;
+
+ if (ret) {
+ rwlock->read_depth++;
+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+ } else
+ migrate_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
+
+void __lockfunc rt_write_lock(rwlock_t *rwlock)
+{
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __lockfunc rt_read_lock(rwlock_t *rwlock)
+{
+ struct rt_mutex *lock = &rwlock->lock;
+
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+
+ /*
+ * recursive read locks succeed when current owns the lock
+ */
+ if (rt_mutex_owner(lock) != current)
+ __rt_spin_lock(lock);
+ rwlock->read_depth++;
+}
+
+EXPORT_SYMBOL(rt_read_lock);
+
+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ __rt_spin_unlock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+{
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+
+ /* Release the lock only when read_depth is down to 0 */
+ if (--rwlock->read_depth == 0)
+ __rt_spin_unlock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+
+ return 0;
+}
+EXPORT_SYMBOL(rt_write_lock_irqsave);
+
+unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+
+ return 0;
+}
+EXPORT_SYMBOL(rt_read_lock_irqsave);
+
+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
+ lockdep_init_map(&rwlock->dep_map, name, key, 0);
+#endif
+ rwlock->lock.save_state = 1;
+ rwlock->read_depth = 0;
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
+
+/*
+ * rw_semaphores
+ */
+
+void rt_up_write(struct rw_semaphore *rwsem)
+{
+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+ rt_mutex_unlock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_up_write);
+
+void rt_up_read(struct rw_semaphore *rwsem)
+{
+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+ if (--rwsem->read_depth == 0)
+ rt_mutex_unlock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_up_read);
+
+/*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+void rt_downgrade_write(struct rw_semaphore *rwsem)
+{
+ BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
+ rwsem->read_depth = 1;
+}
+EXPORT_SYMBOL(rt_downgrade_write);
+
+int rt_down_write_trylock(struct rw_semaphore *rwsem)
+{
+ int ret = rt_mutex_trylock(&rwsem->lock);
+
+ if (ret)
+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(rt_down_write_trylock);
+
+void rt_down_write(struct rw_semaphore *rwsem)
+{
+ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
+ rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write);
+
+void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
+{
+ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+ rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write_nested);
+
+int rt_down_read_trylock(struct rw_semaphore *rwsem)
+{
+ struct rt_mutex *lock = &rwsem->lock;
+ int ret = 1;
+
+ /*
+ * recursive read locks succeed when current owns the rwsem,
+ * but not when read_depth == 0 which means that the rwsem is
+ * write locked.
+ */
+ if (rt_mutex_owner(lock) != current)
+ ret = rt_mutex_trylock(&rwsem->lock);
+ else if (!rwsem->read_depth)
+ ret = 0;
+
+ if (ret) {
+ rwsem->read_depth++;
+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rt_down_read_trylock);
+
+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
+{
+ struct rt_mutex *lock = &rwsem->lock;
+
+ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
+
+ if (rt_mutex_owner(lock) != current)
+ rt_mutex_lock(&rwsem->lock);
+ rwsem->read_depth++;
+}
+
+void rt_down_read(struct rw_semaphore *rwsem)
+{
+ __rt_down_read(rwsem, 0);
+}
+EXPORT_SYMBOL(rt_down_read);
+
+void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
+{
+ __rt_down_read(rwsem, subclass);
+}
+EXPORT_SYMBOL(rt_down_read_nested);
+
+void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
+ lockdep_init_map(&rwsem->dep_map, name, key, 0);
+#endif
+ rwsem->read_depth = 0;
+ rwsem->lock.save_state = 0;
+}
+EXPORT_SYMBOL(__rt_rwsem_init);
+
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+ /* dec if we can't possibly hit 0 */
+ if (atomic_add_unless(cnt, -1, 1))
+ return 0;
+ /* we might hit 0, so take the lock */
+ mutex_lock(lock);
+ if (!atomic_dec_and_test(cnt)) {
+ /* when we actually did the dec, we didn't hit 0 */
+ mutex_unlock(lock);
+ return 0;
+ }
+ /* we hit 0, and we hold the lock */
+ return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
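For illustration only (hypothetical types and callers, not part of this
patch): the usual pattern for atomic_dec_and_mutex_lock() is a
reference-count drop where only the final put must remove the object under
a lock, so all other puts stay lock-free:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_obj {
	atomic_t		refcnt;
	struct list_head	node;
};

static DEFINE_MUTEX(example_list_lock);

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_mutex_lock(&obj->refcnt, &example_list_lock)) {
		/* refcount hit zero and example_list_lock is held */
		list_del(&obj->node);
		mutex_unlock(&example_list_lock);
		kfree(obj);
	}
}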
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5cdd806..da9775b 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -110,8 +110,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
* __[spin|read|write]_lock_bh()
*/
BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
+#endif
#endif
@@ -195,6 +198,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
+#ifndef CONFIG_PREEMPT_RT_FULL
+
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
@@ -339,6 +344,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
+#endif /* !PREEMPT_RT_FULL */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 0374a59..9497033 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
EXPORT_SYMBOL(__raw_spin_lock_init);
+#ifndef CONFIG_PREEMPT_RT_FULL
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
}
EXPORT_SYMBOL(__rwlock_init);
+#endif
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
arch_spin_unlock(&lock->raw_lock);
}
+#ifndef CONFIG_PREEMPT_RT_FULL
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
+
+#endif