author     Linus Torvalds <torvalds@linux-foundation.org>  2011-03-17 00:21:00 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-17 00:21:00 (GMT)
commit     f74b9444192c60603020c61d7915b72893137edc (patch)
tree       8b1d16d373234038c2b045c9ceb3c33b93059e8a /lib
parent     7a6362800cb7d1d618a697a650c7aaed3eb39320 (diff)
parent     4ba8216cd90560bc402f52076f64d8546e8aefcb (diff)
download   linux-fsl-qoriq-f74b9444192c60603020c61d7915b72893137edc.tar.xz
Merge branch 'config' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl
* 'config' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl:
  BKL: That's all, folks
  fs/locks.c: Remove stale FIXME left over from BKL conversion
  ipx: remove the BKL
  appletalk: remove the BKL
  x25: remove the BKL
  ufs: remove the BKL
  hpfs: remove the BKL
  drivers: remove extraneous includes of smp_lock.h
  tracing: don't trace the BKL
  adfs: remove the big kernel lock
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug |   9
-rw-r--r--  lib/Makefile      |   1
-rw-r--r--  lib/kernel_lock.c | 143
3 files changed, 0 insertions, 153 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2b97418..6f440d8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -470,15 +470,6 @@ config DEBUG_MUTEXES
This feature allows mutex semantics violations to be detected and
reported.
-config BKL
- bool "Big Kernel Lock" if (SMP || PREEMPT)
- default y
- help
- This is the traditional lock that is used in old code instead
- of proper locking. All drivers that use the BKL should depend
- on this symbol.
- Say Y here unless you are working on removing the BKL.
-
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index b73ba01..ef7ed71 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
-obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
deleted file mode 100644
index b135d04a..0000000
--- a/lib/kernel_lock.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * lib/kernel_lock.c
- *
- * This is the traditional BKL - big kernel lock. Largely
- * relegated to obsolescence, but used by various less
- * important (or lazy) subsystems.
- */
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
-#include <linux/smp_lock.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/bkl.h>
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - do_raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-	while (!do_raw_spin_trylock(&kernel_flag)) {
-		if (need_resched())
-			return -EAGAIN;
-		cpu_relax();
-	}
-	preempt_disable();
-	return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-	do_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * do_raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-	preempt_disable();
-	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
-		/*
-		 * If preemption was disabled even before this
-		 * was called, there's nothing we can be polite
-		 * about - just spin.
-		 */
-		if (preempt_count() > 1) {
-			do_raw_spin_lock(&kernel_flag);
-			return;
-		}
-
-		/*
-		 * Otherwise, let's wait for the kernel lock
-		 * with preemption enabled..
-		 */
-		do {
-			preempt_enable();
-			while (raw_spin_is_locked(&kernel_flag))
-				cpu_relax();
-			preempt_disable();
-		} while (!do_raw_spin_trylock(&kernel_flag));
-	}
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-	do_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-	/*
-	 * the BKL is not covered by lockdep, so we open-code the
-	 * unlocking sequence (and thus avoid the dep-chain ops):
-	 */
-	do_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc _lock_kernel(const char *func, const char *file, int line)
-{
-	int depth = current->lock_depth + 1;
-
-	trace_lock_kernel(func, file, line);
-
-	if (likely(!depth)) {
-		might_sleep();
-		__lock_kernel();
-	}
-	current->lock_depth = depth;
-}
-
-void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
-{
-	BUG_ON(current->lock_depth < 0);
-	if (likely(--current->lock_depth < 0))
-		__unlock_kernel();
-
-	trace_unlock_kernel(func, file, line);
-}
-
-EXPORT_SYMBOL(_lock_kernel);
-EXPORT_SYMBOL(_unlock_kernel);
-
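Migration note (not part of this commit's diff): the subsystem patches merged above typically replaced lock_kernel()/unlock_kernel() pairs with an ordinary mutex private to the subsystem. Below is a minimal sketch of that pattern for a hypothetical foo driver; the foo_* names and foo_lock are illustrative and do not come from any of the merged patches.

#include <linux/fs.h>
#include <linux/mutex.h>

/* Hypothetical per-driver lock taking over the serialization the BKL provided. */
static DEFINE_MUTEX(foo_lock);

static long foo_do_ioctl(unsigned int cmd, unsigned long arg)
{
	/* ... body that previously ran under the BKL ... */
	return 0;
}

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;

	/* Before: lock_kernel(); ... unlock_kernel(); */
	mutex_lock(&foo_lock);
	ret = foo_do_ioctl(cmd, arg);
	mutex_unlock(&foo_lock);

	return ret;
}

Unlike the BKL, a mutex is neither recursive nor automatically dropped across schedule(), which is the main behavioural difference such conversions have to account for.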