author     Scott Wood <scottwood@freescale.com>	2013-10-29 20:03:43 (GMT)
committer  Scott Wood <scottwood@freescale.com>	2013-10-29 20:03:43 (GMT)
commit     ae60d5d27c429b13cf28a09ab8b9d30682433c5a (patch)
tree       16b67511ef66b0580c267a5438d1face3a3778e6 /lib
parent     b095c5c2577aeedce2db847fa117596628d4e7cb (diff)
parent     d0ebef8230e267ec47d4d4a65fe3262e2ebb8026 (diff)
download   linux-fsl-qoriq-ae60d5d27c429b13cf28a09ab8b9d30682433c5a.tar.xz
Revert to 3.8 (no rt, no stable)
This is a merge from rtmerge, which has been similarly reverted.

Conflicts:
	drivers/crypto/caam/caamalg.c
	drivers/misc/Makefile
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             |  1
-rw-r--r--  lib/Kconfig.debug       |  2
-rw-r--r--  lib/Makefile            |  3
-rw-r--r--  lib/debugobjects.c      |  5
-rw-r--r--  lib/idr.c               |  9
-rw-r--r--  lib/kobject.c           |  9
-rw-r--r--  lib/locking-selftest.c  | 57
-rw-r--r--  lib/oid_registry.c      |  5
-rw-r--r--  lib/percpu-rwsem.c      |  4
-rw-r--r--  lib/radix-tree.c        |  5
-rw-r--r--  lib/scatterlist.c       |  6
-rw-r--r--  lib/smp_processor_id.c  |  6
-rw-r--r--  lib/spinlock_debug.c    |  5
13 files changed, 28 insertions, 89 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 5f521b8..f56997c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -322,7 +322,6 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
- depends on !PREEMPT_RT_FULL
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 24b60ba..67604e5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -164,7 +164,7 @@ config DEBUG_KERNEL
config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"
- depends on DEBUG_KERNEL && GENERIC_HARDIRQS && !PREEMPT_RT_BASE
+ depends on DEBUG_KERNEL && GENERIC_HARDIRQS
help
Enable this to generate a spurious interrupt as soon as a shared
interrupt handler is registered, and just before one is deregistered.
diff --git a/lib/Makefile b/lib/Makefile
index 72449f5..22bd795 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -38,11 +38,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-
-ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-endif
lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index cf5f02f..d11808c 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -309,10 +309,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (preempt_count() == 0 && !irqs_disabled())
-#endif
- fill_pool();
+ fill_pool();
db = get_bucket((unsigned long) addr);
diff --git a/lib/idr.c b/lib/idr.c
index ca5aa00..6482390 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -625,14 +625,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
return p;
}
- /*
- * Proceed to the next layer at the current level. Unlike
- * idr_for_each(), @id isn't guaranteed to be aligned to
- * layer boundary at this point and adding 1 << n may
- * incorrectly skip IDs. Make sure we jump to the
- * beginning of the next layer using round_up().
- */
- id = round_up(id + 1, 1 << n);
+ id += 1 << n;
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
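
The comment removed in this hunk explained the subtlety the revert reintroduces: when @id is not aligned to a layer boundary, the plain "id += 1 << n" overshoots the start of the next layer and silently skips valid IDs. A minimal userspace sketch of the arithmetic (the round_up macro mirrors the kernel's power-of-two helper; the values are only illustrative):

#include <stdio.h>

/* Power-of-two round_up(), matching the kernel helper's behaviour. */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	int n = 8;	/* each step at this level covers 1 << n IDs */
	int id = 300;	/* not aligned to a 1 << n boundary */

	/* Reverted form: lands at 556, skipping IDs 512..555. */
	printf("id + (1 << n)            = %d\n", id + (1 << n));
	/* round_up() form: jumps exactly to the next layer start, 512. */
	printf("round_up(id + 1, 1 << n) = %d\n", round_up(id + 1, 1 << n));
	return 0;
}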
diff --git a/lib/kobject.c b/lib/kobject.c
index a654866..e07ee1f 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,13 +529,6 @@ struct kobject *kobject_get(struct kobject *kobj)
return kobj;
}
-static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
-{
- if (!kref_get_unless_zero(&kobj->kref))
- kobj = NULL;
- return kobj;
-}
-
/*
* kobject_cleanup - free kobject resources.
* @kobj: object to cleanup
@@ -758,7 +751,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
- ret = kobject_get_unless_zero(k);
+ ret = kobject_get(k);
break;
}
}
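
For reference, the deleted helper wrapped kref_get_unless_zero(), which takes a reference only while the count is still non-zero; kobject_get(), restored here, increments unconditionally, so kset_find_obj() can again return an object whose release is already in flight. A rough userspace sketch of the get-unless-zero pattern in C11 atomics (a simplified model, not the kernel's kref implementation):

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

/* Take a reference only if the object is still live, mimicking the
 * semantics of kref_get_unless_zero(): never resurrect a zero count. */
static int get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return 1;	/* reference taken */
	return 0;			/* already dying: caller must not touch it */
}

int main(void)
{
	struct obj live = { 1 }, dying = { 0 };

	printf("live:  %d\n", get_unless_zero(&live));	/* 1 */
	printf("dying: %d\n", get_unless_zero(&dying));	/* 0 */
	return 0;
}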
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 23b8564..7aae0f2 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -47,10 +47,10 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
* Normal standalone locks, for the circular and irq-context
* dependency tests:
*/
-static DEFINE_RAW_SPINLOCK(lock_A);
-static DEFINE_RAW_SPINLOCK(lock_B);
-static DEFINE_RAW_SPINLOCK(lock_C);
-static DEFINE_RAW_SPINLOCK(lock_D);
+static DEFINE_SPINLOCK(lock_A);
+static DEFINE_SPINLOCK(lock_B);
+static DEFINE_SPINLOCK(lock_C);
+static DEFINE_SPINLOCK(lock_D);
static DEFINE_RWLOCK(rwlock_A);
static DEFINE_RWLOCK(rwlock_B);
@@ -73,12 +73,12 @@ static DECLARE_RWSEM(rwsem_D);
* but X* and Y* are different classes. We do this so that
* we do not trigger a real lockup:
*/
-static DEFINE_RAW_SPINLOCK(lock_X1);
-static DEFINE_RAW_SPINLOCK(lock_X2);
-static DEFINE_RAW_SPINLOCK(lock_Y1);
-static DEFINE_RAW_SPINLOCK(lock_Y2);
-static DEFINE_RAW_SPINLOCK(lock_Z1);
-static DEFINE_RAW_SPINLOCK(lock_Z2);
+static DEFINE_SPINLOCK(lock_X1);
+static DEFINE_SPINLOCK(lock_X2);
+static DEFINE_SPINLOCK(lock_Y1);
+static DEFINE_SPINLOCK(lock_Y2);
+static DEFINE_SPINLOCK(lock_Z1);
+static DEFINE_SPINLOCK(lock_Z2);
static DEFINE_RWLOCK(rwlock_X1);
static DEFINE_RWLOCK(rwlock_X2);
@@ -107,10 +107,10 @@ static DECLARE_RWSEM(rwsem_Z2);
*/
#define INIT_CLASS_FUNC(class) \
static noinline void \
-init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
- struct mutex *mutex, struct rw_semaphore *rwsem)\
+init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
+ struct rw_semaphore *rwsem) \
{ \
- raw_spin_lock_init(lock); \
+ spin_lock_init(lock); \
rwlock_init(rwlock); \
mutex_init(mutex); \
init_rwsem(rwsem); \
@@ -168,10 +168,10 @@ static void init_shared_classes(void)
* Shortcuts for lock/unlock API variants, to keep
* the testcases compact:
*/
-#define L(x) raw_spin_lock(&lock_##x)
-#define U(x) raw_spin_unlock(&lock_##x)
+#define L(x) spin_lock(&lock_##x)
+#define U(x) spin_unlock(&lock_##x)
#define LU(x) L(x); U(x)
-#define SI(x) raw_spin_lock_init(&lock_##x)
+#define SI(x) spin_lock_init(&lock_##x)
#define WL(x) write_lock(&rwlock_##x)
#define WU(x) write_unlock(&rwlock_##x)
@@ -911,7 +911,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
#define I2(x) \
do { \
- raw_spin_lock_init(&lock_##x); \
+ spin_lock_init(&lock_##x); \
rwlock_init(&rwlock_##x); \
mutex_init(&mutex_##x); \
init_rwsem(&rwsem_##x); \
@@ -1175,7 +1175,6 @@ void locking_selftest(void)
printk(" --------------------------------------------------------------------------\n");
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* irq-context testcases:
*/
@@ -1188,28 +1187,6 @@ void locking_selftest(void)
DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
-#else
- /* On -rt, we only do hardirq context test for raw spinlock */
- DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
- DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
-
- DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
- DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
-
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
-
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
-#endif
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index 318f382..d8de11f 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -9,7 +9,6 @@
* 2 of the Licence, or (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/export.h>
#include <linux/oid_registry.h>
#include <linux/kernel.h>
@@ -17,10 +16,6 @@
#include <linux/bug.h>
#include "oid_registry_data.c"
-MODULE_DESCRIPTION("OID Registry");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
/**
* look_up_OID - Find an OID registration for the specified data
* @data: Binary representation of the OID
diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c
index 2db0f42..652a8ee 100644
--- a/lib/percpu-rwsem.c
+++ b/lib/percpu-rwsem.c
@@ -84,12 +84,8 @@ void percpu_down_read(struct percpu_rw_semaphore *brw)
down_read(&brw->rw_sem);
atomic_inc(&brw->slow_read_ctr);
-#ifdef CONFIG_PREEMPT_RT_FULL
- up_read(&brw->rw_sem);
-#else
/* avoid up_read()->rwsem_release() */
__up_read(&brw->rw_sem);
-#endif
}
void percpu_up_read(struct percpu_rw_semaphore *brw)
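
In the hunk above, the slow-path reader holds the rwsem only long enough to synchronize with any writer, registers itself in slow_read_ctr, and drops the rwsem again; mainline calls __up_read() merely to avoid the lockdep release annotation, since the read side stays logically held through the counter until percpu_up_read(). A simplified pthread sketch of that pairing (the names loosely mirror the kernel's; the per-CPU reader fast path and the writer side are omitted):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct pcpu_rwsem {
	pthread_rwlock_t rw_sem;
	atomic_int slow_read_ctr;
};

static void percpu_down_read_slow(struct pcpu_rwsem *brw)
{
	pthread_rwlock_rdlock(&brw->rw_sem);		/* down_read() */
	atomic_fetch_add(&brw->slow_read_ctr, 1);	/* become a visible reader */
	pthread_rwlock_unlock(&brw->rw_sem);		/* __up_read(): the rwsem is
							   free again, but the read
							   side lives on in the ctr */
}

static void percpu_up_read_slow(struct pcpu_rwsem *brw)
{
	atomic_fetch_sub(&brw->slow_read_ctr, 1);	/* reader done; a writer
							   waiting for the ctr to
							   drain may now proceed */
}

int main(void)
{
	struct pcpu_rwsem brw = { PTHREAD_RWLOCK_INITIALIZER, 0 };

	percpu_down_read_slow(&brw);
	printf("slow readers: %d\n", atomic_load(&brw.slow_read_ctr));	/* 1 */
	percpu_up_read_slow(&brw);
	printf("slow readers: %d\n", atomic_load(&brw.slow_read_ctr));	/* 0 */
	return 0;
}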
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 63bac7d..e796429 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -215,13 +215,12 @@ radix_tree_node_alloc(struct radix_tree_root *root)
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
- rtp = &get_cpu_var(radix_tree_preloads);
+ rtp = &__get_cpu_var(radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes[rtp->nr - 1];
rtp->nodes[rtp->nr - 1] = NULL;
rtp->nr--;
}
- put_cpu_var(radix_tree_preloads);
}
if (ret == NULL)
ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -256,7 +255,6 @@ radix_tree_node_free(struct radix_tree_node *node)
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
@@ -291,7 +289,6 @@ out:
return ret;
}
EXPORT_SYMBOL(radix_tree_preload);
-#endif
/*
* Return the maximum key which can be store into a
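
The reverted lines trade get_cpu_var()/put_cpu_var(), which bracket the per-CPU access with preempt_disable()/preempt_enable(), for the raw __get_cpu_var(), on the assumption that radix_tree_preload() has already disabled preemption for the caller. A toy userspace model of why the raw access depends on that guarantee (every name below is a stand-in, not a kernel API):

#include <assert.h>
#include <stdio.h>

static int preempt_count;	/* stand-in for the kernel's preempt counter */
static int per_cpu_data[4];	/* stand-in for a per-CPU variable */
static int current_cpu;		/* stand-in for smp_processor_id() */

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

/* Raw access in the spirit of __get_cpu_var(): valid only while the task
 * cannot migrate, i.e. while preemption is off. */
static int *raw_get_cpu_var(void)
{
	assert(preempt_count > 0);	/* migration here would corrupt data */
	return &per_cpu_data[current_cpu];
}

int main(void)
{
	preempt_disable();		/* what radix_tree_preload() provides */
	*raw_get_cpu_var() += 1;	/* safe: pinned to this CPU */
	preempt_enable();

	printf("cpu0 counter: %d\n", per_cpu_data[0]);
	return 0;
}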
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 43603ee..7874b01 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -499,7 +499,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
- WARN_ON_ONCE(!pagefault_disabled());
+ WARN_ON_ONCE(preemptible());
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
@@ -539,7 +539,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
sg_miter_start(&miter, sgl, nents, sg_flags);
- local_irq_save_nort(flags);
+ local_irq_save(flags);
while (sg_miter_next(&miter) && offset < buflen) {
unsigned int len;
@@ -556,7 +556,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
sg_miter_stop(&miter);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return offset;
}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index dbb1570..4c0d0e5 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
if (!printk_ratelimit())
goto out_enable;
- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
- "code: %s/%d\n", preempt_count() - 1,
- __migrate_disabled(current), current->comm, current->pid);
+ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
+ "code: %s/%d\n",
+ preempt_count() - 1, current->comm, current->pid);
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9497033..0374a59 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -31,7 +31,6 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
EXPORT_SYMBOL(__raw_spin_lock_init);
-#ifndef CONFIG_PREEMPT_RT_FULL
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
@@ -49,7 +48,6 @@ void __rwlock_init(rwlock_t *lock, const char *name,
}
EXPORT_SYMBOL(__rwlock_init);
-#endif
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
@@ -161,7 +159,6 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
arch_spin_unlock(&lock->raw_lock);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
@@ -303,5 +300,3 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
-
-#endif