author     Scott Wood <scottwood@freescale.com>    2014-04-10 00:49:54 (GMT)
committer  Scott Wood <scottwood@freescale.com>    2014-04-10 00:49:54 (GMT)
commit     b86c95253af2105c9824146c6569a6b0f39ab124 (patch)
tree       9100acbdc843b1081b154135000b89ee95cd10d3 /lib
parent     e5feac72dad5475167445de9af564c2d592872bb (diff)
parent     07c8b57b111585a617b2b456497fc9b33c00743c (diff)
download   linux-fsl-qoriq-b86c95253af2105c9824146c6569a6b0f39ab124.tar.xz
Merge branch 'rtmerge' into sdk-v1.6.x
This merges 3.12.15-rt25.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Conflicts:
drivers/misc/Makefile
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/gianfar_sysfs.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             2
-rw-r--r--  lib/Kconfig.debug       2
-rw-r--r--  lib/Makefile            3
-rw-r--r--  lib/debugobjects.c      5
-rw-r--r--  lib/idr.c              37
-rw-r--r--  lib/locking-selftest.c 23
-rw-r--r--  lib/percpu-rwsem.c      4
-rw-r--r--  lib/radix-tree.c        5
-rw-r--r--  lib/scatterlist.c       6
-rw-r--r--  lib/smp_processor_id.c  6
-rw-r--r--  lib/spinlock_debug.c    5
11 files changed, 86 insertions, 12 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 2479a69..4490089 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -57,6 +57,7 @@ config CMPXCHG_LOCKREF
 	depends on !GENERIC_LOCKBREAK
 	depends on !DEBUG_SPINLOCK
 	depends on !DEBUG_LOCK_ALLOC
+	depends on !PREEMPT_RT_BASE
 
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
@@ -350,6 +351,7 @@ config CHECK_SIGNATURE
 
 config CPUMASK_OFFSTACK
 	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+	depends on !PREEMPT_RT_FULL
 	help
 	  Use dynamic allocation for cpumask_var_t, instead of putting
 	  them on the stack.  This is a bit more expensive, but avoids
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 094f315..bde5dd2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -597,7 +597,7 @@ endmenu # "Memory Debugging"
 
 config DEBUG_SHIRQ
 	bool "Debug shared IRQ handlers"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !PREEMPT_RT_BASE
 	help
 	  Enable this to generate a spurious interrupt as soon as a shared
 	  interrupt handler is registered, and just before one is deregistered.
diff --git a/lib/Makefile b/lib/Makefile
index d7ce91e..6e23a0f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -43,8 +43,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+endif
 lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
 
 GCOV_PROFILE_hweight.o := n
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index bf2c8b1..1ac2049 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -308,7 +308,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 	struct debug_obj *obj;
 	unsigned long flags;
 
-	fill_pool();
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (preempt_count() == 0 && !irqs_disabled())
+#endif
+		fill_pool();
 
 	db = get_bucket((unsigned long) addr);
 
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -37,6 +37,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/locallock.h>
 
 #define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
 #define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)
@@ -389,6 +390,36 @@ int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 }
 EXPORT_SYMBOL(__idr_get_new_above);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
+
+static inline void idr_preload_lock(void)
+{
+	local_lock(idr_lock);
+}
+
+static inline void idr_preload_unlock(void)
+{
+	local_unlock(idr_lock);
+}
+
+void idr_preload_end(void)
+{
+	idr_preload_unlock();
+}
+EXPORT_SYMBOL(idr_preload_end);
+#else
+static inline void idr_preload_lock(void)
+{
+	preempt_disable();
+}
+
+static inline void idr_preload_unlock(void)
+{
+	preempt_enable();
+}
+#endif
+
 /**
  * idr_preload - preload for idr_alloc()
  * @gfp_mask: allocation mask to use for preloading
@@ -423,7 +454,7 @@ void idr_preload(gfp_t gfp_mask)
 	WARN_ON_ONCE(in_interrupt());
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
-	preempt_disable();
+	idr_preload_lock();
 
 	/*
 	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
@@ -435,9 +466,9 @@ void idr_preload(gfp_t gfp_mask)
 	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
 		struct idr_layer *new;
 
-		preempt_enable();
+		idr_preload_unlock();
 		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-		preempt_disable();
+		idr_preload_lock();
 		if (!new)
 			break;
 
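The idr.c hunks above show the canonical -rt conversion in this merge: a bare preempt_disable()/preempt_enable() critical section around per-CPU preload state becomes a local lock, which (as the #else branch shows) still behaves like preempt_disable() on non-RT kernels but turns into a per-CPU sleeping lock on PREEMPT_RT_FULL, so the preload path stays preemptible and may sleep in kmem_cache_zalloc(). A minimal sketch of the same pattern, assuming the RT patch set's <linux/locallock.h>; my_lock, my_cnt and my_refill() are hypothetical names for illustration, not kernel symbols:

	#include <linux/locallock.h>	/* RT patch set header, as in the hunk above */
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, my_cnt);	/* hypothetical per-CPU state */
	static DEFINE_LOCAL_IRQ_LOCK(my_lock);

	static void my_refill(void)
	{
		/*
		 * On !RT this is effectively preempt_disable(); on RT it takes
		 * a per-CPU sleeping lock, so the section can be preempted yet
		 * is still serialized against other users of my_cnt on this CPU.
		 */
		local_lock(my_lock);
		__this_cpu_inc(my_cnt);
		local_unlock(my_lock);
	}

The same motivation drives the radix-tree.c hunk further down, where get_cpu_var()/put_cpu_var() bound the per-CPU access explicitly instead of relying on the caller's preemption state.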
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6dc09d8..0acf354 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1858,6 +1858,7 @@ void locking_selftest(void)
 
 	printk(" --------------------------------------------------------------------------\n");
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 	/*
 	 * irq-context testcases:
 	 */
@@ -1870,6 +1871,28 @@ void locking_selftest(void)
 	DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
 //	DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
 
+#else
+	/* On -rt, we only do hardirq context test for raw spinlock */
+	DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
+	DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
+
+	DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
+	DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
+
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
+
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
+	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
+#endif
 
 	ww_tests();
 
diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c
index 652a8ee..2db0f42 100644
--- a/lib/percpu-rwsem.c
+++ b/lib/percpu-rwsem.c
@@ -84,8 +84,12 @@ void percpu_down_read(struct percpu_rw_semaphore *brw)
 
 	down_read(&brw->rw_sem);
 	atomic_inc(&brw->slow_read_ctr);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	up_read(&brw->rw_sem);
+#else
 	/* avoid up_read()->rwsem_release() */
 	__up_read(&brw->rw_sem);
+#endif
 }
 
 void percpu_up_read(struct percpu_rw_semaphore *brw)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7811ed3..e7b61e8 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -221,12 +221,13 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		 * succeed in getting a node here (and never reach
 		 * kmem_cache_alloc)
 		 */
-		rtp = &__get_cpu_var(radix_tree_preloads);
+		rtp = &get_cpu_var(radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes[rtp->nr - 1];
 			rtp->nodes[rtp->nr - 1] = NULL;
 			rtp->nr--;
 		}
+		put_cpu_var(radix_tree_preloads);
 	}
 	if (ret == NULL)
 		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -261,6 +262,7 @@ radix_tree_node_free(struct radix_tree_node *node)
 	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Load up this CPU's radix_tree_node buffer with sufficient objects to
  * ensure that the addition of a single element in the tree cannot fail.  On
@@ -326,6 +328,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
+#endif
 
 /*
  * Return the maximum key which can be store into a
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d16fa29..9c1236e 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -582,7 +582,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			flush_kernel_dcache_page(miter->page);
 
 		if (miter->__flags & SG_MITER_ATOMIC) {
-			WARN_ON_ONCE(preemptible());
+			WARN_ON_ONCE(!pagefault_disabled());
 			kunmap_atomic(miter->addr);
 		} else
 			kunmap(miter->page);
@@ -627,7 +627,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 	if (!sg_miter_skip(&miter, skip))
 		return false;
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 
 	while (sg_miter_next(&miter) && offset < buflen) {
 		unsigned int len;
@@ -644,7 +644,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
 	sg_miter_stop(&miter);
 
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 
 	return offset;
 }
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 4c0d0e5..dbb1570 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
 	if (!printk_ratelimit())
 		goto out_enable;
 
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
-			"code: %s/%d\n",
-			preempt_count() - 1, current->comm, current->pid);
+	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
+	       "code: %s/%d\n", preempt_count() - 1,
+	       __migrate_disabled(current), current->comm, current->pid);
 
 	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
 	dump_stack();
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 0374a59..9497033 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void __rwlock_init(rwlock_t *lock, const char *name,
 		   struct lock_class_key *key)
 {
@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 }
 
 EXPORT_SYMBOL(__rwlock_init);
+#endif
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
 	arch_spin_unlock(&lock->raw_lock);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
 	if (!debug_locks_off())
@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock)
 	debug_write_unlock(lock);
 	arch_write_unlock(&lock->raw_lock);
 }
+
+#endif
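Two RT-patch helpers appear in these hunks without their definitions. local_irq_save_nort()/local_irq_restore_nort() (scatterlist.c) disable interrupts only on non-RT kernels; on PREEMPT_RT_FULL they merely preserve the flags word, because code inside the section may take sleeping locks. Roughly, as stand-in definitions for illustration (the real ones live in the RT tree's headers, and may differ in detail):

	#ifndef CONFIG_PREEMPT_RT_FULL
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#else
	/* RT: leave interrupts enabled; just capture/discard the flags word */
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	((void)(flags))
	#endif

The other is __migrate_disabled(current) in smp_processor_id.c: the debug splat now prints the task's migrate-disable depth next to the preempt count, so on RT one can tell "preemptible but pinned to this CPU" (harmless) apart from genuinely migratable code calling smp_processor_id().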