Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |   2
-rw-r--r--  lib/Kconfig.debug                |   2
-rw-r--r--  lib/Makefile                     |   3
-rw-r--r--  lib/bitmap.c                     |   8
-rw-r--r--  lib/btree.c                      |   1
-rw-r--r--  lib/debugobjects.c               |   5
-rw-r--r--  lib/decompress_bunzip2.c         |   2
-rw-r--r--  lib/idr.c                        |  45
-rw-r--r--  lib/locking-selftest.c           |  23
-rw-r--r--  lib/lz4/lz4_decompress.c         |  12
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c  |  43
-rw-r--r--  lib/nlattr.c                     |   4
-rw-r--r--  lib/percpu-rwsem.c               |   4
-rw-r--r--  lib/percpu_counter.c             |   2
-rw-r--r--  lib/plist.c                      |  52
-rw-r--r--  lib/radix-tree.c                 | 111
-rw-r--r--  lib/scatterlist.c                |   6
-rw-r--r--  lib/smp_processor_id.c           |   6
-rw-r--r--  lib/spinlock_debug.c             |   5
-rw-r--r--  lib/string.c                     |  16
20 files changed, 168 insertions, 184 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index eee9e09..b3c8be0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -57,7 +57,6 @@ config CMPXCHG_LOCKREF
 	depends on !GENERIC_LOCKBREAK
 	depends on !DEBUG_SPINLOCK
 	depends on !DEBUG_LOCK_ALLOC
-	depends on !PREEMPT_RT_BASE
 
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
@@ -344,7 +343,6 @@ config CHECK_SIGNATURE
 
 config CPUMASK_OFFSTACK
 	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
-	depends on !PREEMPT_RT_FULL
 	help
 	  Use dynamic allocation for cpumask_var_t, instead of putting
 	  them on the stack.  This is a bit more expensive, but avoids
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index bde5dd2..094f315 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -597,7 +597,7 @@ endmenu # "Memory Debugging"
 
 config DEBUG_SHIRQ
 	bool "Debug shared IRQ handlers"
-	depends on DEBUG_KERNEL && !PREEMPT_RT_BASE
+	depends on DEBUG_KERNEL
 	help
 	  Enable this to generate a spurious interrupt as soon as a shared
 	  interrupt handler is registered, and just before one is deregistered.
diff --git a/lib/Makefile b/lib/Makefile
index 4461595..6af6fbb 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -43,11 +43,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-
-ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-endif
 lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
 
 GCOV_PROFILE_hweight.o := n
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06f7e4f..e5c4ebe 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
 		lower = src[off + k];
 		if (left && off + k == lim - 1)
 			lower &= mask;
-		dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
+		dst[k] = lower >> rem;
+		if (rem)
+			dst[k] |= upper << (BITS_PER_LONG - rem);
 		if (left && k == lim - 1)
 			dst[k] &= mask;
 	}
@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
 		upper = src[k];
 		if (left && k == lim - 1)
 			upper &= (1UL << left) - 1;
-		dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
+		dst[k + off] = upper << rem;
+		if (rem)
+			dst[k + off] |= lower >> (BITS_PER_LONG - rem);
 		if (left && k + off == lim - 1)
 			dst[k + off] &= (1UL << left) - 1;
 	}
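The bitmap.c hunks cure undefined behavior rather than restyle the code: when the requested shift distance is an exact multiple of BITS_PER_LONG, rem is 0 and the old one-liner shifted an unsigned long by the full word width, which C leaves undefined (x86, for instance, masks the shift count, so the word that should have been shifted out leaks back in). A minimal standalone sketch of the failure mode and the fixed formula (a hypothetical userspace demo, not kernel code):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

int main(void)
{
	unsigned long upper = 0xff, lower = 0x1;
	int rem = 0;	/* shift distance is a multiple of the word size */

	/*
	 * Old formula -- undefined behavior when rem == 0, because the
	 * shift count equals BITS_PER_LONG:
	 *
	 *	dst = upper << (BITS_PER_LONG - rem) | lower >> rem;
	 */

	/* Fixed formula: only fold 'upper' in when rem is non-zero. */
	unsigned long dst = lower >> rem;
	if (rem)
		dst |= upper << (BITS_PER_LONG - rem);

	printf("dst = %#lx\n", dst);	/* 0x1, as intended */
	return 0;
}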
diff --git a/lib/btree.c b/lib/btree.c
index f9a4846..4264871 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
 
 void btree_destroy(struct btree_head *head)
 {
+	mempool_free(head->node, head->mempool);
 	mempool_destroy(head->mempool);
 	head->mempool = NULL;
 }
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 1ac2049..bf2c8b1 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -308,10 +308,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 	struct debug_obj *obj;
 	unsigned long flags;
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-	if (preempt_count() == 0 && !irqs_disabled())
-#endif
-		fill_pool();
+	fill_pool();
 
 	db = get_bucket((unsigned long) addr);
 
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 31c5f76..f504027 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
 	if (get_bits(bd, 1))
 		return RETVAL_OBSOLETE_INPUT;
 	origPtr = get_bits(bd, 24);
-	if (origPtr > dbufSize)
+	if (origPtr >= dbufSize)
 		return RETVAL_DATA_ERROR;
 	/* mapping table: if some byte values are never used (encoding things
 	   like ascii text), the compression code removes the gaps to have fewer
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -37,7 +37,6 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
-#include <linux/locallock.h>
 
 #define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
 #define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)
@@ -251,7 +250,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
 			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
 			/* if already at the top layer, we need to grow */
-			if (id >= 1 << (idp->layers * IDR_BITS)) {
+			if (id > idr_max(idp->layers)) {
 				*starting_id = id;
 				return -EAGAIN;
 			}
@@ -390,36 +389,6 @@ int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 }
 EXPORT_SYMBOL(__idr_get_new_above);
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
-
-static inline void idr_preload_lock(void)
-{
-	local_lock(idr_lock);
-}
-
-static inline void idr_preload_unlock(void)
-{
-	local_unlock(idr_lock);
-}
-
-void idr_preload_end(void)
-{
-	idr_preload_unlock();
-}
-EXPORT_SYMBOL(idr_preload_end);
-#else
-static inline void idr_preload_lock(void)
-{
-	preempt_disable();
-}
-
-static inline void idr_preload_unlock(void)
-{
-	preempt_enable();
-}
-#endif
-
 /**
  * idr_preload - preload for idr_alloc()
  * @gfp_mask: allocation mask to use for preloading
@@ -454,7 +423,7 @@ void idr_preload(gfp_t gfp_mask)
 	WARN_ON_ONCE(in_interrupt());
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
-	idr_preload_lock();
+	preempt_disable();
 
 	/*
 	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
@@ -466,9 +435,9 @@ void idr_preload(gfp_t gfp_mask)
 	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
 		struct idr_layer *new;
 
-		idr_preload_unlock();
+		preempt_enable();
 		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-		idr_preload_lock();
+		preempt_disable();
 		if (!new)
 			break;
 
@@ -858,12 +827,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	if (!p)
 		return ERR_PTR(-EINVAL);
 
-	n = (p->layer+1) * IDR_BITS;
-
-	if (id >= (1 << n))
+	if (id > idr_max(p->layer + 1))
 		return ERR_PTR(-EINVAL);
 
-	n -= IDR_BITS;
+	n = p->layer * IDR_BITS;
 	while ((n > 0) && p) {
 		p = p->ary[(id >> n) & IDR_MASK];
 		n -= IDR_BITS;
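Both idr.c bound checks now go through idr_max() instead of open-coding 1 << (idp->layers * IDR_BITS), which can shift by the full width of int once enough layers stack up — undefined behavior, and a check that can pass exactly when it must fail. The helper itself sits outside these hunks; the sketch below assumes the definition lib/idr.c of this vintage carries upstream, reusing the MAX_IDR_SHIFT shown in the first hunk's context and IDR_BITS from <linux/idr.h>:

#include <linux/kernel.h>	/* min_t() */
#include <linux/idr.h>		/* IDR_BITS */

#define MAX_IDR_SHIFT	(sizeof(int) * 8 - 1)	/* as in lib/idr.c above */

/*
 * Presumed definition of the helper the new checks call: clamping the
 * shift count to MAX_IDR_SHIFT keeps "1 << bits" strictly below the
 * width of int, so the largest valid ID is computed without UB no
 * matter how many layers the tree has grown.
 */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}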
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 0acf354..6dc09d8 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1858,7 +1858,6 @@ void locking_selftest(void)
 
 	printk("  --------------------------------------------------------------------------\n");
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 	/*
 	 * irq-context testcases:
 	 */
@@ -1871,28 +1870,6 @@
 	DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
 //	DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
 
-#else
-	/* On -rt, we only do hardirq context test for raw spinlock */
-	DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
-	DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
-
-	DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
-	DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
-
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
-
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
-	DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
-#endif
 
 	ww_tests();
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index df6839e..7a85967 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 			len = *ip++;
 			for (; len == 255; length += 255)
 				len = *ip++;
+			if (unlikely(length > (size_t)(length + len)))
+				goto _output_error;
 			length += len;
 		}
 
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 		if (length == ML_MASK) {
 			for (; *ip == 255; length += 255)
 				ip++;
+			if (unlikely(length > (size_t)(length + *ip)))
+				goto _output_error;
 			length += *ip++;
 		}
 
@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 
 	/* write overflow error detected */
 _output_error:
-	return (int) (-(((char *)ip) - source));
+	return -1;
 }
 
 static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
@@ -188,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 			int s = 255;
 			while ((ip < iend) && (s == 255)) {
 				s = *ip++;
+				if (unlikely(length > (size_t)(length + s)))
+					goto _output_error;
 				length += s;
 			}
 		}
@@ -228,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 		if (length == ML_MASK) {
 			while (ip < iend) {
 				int s = *ip++;
+				if (unlikely(length > (size_t)(length + s)))
+					goto _output_error;
 				length += s;
 				if (s == 255)
 					continue;
@@ -280,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 
 	/* write overflow error detected */
 _output_error:
-	return (int) (-(((char *) ip) - source));
+	return -1;
}
 
 int lz4_decompress(const unsigned char *src, size_t *src_len,
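The guard repeated across the lz4 hunks is an overflow probe for the run-length decoder: length accumulates attacker-controlled extension bytes, and since unsigned arithmetic wraps modulo 2^N, a sum that overflows compares smaller than its left operand. Testing length > (size_t)(length + len) therefore fires exactly in the wrap case, before the bogus length drives a copy. A userspace sketch of the same idiom (function and variable names here are illustrative, not from the kernel):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Mirrors the added check: refuse the addition if it would wrap. */
static int accumulate(size_t *length, unsigned char len)
{
	if (*length > (size_t)(*length + len))
		return -1;	/* would wrap: bail out, like _output_error */
	*length += len;
	return 0;
}

int main(void)
{
	size_t length = SIZE_MAX - 100;

	printf("add 50:  %s\n", accumulate(&length, 50) ? "overflow" : "ok");
	printf("add 200: %s\n", accumulate(&length, 200) ? "overflow" : "ok");
	return 0;
}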
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 569985d..a1c387f 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -25,6 +25,16 @@
 #define NEED_OP(x)	if (!HAVE_OP(x)) goto output_overrun
 #define TEST_LB(m_pos)	if ((m_pos) < out) goto lookbehind_overrun
 
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
+ * count without overflowing an integer. The multiply will overflow when
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
+ * depending on the base count. Since the base count is taken from a u8
+ * and a few bits, it is safe to assume that it will always be lower than
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
+ * two less 255 steps. See Documentation/lzo.txt for more information.
+ */
+#define MAX_255_COUNT      ((((size_t)~0) / 255) - 2)
+
 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
 			  unsigned char *out, size_t *out_len)
 {
@@ -55,12 +65,19 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
 	if (t < 16) {
 		if (likely(state == 0)) {
 			if (unlikely(t == 0)) {
+				size_t offset;
+				const unsigned char *ip_last = ip;
+
 				while (unlikely(*ip == 0)) {
-					t += 255;
 					ip++;
 					NEED_IP(1);
 				}
-				t += 15 + *ip++;
+				offset = ip - ip_last;
+				if (unlikely(offset > MAX_255_COUNT))
+					return LZO_E_ERROR;
+
+				offset = (offset << 8) - offset;
+				t += offset + 15 + *ip++;
 			}
 			t += 3;
copy_literal_run:
@@ -116,12 +133,19 @@ copy_literal_run:
 		} else if (t >= 32) {
 			t = (t & 31) + (3 - 1);
 			if (unlikely(t == 2)) {
+				size_t offset;
+				const unsigned char *ip_last = ip;
+
 				while (unlikely(*ip == 0)) {
-					t += 255;
 					ip++;
 					NEED_IP(1);
 				}
-				t += 31 + *ip++;
+				offset = ip - ip_last;
+				if (unlikely(offset > MAX_255_COUNT))
+					return LZO_E_ERROR;
+
+				offset = (offset << 8) - offset;
+				t += offset + 31 + *ip++;
 				NEED_IP(2);
 			}
 			m_pos = op - 1;
@@ -134,12 +158,19 @@ copy_literal_run:
 			m_pos -= (t & 8) << 11;
 			t = (t & 7) + (3 - 1);
 			if (unlikely(t == 2)) {
+				size_t offset;
+				const unsigned char *ip_last = ip;
+
 				while (unlikely(*ip == 0)) {
-					t += 255;
 					ip++;
 					NEED_IP(1);
 				}
-				t += 7 + *ip++;
+				offset = ip - ip_last;
+				if (unlikely(offset > MAX_255_COUNT))
+					return LZO_E_ERROR;
+
+				offset = (offset << 8) - offset;
+				t += offset + 7 + *ip++;
 				NEED_IP(2);
 			}
 			next = get_unaligned_le16(ip);
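The lzo rework drops the per-byte t += 255 (which a long enough run of zero bytes could wrap) and instead counts the run with a pointer difference, rejects runs longer than MAX_255_COUNT, and folds the whole contribution in at once as (offset << 8) - offset, i.e. offset * 255. A small standalone check of that arithmetic; unsigned arithmetic is modular, so even if the intermediate shift wraps, the result is exact as long as offset * 255 itself fits in size_t — which is what the MAX_255_COUNT bound guarantees, with room left for the base count:

#include <assert.h>
#include <stddef.h>

#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)

int main(void)
{
	size_t offset, max;

	/* (offset << 8) - offset == offset * 256 - offset == offset * 255 */
	for (offset = 0; offset <= 1000; offset++)
		assert(((offset << 8) - offset) == offset * 255);

	/* The largest accepted run still leaves room for a base count
	 * of at most 2 * 255, exactly as the comment above argues. */
	max = (MAX_255_COUNT << 8) - MAX_255_COUNT;
	assert(max <= (size_t)~0 - 2 * 255);
	return 0;
}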
diff --git a/lib/nlattr.c b/lib/nlattr.c
index fc67547..10ad042 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -201,8 +201,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
 	}
 
 	if (unlikely(rem > 0))
-		printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
-		       "attributes.\n", rem);
+		pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+				    rem, current->comm);
 
 	err = 0;
 errout:
diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c
index 2db0f42..652a8ee 100644
--- a/lib/percpu-rwsem.c
+++ b/lib/percpu-rwsem.c
@@ -84,12 +84,8 @@ void percpu_down_read(struct percpu_rw_semaphore *brw)
 
 	down_read(&brw->rw_sem);
 	atomic_inc(&brw->slow_read_ctr);
-#ifdef CONFIG_PREEMPT_RT_FULL
-	up_read(&brw->rw_sem);
-#else
 	/* avoid up_read()->rwsem_release() */
 	__up_read(&brw->rw_sem);
-#endif
 }
 
 void percpu_up_read(struct percpu_rw_semaphore *brw)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 93c5d5e..741a426 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -166,7 +166,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 	struct percpu_counter *fbc;
 
 	compute_batch_value();
-	if (action != CPU_DEAD)
+	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;
 
 	cpu = (unsigned long)hcpu;
diff --git a/lib/plist.c b/lib/plist.c
index 1ebc95f..0f2084d 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -134,6 +134,46 @@ void plist_del(struct plist_node *node, struct plist_head *head)
 	plist_check_head(head);
 }
 
+/**
+ * plist_requeue - Requeue @node at end of same-prio entries.
+ *
+ * This is essentially an optimized plist_del() followed by
+ * plist_add().  It moves an entry already in the plist to
+ * after any other same-priority entries.
+ *
+ * @node:	&struct plist_node pointer - entry to be moved
+ * @head:	&struct plist_head pointer - list head
+ */
+void plist_requeue(struct plist_node *node, struct plist_head *head)
+{
+	struct plist_node *iter;
+	struct list_head *node_next = &head->node_list;
+
+	plist_check_head(head);
+	BUG_ON(plist_head_empty(head));
+	BUG_ON(plist_node_empty(node));
+
+	if (node == plist_last(head))
+		return;
+
+	iter = plist_next(node);
+
+	if (node->prio != iter->prio)
+		return;
+
+	plist_del(node, head);
+
+	plist_for_each_continue(iter, head) {
+		if (node->prio != iter->prio) {
+			node_next = &iter->node_list;
+			break;
+		}
+	}
+	list_add_tail(&node->node_list, node_next);
+
+	plist_check_head(head);
+}
+
 #ifdef CONFIG_DEBUG_PI_LIST
 #include <linux/sched.h>
 #include <linux/module.h>
@@ -170,6 +210,14 @@ static void __init plist_test_check(int nr_expect)
 	BUG_ON(prio_pos->prio_list.next != &first->prio_list);
 }
 
+static void __init plist_test_requeue(struct plist_node *node)
+{
+	plist_requeue(node, &test_head);
+
+	if (node != plist_last(&test_head))
+		BUG_ON(node->prio == plist_next(node)->prio);
+}
+
 static int __init plist_test(void)
 {
 	int nr_expect = 0, i, loop;
@@ -193,6 +241,10 @@ static int __init plist_test(void)
 			nr_expect--;
 		}
 		plist_test_check(nr_expect);
+		if (!plist_node_empty(test_node + i)) {
+			plist_test_requeue(test_node + i);
+			plist_test_check(nr_expect);
+		}
 	}
 
 	for (i = 0; i < ARRAY_SIZE(test_node); i++) {
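plist_requeue() gives callers cheap round-robin among equal-priority entries: the node is spliced behind its same-priority peers without a full del/add cycle, and the function returns early when the node is already last or has no same-priority successor. A hypothetical caller sketch — not part of this patch — assuming only the <linux/plist.h> API plus the new function, with serialization left to the caller as usual for plists:

#include <linux/plist.h>

static struct plist_head pending = PLIST_HEAD_INIT(pending);

/* Pick the highest-priority pending node, then rotate it behind any
 * peers of the same priority so they are serviced round-robin. */
static struct plist_node *pick_next(void)
{
	struct plist_node *node;

	if (plist_head_empty(&pending))
		return NULL;

	node = plist_first(&pending);

	/* Cheaper than plist_del() + plist_add(); a no-op when the node
	 * is already last or its successor has a different priority. */
	plist_requeue(node, &pending);

	return node;
}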
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e7b61e8..e8adb5d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -221,13 +221,12 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		 * succeed in getting a node here (and never reach
 		 * kmem_cache_alloc)
 		 */
-		rtp = &get_cpu_var(radix_tree_preloads);
+		rtp = &__get_cpu_var(radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes[rtp->nr - 1];
 			rtp->nodes[rtp->nr - 1] = NULL;
 			rtp->nr--;
 		}
-		put_cpu_var(radix_tree_preloads);
 	}
 	if (ret == NULL)
 		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -262,7 +261,6 @@ radix_tree_node_free(struct radix_tree_node *node)
 	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Load up this CPU's radix_tree_node buffer with sufficient objects to
  * ensure that the addition of a single element in the tree cannot fail.  On
@@ -328,7 +326,6 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
-#endif
 
 /*
  * Return the maximum key which can be store into a
@@ -949,81 +946,6 @@ next:
 }
 EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
 
-/**
- * radix_tree_next_hole    -    find the next hole (not-present entry)
- * @root:	tree root
- * @index:	index key
- * @max_scan:	maximum range to search
- *
- * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
- * indexed hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'return - index >= max_scan'
- * will be true).  In rare cases of index wrap-around, 0 will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock.  However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time.  For example, if a hole is created
- * at index 5, then subsequently a hole is created at index 10,
- * radix_tree_next_hole covering both indexes may return 10 if called
- * under rcu_read_lock.
- */
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
-				unsigned long index, unsigned long max_scan)
-{
-	unsigned long i;
-
-	for (i = 0; i < max_scan; i++) {
-		if (!radix_tree_lookup(root, index))
-			break;
-		index++;
-		if (index == 0)
-			break;
-	}
-
-	return index;
-}
-EXPORT_SYMBOL(radix_tree_next_hole);
-
-/**
- * radix_tree_prev_hole    -    find the prev hole (not-present entry)
- * @root:	tree root
- * @index:	index key
- * @max_scan:	maximum range to search
- *
- * Search backwards in the range [max(index-max_scan+1, 0), index]
- * for the first hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'index - return >= max_scan'
- * will be true).  In rare cases of wrap-around, ULONG_MAX will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock.  However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time.  For example, if a hole is created
- * at index 10, then subsequently a hole is created at index 5,
- * radix_tree_prev_hole covering both indexes may return 5 if called under
- * rcu_read_lock.
- */
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
-				   unsigned long index, unsigned long max_scan)
-{
-	unsigned long i;
-
-	for (i = 0; i < max_scan; i++) {
-		if (!radix_tree_lookup(root, index))
-			break;
-		index--;
-		if (index == ULONG_MAX)
-			break;
-	}
-
-	return index;
-}
-EXPORT_SYMBOL(radix_tree_prev_hole);
-
 /**
  * radix_tree_gang_lookup - perform multiple lookup on a radix tree
  * @root:		radix tree root
@@ -1338,15 +1260,18 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 }
 
 /**
- *	radix_tree_delete    -    delete an item from a radix tree
+ *	radix_tree_delete_item    -    delete an item from a radix tree
 *	@root:		radix tree root
 *	@index:		index key
+ *	@item:		expected item
 *
- *	Remove the item at @index from the radix tree rooted at @root.
+ *	Remove @item at @index from the radix tree rooted at @root.
 *
- *	Returns the address of the deleted item, or NULL if it was not present.
+ *	Returns the address of the deleted item, or NULL if it was not present
+ *	or the entry at the given @index was not @item.
 */
-void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+void *radix_tree_delete_item(struct radix_tree_root *root,
+			     unsigned long index, void *item)
 {
 	struct radix_tree_node *node = NULL;
 	struct radix_tree_node *slot = NULL;
@@ -1381,6 +1306,11 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	if (slot == NULL)
 		goto out;
 
+	if (item && slot != item) {
+		slot = NULL;
+		goto out;
+	}
+
 	/*
 	 * Clear all tags associated with the item to be deleted.
 	 * This way of doing it would be inefficient, but seldom is any set.
@@ -1425,6 +1355,21 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 out:
 	return slot;
 }
+EXPORT_SYMBOL(radix_tree_delete_item);
+
+/**
+ *	radix_tree_delete    -    delete an item from a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *
+ *	Remove the item at @index from the radix tree rooted at @root.
+ *
+ *	Returns the address of the deleted item, or NULL if it was not present.
+ */
+void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+{
+	return radix_tree_delete_item(root, index, NULL);
+}
 EXPORT_SYMBOL(radix_tree_delete);
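radix_tree_delete_item() turns deletion into a compare-and-delete: the caller passes the entry it expects at @index, and the removal is refused (NULL returned) if the slot has meanwhile come to hold something else, with radix_tree_delete() kept as the item == NULL wrapper. A hypothetical helper — not from this patch — showing the intended use, with callers providing their own exclusion as elsewhere in this API:

#include <linux/types.h>
#include <linux/radix-tree.h>

/* Delete the entry at @index only if it is still @expected, e.g. after
 * a lockless lookup that a concurrent replacement may have invalidated.
 * Returns true when the expected entry was present and removed. */
static bool remove_if_unchanged(struct radix_tree_root *root,
				unsigned long index, void *expected)
{
	return radix_tree_delete_item(root, index, expected) == expected;
}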
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9c1236e..d16fa29 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -582,7 +582,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			flush_kernel_dcache_page(miter->page);
 
 		if (miter->__flags & SG_MITER_ATOMIC) {
-			WARN_ON_ONCE(!pagefault_disabled());
+			WARN_ON_ONCE(preemptible());
 			kunmap_atomic(miter->addr);
 		} else
 			kunmap(miter->page);
@@ -627,7 +627,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 	if (!sg_miter_skip(&miter, skip))
 		return false;
 
-	local_irq_save_nort(flags);
+	local_irq_save(flags);
 
 	while (sg_miter_next(&miter) && offset < buflen) {
 		unsigned int len;
@@ -644,7 +644,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
 	sg_miter_stop(&miter);
 
-	local_irq_restore_nort(flags);
+	local_irq_restore(flags);
 
 	return offset;
 }
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index dbb1570..4c0d0e5 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
 	if (!printk_ratelimit())
 		goto out_enable;
 
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
-			"code: %s/%d\n", preempt_count() - 1,
-			__migrate_disabled(current), current->comm, current->pid);
+	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
+			"code: %s/%d\n",
+			preempt_count() - 1, current->comm, current->pid);
 
 	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
 	dump_stack();
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9497033..0374a59 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -31,7 +31,6 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 void __rwlock_init(rwlock_t *lock, const char *name,
 		   struct lock_class_key *key)
 {
@@ -49,7 +48,6 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 }
 
 EXPORT_SYMBOL(__rwlock_init);
-#endif
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -161,7 +159,6 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
 	arch_spin_unlock(&lock->raw_lock);
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
 	if (!debug_locks_off())
@@ -303,5 +300,3 @@ void do_raw_write_unlock(rwlock_t *lock)
 	debug_write_unlock(lock);
 	arch_write_unlock(&lock->raw_lock);
 }
-
-#endif
diff --git a/lib/string.c b/lib/string.c
index e5878de..43d0781 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
 EXPORT_SYMBOL(memset);
 #endif
 
+/**
+ * memzero_explicit - Fill a region of memory (e.g. sensitive
+ *		      keying data) with 0s.
+ * @s: Pointer to the start of the area.
+ * @count: The size of the area.
+ *
+ * memzero_explicit() doesn't need an arch-specific version as
+ * it just invokes the one of memset() implicitly.
+ */
+void memzero_explicit(void *s, size_t count)
+{
+	memset(s, 0, count);
+	OPTIMIZER_HIDE_VAR(s);
+}
+EXPORT_SYMBOL(memzero_explicit);
+
 #ifndef __HAVE_ARCH_MEMCPY
 /**
  * memcpy - Copy one area of memory to another
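memzero_explicit() exists because a compiler may delete a memset() whose buffer is provably dead afterwards (dead-store elimination) — precisely the pattern when scrubbing key material on function exit. OPTIMIZER_HIDE_VAR() makes the pointer opaque to the optimizer so the clearing store cannot be proven dead. A userspace illustration using an equivalent inline-asm barrier (the kernel macro expands to a similar empty asm; treat the exact expansion here as an assumption):

#include <string.h>
#include <stddef.h>

/* Equivalent of memzero_explicit(): the empty asm consumes the pointer
 * and clobbers memory, so the memset() above it cannot be elided. */
static void my_memzero_explicit(void *s, size_t count)
{
	memset(s, 0, count);
	__asm__ __volatile__("" : : "r" (s) : "memory");
}

void handle_secret(void)
{
	char key[32];

	/* ... derive and use key ... */

	/* memset(key, 0, sizeof(key));  -- may be optimized away, since
	 * 'key' is dead once the function returns */
	my_memzero_explicit(key, sizeof(key));	/* survives optimization */
}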