author     Linus Torvalds <torvalds@linux-foundation.org>  2016-08-05 00:31:20 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-08-05 00:31:20 (GMT)
commit     f8fbd8c49b7dee2e9b9d7c9754972fa2ca335251 (patch)
tree       5f1e8994cce98c3817ab0038706820a5a8e5810e /mm
parent     84e39eeb08c0ea7e9ec43ac820bf76a6fe8ecbad (diff)
parent     e47608ab6dbe63fefe60c211a30b3fc78a1b5d5e (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "A few late-breaking fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/memblock.c: fix NULL dereference error
  MAINTAINERS: update cgroup's document path
  slub: drop bogus inline for fixup_red_left()
  powerpc/fsl_rio: fix a missing error code
  mm: initialise per_cpu_nodestats for all online pgdats at boot
  mm/memblock: fix a typo in a comment
  mm: disable CONFIG_MEMORY_HOTPLUG when KASAN is enabled
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       1
-rw-r--r--  mm/memblock.c    9
-rw-r--r--  mm/page_alloc.c  10
-rw-r--r--  mm/slub.c        2
4 files changed, 13 insertions, 9 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index c083784..78a23c5 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -187,6 +187,7 @@ config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
depends on ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on !KASAN
config MEMORY_HOTPLUG_SPARSE
def_bool y
diff --git a/mm/memblock.c b/mm/memblock.c
index ff5ff3b..483197e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -482,7 +482,7 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
  * @flags: flags of the new region
  *
  * Insert new memblock region [@base,@base+@size) into @type at @idx.
- * @type must already have extra room to accomodate the new region.
+ * @type must already have extra room to accommodate the new region.
  */
 static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                    int idx, phys_addr_t base,
@@ -544,7 +544,7 @@ repeat:
         /*
          * The following is executed twice. Once with %false @insert and
          * then with %true. The first counts the number of regions needed
-         * to accomodate the new area. The second actually inserts them.
+         * to accommodate the new area. The second actually inserts them.
          */
         base = obase;
         nr_new = 0;
@@ -994,7 +994,10 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
         if (*idx == (u64)ULLONG_MAX) {
                 idx_a = type_a->cnt - 1;
-                idx_b = type_b->cnt;
+                if (type_b != NULL)
+                        idx_b = type_b->cnt;
+                else
+                        idx_b = 0;
         }

         for (; idx_a >= 0; idx_a--) {
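
The hunk above treats a NULL @type_b as an empty second region list rather
than dereferencing it. A minimal standalone sketch of the same guard,
using a hypothetical region_list type and initial_idx_b() helper rather
than the kernel's struct memblock_type:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct memblock_type. */
struct region { unsigned long base, size; };
struct region_list { int cnt; struct region *regions; };

/* Mirrors the fix: an absent (NULL) second list yields index 0
 * instead of a NULL dereference. */
static int initial_idx_b(const struct region_list *type_b)
{
        return type_b != NULL ? type_b->cnt : 0;
}

int main(void)
{
        struct region r[1] = { { 0x1000, 0x1000 } };
        struct region_list type_a = { 1, r };

        /* Some callers pass no second list at all; the guard keeps
         * that case safe. */
        printf("idx_a=%d idx_b=%d\n", type_a.cnt - 1, initial_idx_b(NULL));
        return 0;
}
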
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 39a372a..fb975ce 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5257,11 +5257,6 @@ static void __meminit setup_zone_pageset(struct zone *zone)
         zone->pageset = alloc_percpu(struct per_cpu_pageset);
         for_each_possible_cpu(cpu)
                 zone_pageset_init(zone, cpu);
-
-        if (!zone->zone_pgdat->per_cpu_nodestats) {
-                zone->zone_pgdat->per_cpu_nodestats =
-                        alloc_percpu(struct per_cpu_nodestat);
-        }
 }
/*
@@ -5270,10 +5265,15 @@ static void __meminit setup_zone_pageset(struct zone *zone)
  */
 void __init setup_per_cpu_pageset(void)
 {
+        struct pglist_data *pgdat;
         struct zone *zone;

         for_each_populated_zone(zone)
                 setup_zone_pageset(zone);
+
+        for_each_online_pgdat(pgdat)
+                pgdat->per_cpu_nodestats =
+                        alloc_percpu(struct per_cpu_nodestat);
 }
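
The move matters because a node can be online while none of its zones are
populated; tying the allocation to zone setup skipped such memoryless
nodes. A standalone sketch of the before/after iteration, with a
hypothetical node array standing in for the kernel's pgdat list:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model: three online nodes, node 2 is memoryless. */
struct node { bool populated; bool has_stats; };

int main(void)
{
        struct node nodes[3] = {
                { true, false }, { true, false }, { false, false },
        };
        int i;

        /* Old scheme: allocation hung off populated-zone setup, so the
         * memoryless node 2 never received per-node stats. */
        for (i = 0; i < 3; i++)
                if (nodes[i].populated)
                        nodes[i].has_stats = true;

        /* New scheme, as in the hunk above: walk every online node. */
        for (i = 0; i < 3; i++)
                nodes[i].has_stats = true;

        for (i = 0; i < 3; i++)
                printf("node %d: stats=%d\n", i, (int)nodes[i].has_stats);
        return 0;
}
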
static noinline __ref
diff --git a/mm/slub.c b/mm/slub.c
index 26eb6a99..850737b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -124,7 +124,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }

-inline void *fixup_red_left(struct kmem_cache *s, void *p)
+void *fixup_red_left(struct kmem_cache *s, void *p)
 {
         if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
                 p += s->red_left_pad;