From 1e185736d21448f986ff338ce116e20260ffe03c Mon Sep 17 00:00:00 2001
From: zhong jiang
Date: Thu, 4 Aug 2016 15:31:44 -0700
Subject: mm: disable CONFIG_MEMORY_HOTPLUG when KASAN is enabled

At present, memory online and offline fail when KASAN is enabled, so
add a Kconfig dependency that disables memory hotplug when KASAN is
enabled.

Link: http://lkml.kernel.org/r/1470063651-29519-1-git-send-email-zhongjiang@huawei.com
Signed-off-by: zhong jiang
Cc: Andrey Ryabinin
Cc: Alexander Potapenko
Cc: Dmitry Vyukov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds

diff --git a/mm/Kconfig b/mm/Kconfig
index c083784..78a23c5 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -187,6 +187,7 @@ config MEMORY_HOTPLUG
 	bool "Allow for memory hot-add"
 	depends on SPARSEMEM || X86_64_ACPI_NUMA
 	depends on ARCH_ENABLE_MEMORY_HOTPLUG
+	depends on !KASAN
 
 config MEMORY_HOTPLUG_SPARSE
 	def_bool y
--
cgit v0.10.2


From 412d0008d69ce964ac60ac9da714edb40bbae244 Mon Sep 17 00:00:00 2001
From: Alexander Kuleshov
Date: Thu, 4 Aug 2016 15:31:46 -0700
Subject: mm/memblock: fix a typo in a comment

s/accomodate/accommodate/

Link: http://lkml.kernel.org/r/20160804121824.18100-1-kuleshovmail@gmail.com
Signed-off-by: Alexander Kuleshov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds

diff --git a/mm/memblock.c b/mm/memblock.c
index ff5ff3b..1f065da 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -482,7 +482,7 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
  * @flags: flags of the new region
  *
  * Insert new memblock region [@base,@base+@size) into @type at @idx.
- * @type must already have extra room to accomodate the new region.
+ * @type must already have extra room to accommodate the new region.
  */
 static void __init_memblock memblock_insert_region(struct memblock_type *type,
 						   int idx, phys_addr_t base,
@@ -544,7 +544,7 @@ repeat:
 	/*
 	 * The following is executed twice. Once with %false @insert and
 	 * then with %true. The first counts the number of regions needed
-	 * to accomodate the new area. The second actually inserts them.
+	 * to accommodate the new area. The second actually inserts them.
 	 */
 	base = obase;
 	nr_new = 0;
--
cgit v0.10.2


From b4911ea2bcf52345bf3dc73292b8c4676f6192c6 Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Thu, 4 Aug 2016 15:31:49 -0700
Subject: mm: initialise per_cpu_nodestats for all online pgdats at boot

Paul Mackerras and Reza Arbab reported that machines with memoryless
nodes fail when vmstats are refreshed.  Paul reported an oops as
follows

  Unable to handle kernel paging request for data at address 0xff7a10000
  Faulting instruction address: 0xc000000000270cd0
  Oops: Kernel access of bad area, sig: 11 [#1]
  SMP NR_CPUS=2048 NUMA PowerNV
  Modules linked in:
  CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.7.0-kvm+ #118
  task: c000000ff0680010 task.stack: c000000ff0704000
  NIP: c000000000270cd0 LR: c000000000270ce8 CTR: 0000000000000000
  REGS: c000000ff0707900 TRAP: 0300  Not tainted  (4.7.0-kvm+)
  MSR: 9000000102009033  CR: 846b6824  XER: 20000000
  CFAR: c000000000008768 DAR: 0000000ff7a10000 DSISR: 42000000 SOFTE: 1
  NIP refresh_zone_stat_thresholds+0x80/0x240
  LR refresh_zone_stat_thresholds+0x98/0x240
  Call Trace:
    refresh_zone_stat_thresholds+0xb8/0x240 (unreliable)

Both supplied potential fixes, but one potentially missed checks and
the other had redundant initialisations.  This version initialises
per_cpu_nodestats on a per-pgdat basis instead of on a per-zone basis.
Link: http://lkml.kernel.org/r/20160804092404.GI2799@techsingularity.net
Signed-off-by: Mel Gorman
Reported-by: Paul Mackerras
Reported-by: Reza Arbab
Tested-by: Reza Arbab
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 39a372a..fb975ce 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5257,11 +5257,6 @@ static void __meminit setup_zone_pageset(struct zone *zone)
 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
 	for_each_possible_cpu(cpu)
 		zone_pageset_init(zone, cpu);
-
-	if (!zone->zone_pgdat->per_cpu_nodestats) {
-		zone->zone_pgdat->per_cpu_nodestats =
-			alloc_percpu(struct per_cpu_nodestat);
-	}
 }
 
 /*
@@ -5270,10 +5265,15 @@ static void __meminit setup_zone_pageset(struct zone *zone)
  */
 void __init setup_per_cpu_pageset(void)
 {
+	struct pglist_data *pgdat;
 	struct zone *zone;
 
 	for_each_populated_zone(zone)
 		setup_zone_pageset(zone);
+
+	for_each_online_pgdat(pgdat)
+		pgdat->per_cpu_nodestats =
+			alloc_percpu(struct per_cpu_nodestat);
 }
 
 static noinline __ref
--
cgit v0.10.2


From 380afa3698781e19699d2585b655e699b2da0fa0 Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Thu, 4 Aug 2016 15:31:52 -0700
Subject: powerpc/fsl_rio: fix a missing error code

We should set the error code here rather than incorrectly returning 0.
Otherwise static checkers complain.

Link: http://lkml.kernel.org/r/20160804053525.GM775@mwanda
Signed-off-by: Dan Carpenter
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Michael Ellerman
Cc: Alexandre Bounine
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds

diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 984e816..68e7c0d 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -491,6 +491,7 @@ int fsl_rio_setup(struct platform_device *dev)
 	rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0);
 	if (!rmu_node) {
 		dev_err(&dev->dev, "No valid fsl,srio-rmu-handle property\n");
+		rc = -ENOENT;
 		goto err_rmu;
 	}
 	rc = of_address_to_resource(rmu_node, 0, &rmu_regs);
--
cgit v0.10.2


From 117d54df7a33ed1615a331161501090f9058a61e Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven
Date: Thu, 4 Aug 2016 15:31:55 -0700
Subject: slub: drop bogus inline for fixup_red_left()

With m68k-linux-gnu-gcc-4.1:

    include/linux/slub_def.h:126: warning: `fixup_red_left' declared inline after being called
    include/linux/slub_def.h:126: warning: previous declaration of `fixup_red_left' was here

Commit c146a2b98eb5 ("mm, kasan: account for object redzone in SLUB's
nearest_obj()") made fixup_red_left() global, but forgot to remove the
inline keyword.
Fixes: c146a2b98eb5898e ("mm, kasan: account for object redzone in SLUB's nearest_obj()")
Link: http://lkml.kernel.org/r/1470256262-1586-1-git-send-email-geert@linux-m68k.org
Signed-off-by: Geert Uytterhoeven
Cc: Alexander Potapenko
Acked-by: David Rientjes
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds

diff --git a/mm/slub.c b/mm/slub.c
index 26eb6a99..850737b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -124,7 +124,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
-inline void *fixup_red_left(struct kmem_cache *s, void *p)
+void *fixup_red_left(struct kmem_cache *s, void *p)
 {
 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
 		p += s->red_left_pad;
--
cgit v0.10.2


From 4b16b0c0bf416de61d14ac31703883d76395ed5f Mon Sep 17 00:00:00 2001
From: "seokhoon.yoon"
Date: Thu, 4 Aug 2016 15:31:57 -0700
Subject: MAINTAINERS: update cgroup's document path

cgroup's documentation path has changed to "cgroup-v1"; update the
MAINTAINERS entries accordingly.

Link: http://lkml.kernel.org/r/1470322507-5161-1-git-send-email-iamyooon@gmail.com
Signed-off-by: seokhoon.yoon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds

diff --git a/MAINTAINERS b/MAINTAINERS
index bafc804..f153cf6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3219,7 +3219,7 @@ M:	Johannes Weiner
 L:	cgroups@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
 S:	Maintained
-F:	Documentation/cgroups/
+F:	Documentation/cgroup*
 F:	include/linux/cgroup*
 F:	kernel/cgroup*
 
@@ -3230,7 +3230,7 @@ W:	http://www.bullopensource.org/cpuset/
 W:	http://oss.sgi.com/projects/cpusets/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
 S:	Maintained
-F:	Documentation/cgroups/cpusets.txt
+F:	Documentation/cgroup-v1/cpusets.txt
 F:	include/linux/cpuset.h
 F:	kernel/cpuset.c
--
cgit v0.10.2


From e47608ab6dbe63fefe60c211a30b3fc78a1b5d5e Mon Sep 17 00:00:00 2001
From: zijun_hu
Date: Thu, 4 Aug 2016 15:32:00 -0700
Subject: mm/memblock.c: fix NULL dereference error

__next_mem_range_rev() dereferences its @type_b parameter without
checking it, which causes a NULL dereference error and a failure to
return the type_a->regions[0] info when @type_b == NULL.

Fix this by checking @type_b before dereferencing it and initializing
idx_b to 0 in that case.

The fix was tested by dumping all types of region via
__memblock_dump_all() and via the fixed __next_mem_range_rev()
separately over UART; the resulting logs look correct.

Link: http://lkml.kernel.org/r/57A0320D.6070102@zoho.com
Signed-off-by: zijun_hu
Tested-by: zijun_hu
Acked-by: Tejun Heo
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds

diff --git a/mm/memblock.c b/mm/memblock.c
index 1f065da..483197e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -994,7 +994,10 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 
 	if (*idx == (u64)ULLONG_MAX) {
 		idx_a = type_a->cnt - 1;
-		idx_b = type_b->cnt;
+		if (type_b != NULL)
+			idx_b = type_b->cnt;
+		else
+			idx_b = 0;
 	}
 
 	for (; idx_a >= 0; idx_a--) {
--
cgit v0.10.2