From 4b239f458c229de044d6905c2b0f9fe16ed9e01e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 17 Dec 2010 16:58:28 -0800 Subject: x86-64, mm: Put early page table high
While debugging kdump, I found that the current kernel has a problem with crashkernel=512M. It turns out that the initial mapping only covers the first 512M, so the later mapping up to 4G (actually 2040M on my platform) places its page tables near 512M, and the subsequent mapping up to 128G places its page tables near 2G.
before this patch: [ 0.000000] initial memory mapped : 0 - 20000000 [ 0.000000] init_memory_mapping: [0x00000000000000-0x0000007f74ffff] [ 0.000000] 0000000000 - 007f600000 page 2M [ 0.000000] 007f600000 - 007f750000 page 4k [ 0.000000] kernel direct mapping tables up to 7f750000 @ [0x1fffc000-0x1fffffff] [ 0.000000] memblock_x86_reserve_range: [0x1fffc000-0x1fffdfff] PGTABLE [ 0.000000] init_memory_mapping: [0x00000100000000-0x0000207fffffff] [ 0.000000] 0100000000 - 2080000000 page 2M [ 0.000000] kernel direct mapping tables up to 2080000000 @ [0x7bc01000-0x7bc83fff] [ 0.000000] memblock_x86_reserve_range: [0x7bc01000-0x7bc7efff] PGTABLE [ 0.000000] RAMDISK: 7bc84000 - 7f745000 [ 0.000000] crashkernel reservation failed - No suitable area found.
after patch: [ 0.000000] initial memory mapped : 0 - 20000000 [ 0.000000] init_memory_mapping: [0x00000000000000-0x0000007f74ffff] [ 0.000000] 0000000000 - 007f600000 page 2M [ 0.000000] 007f600000 - 007f750000 page 4k [ 0.000000] kernel direct mapping tables up to 7f750000 @ [0x7f74c000-0x7f74ffff] [ 0.000000] memblock_x86_reserve_range: [0x7f74c000-0x7f74dfff] PGTABLE [ 0.000000] init_memory_mapping: [0x00000100000000-0x0000207fffffff] [ 0.000000] 0100000000 - 2080000000 page 2M [ 0.000000] kernel direct mapping tables up to 2080000000 @ [0x207ff7d000-0x207fffffff] [ 0.000000] memblock_x86_reserve_range: [0x207ff7d000-0x207fffafff] PGTABLE [ 0.000000] RAMDISK: 7bc84000 - 7f745000 [ 0.000000] memblock_x86_reserve_range: [0x17000000-0x36ffffff] CRASH KERNEL [ 0.000000] Reserving 512MB of memory at 368MB for crashkernel (System RAM: 133120MB)
With the patch, the page tables for [0, 2G) end up near 2G instead of under 512M, and the page tables for [4G, 128G) end up near 128G instead of under 2G. That is an improvement: on systems with lots of memory above 4G (1024G, 2048G, or even 16T), the page tables no longer land below 2G, where they could otherwise fill up the low range when 1G or 2M pages cannot be used. The code change adds map_low_page() and updates unmap_low_page() for 64-bit, and uses them to access the high memory chosen for the page tables.
Signed-off-by: Yinghai Lu LKML-Reference: <4D0C0734.7060900@kernel.org> Signed-off-by: H. Peter Anvin
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index c0e28a1..5863950 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -33,7 +33,7 @@ int direct_gbpages static void __init find_early_table_space(unsigned long end, int use_pse, int use_gbpages) { - unsigned long puds, pmds, ptes, tables, start; + unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; phys_addr_t base; puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; @@ -73,12 +73,9 @@ static void __init find_early_table_space(unsigned long end, int use_pse, * need roughly 0.5KB per GB.
*/ #ifdef CONFIG_X86_32 - start = 0x7000; -#else - start = 0x8000; + good_end = max_pfn_mapped << PAGE_SHIFT; #endif - base = memblock_find_in_range(start, max_pfn_mapped< Date: Fri, 17 Dec 2010 16:58:40 -0800 Subject: x86-64, gart: Fix allocation with memblock When trying to change alloc_bootmem with memblock to go with real top-down Found one old system: [ 0.000000] Node 0: aperture @ ac000000 size 64 MB [ 0.000000] Aperture pointing to e820 RAM. Ignoring. [ 0.000000] Your BIOS doesn't leave a aperture memory hole [ 0.000000] Please enable the IOMMU option in the BIOS setup [ 0.000000] This costs you 64 MB of RAM [ 0.000000] memblock_x86_reserve_range: [0x2020000000-0x2023ffffff] aperture64 [ 0.000000] Cannot allocate aperture memory hole (ffff882020000000,65536K) [ 0.000000] memblock_x86_free_range: [0x2020000000-0x2023ffffff] [ 0.000000] Kernel panic - not syncing: Not enough memory for aperture [ 0.000000] Pid: 0, comm: swapper Not tainted 2.6.37-rc5-tip-yh-06229-gb792dc2-dirty #331 [ 0.000000] Call Trace: [ 0.000000] [] ? panic+0x91/0x1a3 [ 0.000000] [] ? gart_iommu_hole_init+0x3d7/0x4a3 [ 0.000000] [] ? _etext+0x0/0x3 [ 0.000000] [] ? pci_iommu_alloc+0x47/0x71 [ 0.000000] [] ? mem_init+0x19/0xec [ 0.000000] [] ? start_kernel+0x20a/0x3e8 [ 0.000000] [] ? x86_64_start_reservations+0x9c/0xa0 [ 0.000000] [] ? x86_64_start_kernel+0x114/0x11b it means __alloc_bootmem_nopanic() get too high for that aperture. Use memblock_find_in_range() with limit directly. Signed-off-by: Yinghai Lu LKML-Reference: <4D0C0740.90104@kernel.org> Signed-off-by: H. Peter Anvin diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index dcd7c83..85f66b4 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -69,7 +69,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size) static u32 __init allocate_aperture(void) { u32 aper_size; - void *p; + unsigned long addr; /* aper_size should <= 1G */ if (fallback_aper_order > 5) @@ -95,27 +95,26 @@ static u32 __init allocate_aperture(void) * so don't use 512M below as gart iommu, leave the space for kernel * code for safe */ - p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); + addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20); + if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) { + printk(KERN_ERR + "Cannot allocate aperture memory hole (%lx,%uK)\n", + addr, aper_size>>10); + return 0; + } + memblock_x86_reserve_range(addr, addr + aper_size, "aperture64"); /* * Kmemleak should not scan this block as it may not be mapped via the * kernel direct mapping. 
*/ - kmemleak_ignore(p); - if (!p || __pa(p)+aper_size > 0xffffffff) { - printk(KERN_ERR - "Cannot allocate aperture memory hole (%p,%uK)\n", - p, aper_size>>10); - if (p) - free_bootmem(__pa(p), aper_size); - return 0; - } + kmemleak_ignore(phys_to_virt(addr)); printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", - aper_size >> 10, __pa(p)); - insert_aperture_resource((u32)__pa(p), aper_size); - register_nosave_region((u32)__pa(p) >> PAGE_SHIFT, - (u32)__pa(p+aper_size) >> PAGE_SHIFT); + aper_size >> 10, addr); + insert_aperture_resource((u32)addr, aper_size); + register_nosave_region(addr >> PAGE_SHIFT, + (addr+aper_size) >> PAGE_SHIFT); - return (u32)__pa(p); + return (u32)addr; } -- cgit v0.10.2 From 1a4a678b12c84db9ae5dce424e0e97f0559bb57c Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 17 Dec 2010 16:59:07 -0800 Subject: memblock: Make find_memory_core_early() find from top-down That is used for find ram in node or bootmem type. We should make it top-down so it will be consistent to memblock_find, and to avoid allocating potentially valuable low memory before we actually need it. Suggested-by: Jeremy Fitzhardinge Signed-off-by: Yinghai Lu LKML-Reference: <4D0C075B.3040501@kernel.org> Signed-off-by: H. Peter Anvin diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 07a6544..19413bf 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3555,6 +3555,34 @@ static int __meminit next_active_region_index_in_nid(int index, int nid) return -1; } +/* + * Basic iterator support. Return the last range of PFNs for a node + * Note: nid == MAX_NUMNODES returns last region regardless of node + */ +static int __meminit last_active_region_index_in_nid(int nid) +{ + int i; + + for (i = nr_nodemap_entries - 1; i >= 0; i--) + if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) + return i; + + return -1; +} + +/* + * Basic iterator support. Return the previous active range of PFNs for a node + * Note: nid == MAX_NUMNODES returns next region regardless of node + */ +static int __meminit previous_active_region_index_in_nid(int index, int nid) +{ + for (index = index - 1; index >= 0; index--) + if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) + return index; + + return -1; +} + #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID /* * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. @@ -3606,6 +3634,10 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node) for (i = first_active_region_index_in_nid(nid); i != -1; \ i = next_active_region_index_in_nid(i, nid)) +#define for_each_active_range_index_in_nid_reverse(i, nid) \ + for (i = last_active_region_index_in_nid(nid); i != -1; \ + i = previous_active_region_index_in_nid(i, nid)) + /** * free_bootmem_with_active_regions - Call free_bootmem_node for each active range * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. @@ -3644,7 +3676,7 @@ u64 __init find_memory_core_early(int nid, u64 size, u64 align, int i; /* Need to go over early_node_map to find out good range for node */ - for_each_active_range_index_in_nid(i, nid) { + for_each_active_range_index_in_nid_reverse(i, nid) { u64 addr; u64 ei_start, ei_last; u64 final_start, final_end; -- cgit v0.10.2 From 45635ab5e41bcde94a82f9a05d660ef77fe38c1b Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 27 Dec 2010 16:47:54 -0800 Subject: x86: Change get_max_mapped() to inline Move it into head file. to prepare use it in other files. [ hpa: added missing and changed type to phys_addr_t. 
] Signed-off-by: Yinghai Lu LKML-Reference: <4D1933BA.8000508@kernel.org> Signed-off-by: H. Peter Anvin diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 1df6621..93626e6 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -2,6 +2,7 @@ #define _ASM_X86_PAGE_DEFS_H #include +#include /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 @@ -45,6 +46,11 @@ extern int devmem_is_allowed(unsigned long pagenr); extern unsigned long max_low_pfn_mapped; extern unsigned long max_pfn_mapped; +static inline phys_addr_t get_max_mapped(void) +{ + return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; +} + extern unsigned long init_memory_mapping(unsigned long start, unsigned long end); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index df172c1..3def8c9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -669,15 +669,6 @@ static int __init parse_reservelow(char *p) early_param("reservelow", parse_reservelow); -static u64 __init get_max_mapped(void) -{ - u64 end = max_pfn_mapped; - - end <<= PAGE_SHIFT; - - return end; -} - /* * Determine if we were loaded by an EFI loader. If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures -- cgit v0.10.2 From dbef7b56d2fc5115f26f72a0b080283bbf972cab Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 27 Dec 2010 16:48:08 -0800 Subject: x86-64, numa: Allocate memnodemap under max_pfn_mapped We need to access it right way, so make sure that it is mapped already. Prepare to put page table on local node, and nodemap is used before that. Signed-off-by: Yinghai Lu LKML-Reference: <4D1933C8.7060105@kernel.org> Signed-off-by: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 7762a51..02d36ff 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -87,7 +87,7 @@ static int __init allocate_cachealigned_memnodemap(void) addr = 0x8000; nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); - nodemap_addr = memblock_find_in_range(addr, max_pfn< Date: Mon, 27 Dec 2010 16:48:17 -0800 Subject: x86-64, numa: Put pgtable to local node memory Introduce init_memory_mapping_high(), and use it with 64bit. It will go with every memory segment above 4g to create page table to the memory range itself. before this patch all page tables was on one node. with this patch, one RED-PEN is killed debug out for 8 sockets system after patch [ 0.000000] initial memory mapped : 0 - 20000000 [ 0.000000] init_memory_mapping: [0x00000000000000-0x0000007f74ffff] [ 0.000000] 0000000000 - 007f600000 page 2M [ 0.000000] 007f600000 - 007f750000 page 4k [ 0.000000] kernel direct mapping tables up to 7f750000 @ [0x7f74c000-0x7f74ffff] [ 0.000000] RAMDISK: 7bc84000 - 7f745000 .... 
[ 0.000000] Adding active range (0, 0x10, 0x95) 0 entries of 3200 used [ 0.000000] Adding active range (0, 0x100, 0x7f750) 1 entries of 3200 used [ 0.000000] Adding active range (0, 0x100000, 0x1080000) 2 entries of 3200 used [ 0.000000] Adding active range (1, 0x1080000, 0x2080000) 3 entries of 3200 used [ 0.000000] Adding active range (2, 0x2080000, 0x3080000) 4 entries of 3200 used [ 0.000000] Adding active range (3, 0x3080000, 0x4080000) 5 entries of 3200 used [ 0.000000] Adding active range (4, 0x4080000, 0x5080000) 6 entries of 3200 used [ 0.000000] Adding active range (5, 0x5080000, 0x6080000) 7 entries of 3200 used [ 0.000000] Adding active range (6, 0x6080000, 0x7080000) 8 entries of 3200 used [ 0.000000] Adding active range (7, 0x7080000, 0x8080000) 9 entries of 3200 used [ 0.000000] init_memory_mapping: [0x00000100000000-0x0000107fffffff] [ 0.000000] 0100000000 - 1080000000 page 2M [ 0.000000] kernel direct mapping tables up to 1080000000 @ [0x107ffbd000-0x107fffffff] [ 0.000000] memblock_x86_reserve_range: [0x107ffc2000-0x107fffffff] PGTABLE [ 0.000000] init_memory_mapping: [0x00001080000000-0x0000207fffffff] [ 0.000000] 1080000000 - 2080000000 page 2M [ 0.000000] kernel direct mapping tables up to 2080000000 @ [0x207ff7d000-0x207fffffff] [ 0.000000] memblock_x86_reserve_range: [0x207ffc0000-0x207fffffff] PGTABLE [ 0.000000] init_memory_mapping: [0x00002080000000-0x0000307fffffff] [ 0.000000] 2080000000 - 3080000000 page 2M [ 0.000000] kernel direct mapping tables up to 3080000000 @ [0x307ff3d000-0x307fffffff] [ 0.000000] memblock_x86_reserve_range: [0x307ffc0000-0x307fffffff] PGTABLE [ 0.000000] init_memory_mapping: [0x00003080000000-0x0000407fffffff] [ 0.000000] 3080000000 - 4080000000 page 2M [ 0.000000] kernel direct mapping tables up to 4080000000 @ [0x407fefd000-0x407fffffff] [ 0.000000] memblock_x86_reserve_range: [0x407ffc0000-0x407fffffff] PGTABLE [ 0.000000] init_memory_mapping: [0x00004080000000-0x0000507fffffff] [ 0.000000] 4080000000 - 5080000000 page 2M [ 0.000000] kernel direct mapping tables up to 5080000000 @ [0x507febd000-0x507fffffff] [ 0.000000] memblock_x86_reserve_range: [0x507ffc0000-0x507fffffff] PGTABLE [ 0.000000] init_memory_mapping: [0x00005080000000-0x0000607fffffff] [ 0.000000] 5080000000 - 6080000000 page 2M [ 0.000000] kernel direct mapping tables up to 6080000000 @ [0x607fe7d000-0x607fffffff] [ 0.000000] memblock_x86_reserve_range: [0x607ffc0000-0x607fffffff] PGTABLE [ 0.000000] init_memory_mapping: [0x00006080000000-0x0000707fffffff] [ 0.000000] 6080000000 - 7080000000 page 2M [ 0.000000] kernel direct mapping tables up to 7080000000 @ [0x707fe3d000-0x707fffffff] [ 0.000000] memblock_x86_reserve_range: [0x707ffc0000-0x707fffffff] PGTABLE [ 0.000000] init_memory_mapping: [0x00007080000000-0x0000807fffffff] [ 0.000000] 7080000000 - 8080000000 page 2M [ 0.000000] kernel direct mapping tables up to 8080000000 @ [0x807fdfc000-0x807fffffff] [ 0.000000] memblock_x86_reserve_range: [0x807ffbf000-0x807fffffff] PGTABLE [ 0.000000] Initmem setup node 0 [0000000000000000-000000107fffffff] [ 0.000000] NODE_DATA [0x0000107ffbd000-0x0000107ffc1fff] [ 0.000000] Initmem setup node 1 [0000001080000000-000000207fffffff] [ 0.000000] NODE_DATA [0x0000207ffbb000-0x0000207ffbffff] [ 0.000000] Initmem setup node 2 [0000002080000000-000000307fffffff] [ 0.000000] NODE_DATA [0x0000307ffbb000-0x0000307ffbffff] [ 0.000000] Initmem setup node 3 [0000003080000000-000000407fffffff] [ 0.000000] NODE_DATA [0x0000407ffbb000-0x0000407ffbffff] [ 0.000000] Initmem setup node 4 
[0000004080000000-000000507fffffff] [ 0.000000] NODE_DATA [0x0000507ffbb000-0x0000507ffbffff] [ 0.000000] Initmem setup node 5 [0000005080000000-000000607fffffff] [ 0.000000] NODE_DATA [0x0000607ffbb000-0x0000607ffbffff] [ 0.000000] Initmem setup node 6 [0000006080000000-000000707fffffff] [ 0.000000] NODE_DATA [0x0000707ffbb000-0x0000707ffbffff] [ 0.000000] Initmem setup node 7 [0000007080000000-000000807fffffff] [ 0.000000] NODE_DATA [0x0000807ffba000-0x0000807ffbefff] Signed-off-by: Yinghai Lu LKML-Reference: <4D1933D1.9020609@kernel.org> Signed-off-by: H. Peter Anvin diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 93626e6..731d211 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -54,6 +54,8 @@ static inline phys_addr_t get_max_mapped(void) extern unsigned long init_memory_mapping(unsigned long start, unsigned long end); +void init_memory_mapping_high(void); + extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn, int acpi, int k8); extern void free_initmem(void); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 3def8c9..fc0fe74 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -931,14 +931,6 @@ void __init setup_arch(char **cmdline_p) max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn< max_low_pfn) { - max_pfn_mapped = init_memory_mapping(1UL<<32, - max_pfn<> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); + init_memory_mapping_high(); + for_each_node_mask(i, node_possible_map) { + int j; + for (j = apicid_base; j < cores + apicid_base; j++) apicid_to_node[(i << bits) + j] = i; setup_node_bootmem(i, nodes[i].start, nodes[i].end); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 5863950..fa6fe75 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -65,16 +65,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse, #ifdef CONFIG_X86_32 /* for fixmap */ tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); -#endif - /* - * RED-PEN putting page tables only on node 0 could - * cause a hotspot and fill up ZONE_DMA. The page tables - * need roughly 0.5KB per GB. 
- */ -#ifdef CONFIG_X86_32 good_end = max_pfn_mapped << PAGE_SHIFT; #endif + base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); if (base == MEMBLOCK_ERROR) panic("Cannot find space for the kernel page tables"); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 024847d..194f273 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -607,9 +607,63 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, int acpi, int k8) { memblock_x86_register_active_regions(0, start_pfn, end_pfn); + init_memory_mapping_high(); } #endif +struct mapping_work_data { + unsigned long start; + unsigned long end; + unsigned long pfn_mapped; +}; + +static int __init_refok +mapping_work_fn(unsigned long start_pfn, unsigned long end_pfn, void *datax) +{ + struct mapping_work_data *data = datax; + unsigned long pfn_mapped; + unsigned long final_start, final_end; + + final_start = max_t(unsigned long, start_pfn<start); + final_end = min_t(unsigned long, end_pfn<end); + + if (final_end <= final_start) + return 0; + + pfn_mapped = init_memory_mapping(final_start, final_end); + + if (pfn_mapped > data->pfn_mapped) + data->pfn_mapped = pfn_mapped; + + return 0; +} + +static unsigned long __init_refok +init_memory_mapping_active_regions(unsigned long start, unsigned long end) +{ + struct mapping_work_data data; + + data.start = start; + data.end = end; + data.pfn_mapped = 0; + + work_with_active_regions(MAX_NUMNODES, mapping_work_fn, &data); + + return data.pfn_mapped; +} + +void __init_refok init_memory_mapping_high(void) +{ + if (max_pfn > max_low_pfn) { + max_pfn_mapped = init_memory_mapping_active_regions(1UL<<32, + max_pfn<> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); + init_memory_mapping_high(); + for_each_node_mask(i, node_possible_map) setup_node_bootmem(i, nodes[i].start, nodes[i].end); - } acpi_fake_nodes(nodes, num_nodes); numa_init_array(); return 0; @@ -645,6 +646,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, for (i = 0; i < nr_cpu_ids; i++) numa_set_node(i, 0); memblock_x86_register_active_regions(0, start_pfn, last_pfn); + init_memory_mapping_high(); setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); } diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index a35cb9d..0b961c8 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -433,6 +433,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) return -1; } + init_memory_mapping_high(); + /* Account for nodes with cpus and no memory */ nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed); -- cgit v0.10.2 From f005fe12b90c5b9fe180a09209a893e09affa8aa Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 27 Dec 2010 16:48:32 -0800 Subject: x86-64: Move out cleanup higmap [_brk_end, _end) out of init_memory_mapping() It is not related to init_memory_mapping(), and init_memory_mapping() is getting more bigger. So make it as seperated function and call it from reserve_brk() and that is point when _brk_end is concluded. Signed-off-by: Yinghai Lu LKML-Reference: <4D1933E0.7090305@kernel.org> Signed-off-by: H. 
Peter Anvin diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index fc0fe74..635185b 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -293,10 +293,32 @@ static void __init init_gbpages(void) else direct_gbpages = 0; } + +static void __init cleanup_highmap_brk_end(void) +{ + pud_t *pud; + pmd_t *pmd; + + mmu_cr4_features = read_cr4(); + + /* + * _brk_end cannot change anymore, but it and _end may be + * located on different 2M pages. cleanup_highmap(), however, + * can only consider _end when it runs, so destroy any + * mappings beyond _brk_end here. + */ + pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); + pmd = pmd_offset(pud, _brk_end - 1); + while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) + pmd_clear(pmd); +} #else static inline void init_gbpages(void) { } +static inline void cleanup_highmap_brk_end(void) +{ +} #endif static void __init reserve_brk(void) @@ -307,6 +329,8 @@ static void __init reserve_brk(void) /* Mark brk area as locked down and no longer taking any new allocations */ _brk_start = 0; + + cleanup_highmap_brk_end(); } #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index fa6fe75..35ee75d 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -270,25 +270,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, load_cr3(swapper_pg_dir); #endif -#ifdef CONFIG_X86_64 - if (!after_bootmem && !start) { - pud_t *pud; - pmd_t *pmd; - - mmu_cr4_features = read_cr4(); - - /* - * _brk_end cannot change anymore, but it and _end may be - * located on different 2M pages. cleanup_highmap(), however, - * can only consider _end when it runs, so destroy any - * mappings beyond _brk_end here. - */ - pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); - pmd = pmd_offset(pud, _brk_end - 1); - while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) - pmd_clear(pmd); - } -#endif __flush_tlb_all(); if (!after_bootmem && e820_table_end > e820_table_start) -- cgit v0.10.2 From d518573de63fb119e5e9a3137386544671387681 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Mon, 24 Jan 2011 16:05:40 +0100 Subject: x86, amd: Normalize compute unit IDs on multi-node processors On multi-node CPUs we don't need the socket wide compute unit ID but the node-wide compute unit ID. Thus we need to normalize the value. This is similar to what we do with cpu_core_id. A compute unit is then identified by physical_package_id, node_id, and compute_unit_id. 
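As a worked example (hypothetical numbers, not taken from the patch): on a two-node processor with x86_max_cores = 16 and two cores per compute unit (cores_per_cu = 2), the patch computes cores_per_node = 16 / 2 = 8 and cus_per_node = 8 / 2 = 4. A core on the second node that enters amd_get_topology() with the socket-wide values cpu_core_id = 10 and compute_unit_id = 5 is then normalized to cpu_core_id = 10 % 8 = 2 and compute_unit_id = 5 % 4 = 1, so both IDs count from 0 within the node, mirroring what was already done for cpu_core_id.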
Signed-off-by: Andreas Herrmann LKML-Reference: <1295881543-572552-2-git-send-email-hans.rosenfeld@amd.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7c7bedb..990cc48 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -261,7 +261,7 @@ static int __cpuinit nearby_node(int apicid) #ifdef CONFIG_X86_HT static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) { - u32 nodes; + u32 nodes, cores_per_cu; u8 node_id; int cpu = smp_processor_id(); @@ -276,6 +276,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) /* get compute unit information */ smp_num_siblings = ((ebx >> 8) & 3) + 1; c->compute_unit_id = ebx & 0xff; + cores_per_cu = ((ebx >> 8) & 3) + 1; } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { u64 value; @@ -288,15 +289,18 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) /* fixup multi-node processor information */ if (nodes > 1) { u32 cores_per_node; + u32 cus_per_node; set_cpu_cap(c, X86_FEATURE_AMD_DCM); cores_per_node = c->x86_max_cores / nodes; + cus_per_node = cores_per_node / cores_per_cu; /* store NodeID, use llc_shared_map to store sibling info */ per_cpu(cpu_llc_id, cpu) = node_id; /* core id to be in range from 0 to (cores_per_node - 1) */ - c->cpu_core_id = c->cpu_core_id % cores_per_node; + c->cpu_core_id %= cores_per_node; + c->compute_unit_id %= cus_per_node; } } #endif diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 0cbe8c0..fbaa222 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -414,6 +414,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) if (cpu_has(c, X86_FEATURE_TOPOEXT)) { if (c->phys_proc_id == o->phys_proc_id && + per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) && c->compute_unit_id == o->compute_unit_id) link_thread_siblings(cpu, i); } else if (c->phys_proc_id == o->phys_proc_id && -- cgit v0.10.2 From b453de02b786c63b8928ec822401468131db0a9b Mon Sep 17 00:00:00 2001 From: Hans Rosenfeld Date: Mon, 24 Jan 2011 16:05:41 +0100 Subject: x86, amd: Enable L3 cache index disable on family 0x15 AMD family 0x15 CPUs support L3 cache index disable, so enable it on them. Signed-off-by: Hans Rosenfeld Cc: LKML-Reference: <1295881543-572552-3-git-send-email-hans.rosenfeld@amd.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 0a99f71..a4f394c 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -85,6 +85,9 @@ int amd_cache_northbridges(void) boot_cpu_data.x86_mask >= 0x1)) amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; + if (boot_cpu_data.x86 == 0x15) + amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; + return 0; } EXPORT_SYMBOL_GPL(amd_cache_northbridges); -- cgit v0.10.2 From 41b2610c3443e6c4760e61fc10eef73f96f9f6a5 Mon Sep 17 00:00:00 2001 From: Hans Rosenfeld Date: Mon, 24 Jan 2011 16:05:42 +0100 Subject: x86, amd: Extend AMD northbridge caching code to support "Link Control" devices "Link Control" devices (NB function 4) will be used by L3 cache partitioning on family 0x15. 
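As a rough illustration of how a consumer could use the newly cached device once amd_cache_northbridges() has run (a sketch, not code from this patch; the helper name and register argument are made up for the example):

        /* Hypothetical helper, for illustration only: read a config-space
         * register from the "Link Control" function (NB function 4) of a node. */
        static int read_nb_link_reg(int node, int reg, u32 *val)
        {
                struct pci_dev *link = node_to_amd_nb(node)->link;

                if (!link)
                        return -ENODEV;
                return pci_read_config_dword(link, reg, val);
        }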
Signed-off-by: Hans Rosenfeld Cc: LKML-Reference: <1295881543-572552-4-git-send-email-hans.rosenfeld@amd.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 64dc82e..3e70700 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -26,6 +26,7 @@ extern void amd_get_nodes(struct bootnode *nodes); struct amd_northbridge { struct pci_dev *misc; + struct pci_dev *link; }; struct amd_northbridge_info { diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index a4f394c..4ae9a96 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -20,6 +20,11 @@ struct pci_device_id amd_nb_misc_ids[] = { }; EXPORT_SYMBOL(amd_nb_misc_ids); +static struct pci_device_id amd_nb_link_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) }, + {} +}; + const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = { { 0x00, 0x18, 0x20 }, { 0xff, 0x00, 0x20 }, @@ -45,7 +50,7 @@ int amd_cache_northbridges(void) { int i = 0; struct amd_northbridge *nb; - struct pci_dev *misc; + struct pci_dev *misc, *link; if (amd_nb_num()) return 0; @@ -64,10 +69,12 @@ int amd_cache_northbridges(void) amd_northbridges.nb = nb; amd_northbridges.num = i; - misc = NULL; + link = misc = NULL; for (i = 0; i != amd_nb_num(); i++) { node_to_amd_nb(i)->misc = misc = next_northbridge(misc, amd_nb_misc_ids); + node_to_amd_nb(i)->link = link = + next_northbridge(link, amd_nb_link_ids); } /* some CPU families (e.g. family 0x11) do not support GART */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3adb06e..580de67 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -518,6 +518,7 @@ #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 #define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603 +#define PCI_DEVICE_ID_AMD_15H_NB_LINK 0x1604 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 -- cgit v0.10.2 From bd22a2f1982fa3e90ce7d5d011c37d88aa67e73c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:27 +0100 Subject: x86: Kill unused static boot_cpu_logical_apicid in smpboot.c Signed-off-by: Tejun Heo Reviewed-by: Pekka Enberg Acked-by: Yinghai Lu Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-2-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 0cbe8c0..ee9203a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -165,8 +165,6 @@ static void unmap_cpu_to_node(int cpu) #endif #ifdef CONFIG_X86_32 -static int boot_cpu_logical_apicid; - u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; @@ -1096,9 +1094,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) * Setup boot CPU information */ smp_store_cpu_info(0); /* Final full version of the data */ -#ifdef CONFIG_X86_32 - boot_cpu_logical_apicid = logical_smp_processor_id(); -#endif + current_thread_info()->cpu = 0; /* needed? 
*/ for_each_possible_cpu(i) { zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); -- cgit v0.10.2 From b78aa66b1fe4179d28e3f6502dc179773519a1bb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:28 +0100 Subject: x86: Drop x86_32 MAX_APICID Commit 56d91f13 (x86, acpi: Add MAX_LOCAL_APIC for 32bit) added MAX_LOCAL_APIC for x86_32 but didn't replace MAX_APICID users with it. Convert MAX_APICID users to MAX_LOCAL_APIC and drop MAX_APICID. Signed-off-by: Tejun Heo Reviewed-by: Pekka Enberg Acked-by: Yinghai Lu Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-3-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 0c90dd9..edc2a45 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -33,8 +33,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES]; extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; #endif -#define MAX_APICID 256 - #else /* CONFIG_X86_64: */ #define MAX_MP_BUSSES 256 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ee9203a..53a85ba 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -72,7 +72,7 @@ #include #ifdef CONFIG_X86_32 -u8 apicid_2_node[MAX_APICID]; +u8 apicid_2_node[MAX_LOCAL_APIC]; #endif /* State of each CPU */ diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c index ae96e7b..6027a48 100644 --- a/arch/x86/mm/srat_32.c +++ b/arch/x86/mm/srat_32.c @@ -57,7 +57,7 @@ struct node_memory_chunk_s { static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS]; static int __initdata num_memory_chunks; /* total number of memory chunks */ -static u8 __initdata apicid_to_pxm[MAX_APICID]; +static u8 __initdata apicid_to_pxm[MAX_LOCAL_APIC]; int acpi_numa __initdata; @@ -254,7 +254,7 @@ int __init get_memcfg_from_srat(void) printk(KERN_DEBUG "Number of memory chunks in system = %d\n", num_memory_chunks); - for (i = 0; i < MAX_APICID; i++) + for (i = 0; i < MAX_LOCAL_APIC; i++) apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]); for (j = 0; j < num_memory_chunks; j++){ -- cgit v0.10.2 From 1245e1668c6e52bee76a423f8fab3bfcdd6226ae Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:29 +0100 Subject: x86: Make default_send_IPI_mask_sequence/allbutself_logical() 32bit only Both functions are used only in 32bit. Put them inside CONFIG_X86_32. This is to prepare for logical apicid handling update. - Cyrill Gorcunov spotted that I forgot to move declarations in ipi.h under CONFIG_X86_32. Fixed. 
Signed-off-by: Tejun Heo Reviewed-by: Pekka Enberg Reviewed-by: Cyrill Gorcunov Acked-by: Yinghai Lu Cc: eric.dumazet@gmail.com Cc: brgerst@gmail.com Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-4-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index 0b72282..615fa90 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector); extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector); -extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, - int vector); -extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, - int vector); /* Avoid include hell */ #define NMI_VECTOR 0x02 @@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector) } #ifdef CONFIG_X86_32 +extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, + int vector); +extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, + int vector); extern void default_send_IPI_mask_logical(const struct cpumask *mask, int vector); extern void default_send_IPI_allbutself(int vector); diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 08385e0..5037736 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, local_irq_restore(flags); } +#ifdef CONFIG_X86_32 + void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector) { @@ -96,8 +98,6 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, local_irq_restore(flags); } -#ifdef CONFIG_X86_32 - /* * This is only used on smaller machines. */ -- cgit v0.10.2 From 4c321ff8a01a95badf5d5403d80ca4e0ab07fce7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:30 +0100 Subject: x86: Replace cpu_2_logical_apicid[] with early percpu variable Unlike x86_64, on x86_32, the mapping from cpu to logical apicid may vary depending on apic in use. cpu_2_logical_apicid[] array is used for this mapping. Replace it with early percpu variable x86_cpu_to_logical_apicid to make it better aligned with other mappings. 
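In outline, the early-percpu pattern this moves to looks as follows (a condensed sketch of what the hunks below add, not extra code in the patch):

        /* header: declaration visible to all users */
        DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);

        /* apic.c: definition with an initial value usable before per-cpu setup */
        DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID);

        /* readers and writers replace the old array indexing */
        apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); /* was cpu_2_logical_apicid[cpu] */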
Signed-off-by: Tejun Heo Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-5-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 5e3969c..eb139ec 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -595,8 +595,4 @@ extern int default_check_phys_apicid_present(int phys_apicid); #endif /* CONFIG_X86_LOCAL_APIC */ -#ifdef CONFIG_X86_32 -extern u8 cpu_2_logical_apicid[NR_CPUS]; -#endif - #endif /* _ASM_X86_APIC_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 4c2f63c..dc7c46a 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -38,6 +38,9 @@ static inline struct cpumask *cpu_core_mask(int cpu) DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); +#if defined(CONFIG_SMP) && defined(CONFIG_X86_32) +DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid); +#endif /* Static state in head.S used to set up a CPU */ extern struct { diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 06c196d..126d5a3 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -78,6 +78,17 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); #ifdef CONFIG_X86_32 + +#ifdef CONFIG_SMP +/* + * On x86_32, the mapping between cpu and logical apicid may vary + * depending on apic in use. The following early percpu variable is + * used for the mapping. This is where the behaviors of x86_64 and 32 + * actually diverge. Let's keep it ugly for now. + */ +DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID); +#endif + /* * Knob to control our willingness to enable the local APIC. * diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 8593582..7cb73e1 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -534,7 +534,7 @@ static int es7000_cpu_to_logical_apicid(int cpu) #ifdef CONFIG_SMP if (cpu >= nr_cpu_ids) return BAD_APICID; - return cpu_2_logical_apicid[cpu]; + return early_per_cpu(x86_cpu_to_logical_apicid, cpu); #else return logical_smp_processor_id(); #endif diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 960f26a..4ed90c4 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -377,7 +377,7 @@ static inline int numaq_cpu_to_logical_apicid(int cpu) { if (cpu >= nr_cpu_ids) return BAD_APICID; - return cpu_2_logical_apicid[cpu]; + return early_per_cpu(x86_cpu_to_logical_apicid, cpu); } /* diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 9b41926..82cfc3e 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -206,7 +206,7 @@ static void summit_init_apic_ldr(void) /* Create logical APIC IDs by counting CPUs already in cluster. 
*/ for (count = 0, i = nr_cpu_ids; --i >= 0; ) { - lid = cpu_2_logical_apicid[i]; + lid = early_per_cpu(x86_cpu_to_logical_apicid, i); if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) ++count; } @@ -247,7 +247,7 @@ static inline int summit_cpu_to_logical_apicid(int cpu) #ifdef CONFIG_SMP if (cpu >= nr_cpu_ids) return BAD_APICID; - return cpu_2_logical_apicid[cpu]; + return early_per_cpu(x86_cpu_to_logical_apicid, cpu); #else return logical_smp_processor_id(); #endif diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 002b796..b5147f0 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -225,6 +225,10 @@ void __init setup_per_cpu_areas(void) per_cpu(x86_bios_cpu_apicid, cpu) = early_per_cpu_map(x86_bios_cpu_apicid, cpu); #endif +#ifdef CONFIG_X86_32 + per_cpu(x86_cpu_to_logical_apicid, cpu) = + early_per_cpu_map(x86_cpu_to_logical_apicid, cpu); +#endif #ifdef CONFIG_X86_64 per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack_union.irq_stack, cpu) + @@ -256,6 +260,9 @@ void __init setup_per_cpu_areas(void) early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; #endif +#ifdef CONFIG_X86_32 + early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL; +#endif #if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; #endif diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 53a85ba..df934e4 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -165,9 +165,6 @@ static void unmap_cpu_to_node(int cpu) #endif #ifdef CONFIG_X86_32 -u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = - { [0 ... NR_CPUS-1] = BAD_APICID }; - static void map_cpu_to_logical_apicid(void) { int cpu = smp_processor_id(); @@ -177,13 +174,13 @@ static void map_cpu_to_logical_apicid(void) if (!node_online(node)) node = first_online_node; - cpu_2_logical_apicid[cpu] = apicid; + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = apicid; map_cpu_to_node(cpu, node); } void numa_remove_cpu(int cpu) { - cpu_2_logical_apicid[cpu] = BAD_APICID; + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = BAD_APICID; unmap_cpu_to_node(cpu); } #else -- cgit v0.10.2 From 6f802c4bfa2acf1bffa8341fe9084da0205d581d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:31 +0100 Subject: x86: Always use x86_cpu_to_logical_apicid for cpu -> logical apic id Currently, cpu -> logical apic id translation is done by apic->cpu_to_logical_apicid() callback which may or may not use x86_cpu_to_logical_apicid. This is unnecessary as it should always equal logical_smp_processor_id() which is known early during CPU bring up. Initialize x86_cpu_to_logical_apicid after apic->init_apic_ldr() in setup_local_APIC() and always use x86_cpu_to_logical_apicid for cpu -> logical apic id mapping. Signed-off-by: Tejun Heo Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-6-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 126d5a3..ae08246 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1248,6 +1248,14 @@ void __cpuinit setup_local_APIC(void) */ apic->init_apic_ldr(); +#ifdef CONFIG_X86_32 + /* + * APIC LDR is initialized. Fetch and store logical_apic_id. 
+ */ + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = + logical_smp_processor_id(); +#endif + /* * Set Task Priority to 'accept all'. We never change this * later on. diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 5037736..cce91bf 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -73,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, local_irq_save(flags); for_each_cpu(query_cpu, mask) __default_send_IPI_dest_field( - apic->cpu_to_logical_apicid(query_cpu), vector, - apic->dest_logical); + early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, apic->dest_logical); local_irq_restore(flags); } @@ -92,8 +92,8 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, if (query_cpu == this_cpu) continue; __default_send_IPI_dest_field( - apic->cpu_to_logical_apicid(query_cpu), vector, - apic->dest_logical); + early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, apic->dest_logical); } local_irq_restore(flags); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index df934e4..ca20f6b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -168,19 +168,18 @@ static void unmap_cpu_to_node(int cpu) static void map_cpu_to_logical_apicid(void) { int cpu = smp_processor_id(); - int apicid = logical_smp_processor_id(); - int node = apic->apicid_to_node(apicid); + int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + int node; + node = apic->apicid_to_node(logical_apicid); if (!node_online(node)) node = first_online_node; - early_per_cpu(x86_cpu_to_logical_apicid, cpu) = apicid; map_cpu_to_node(cpu, node); } void numa_remove_cpu(int cpu) { - early_per_cpu(x86_cpu_to_logical_apicid, cpu) = BAD_APICID; unmap_cpu_to_node(cpu); } #else -- cgit v0.10.2 From 7632611f534340182c832d2b139cb19676f24e1a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:32 +0100 Subject: x86: Kill apic->cpu_to_logical_apicid() After the previous patch, apic->cpu_to_logical_apicid() is no longer used. Kill it. For apic types with custom cpu_to_logical_apicid() which is also used for other purposes, remove the function and modify its users to do the mapping directly. #ifdef's on CONFIG_SMP in es7000_32 and summit_32 are ignored during conversion as they are not used for UP kernels. 
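The caller-side conversion is mechanical; roughly (a sketch of the pattern applied in the hunks below):

        /* before */
        apicid = apic->cpu_to_logical_apicid(cpu);

        /* after (es7000, summit, numaq) */
        apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

        /* after (bigsmp, where the logical apicid equals the physical one) */
        apicid = cpu_physical_id(cpu);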
Signed-off-by: Tejun Heo Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-7-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index eb139ec..d1aa0c3 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -307,7 +307,6 @@ struct apic { void (*setup_apic_routing)(void); int (*multi_timer_check)(int apic, int irq); int (*apicid_to_node)(int logical_apicid); - int (*cpu_to_logical_apicid)(int cpu); int (*cpu_present_to_apicid)(int mps_cpu); void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); void (*setup_portio_remap)(void); @@ -557,12 +556,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_ma *retmap = *phys_map; } -/* Mapping from cpu number to logical apicid */ -static inline int default_cpu_to_logical_apicid(int cpu) -{ - return 1 << cpu; -} - static inline int __default_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 09d3b17..5a9d11a 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -186,7 +186,6 @@ struct apic apic_flat = { .setup_apic_routing = NULL, .multi_timer_check = NULL, .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, @@ -338,7 +337,6 @@ struct apic apic_physflat = { .setup_apic_routing = NULL, .multi_timer_check = NULL, .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index e31b9ff..f3d19b2 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void) return 0; } -static int noop_cpu_to_logical_apicid(int cpu) -{ - return 0; -} - static int noop_phys_pkg_id(int cpuid_apic, int index_msb) { return 0; @@ -155,7 +150,6 @@ struct apic apic_noop = { .multi_timer_check = NULL, .apicid_to_node = noop_apicid_to_node, - .cpu_to_logical_apicid = noop_cpu_to_logical_apicid, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index cb804c5..4c62592 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -93,14 +93,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -/* Mapping from cpu number to logical apicid */ -static inline int bigsmp_cpu_to_logical_apicid(int cpu) -{ - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_physical_id(cpu); -} - static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) { /* For clustered we don't have a good way to do this yet - hack */ @@ -115,7 +107,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid) /* As we are using single CPU as destination, pick only one CPU here */ static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) { - return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask)); + int cpu = 
cpumask_first(cpumask); + + if (cpu < nr_cpu_ids) + return cpu_physical_id(cpu); + return BAD_APICID; } static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, @@ -129,9 +125,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, */ for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; + return cpu_physical_id(cpu); } - return bigsmp_cpu_to_logical_apicid(cpu); + return BAD_APICID; } static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) @@ -220,7 +216,6 @@ struct apic apic_bigsmp = { .setup_apic_routing = bigsmp_setup_apic_routing, .multi_timer_check = NULL, .apicid_to_node = bigsmp_apicid_to_node, - .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 7cb73e1..6840681 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -528,18 +528,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap) ++cpu_id; } -/* Mapping from cpu number to logical apicid */ -static int es7000_cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return early_per_cpu(x86_cpu_to_logical_apicid, cpu); -#else - return logical_smp_processor_id(); -#endif -} - static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) { /* For clustered we don't have a good way to do this yet - hack */ @@ -561,7 +549,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask) * The cpus in the mask must all be on the apic cluster. 
*/ for_each_cpu(cpu, cpumask) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); + int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { WARN(1, "Not a valid mask!"); @@ -578,7 +566,7 @@ static unsigned int es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { - int apicid = es7000_cpu_to_logical_apicid(0); + int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); cpumask_var_t cpumask; if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) @@ -656,7 +644,6 @@ struct apic __refdata apic_es7000_cluster = { .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, .apicid_to_node = es7000_apicid_to_node, - .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, @@ -721,7 +708,6 @@ struct apic __refdata apic_es7000 = { .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, .apicid_to_node = es7000_apicid_to_node, - .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 4ed90c4..2b434d5 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask return physids_promote(0xFUL, retmap); } -static inline int numaq_cpu_to_logical_apicid(int cpu) -{ - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return early_per_cpu(x86_cpu_to_logical_apicid, cpu); -} - /* * Supporting over 60 cpus on NUMA-Q requires a locality-dependent * cpu to APIC ID relation to properly interact with the intelligent @@ -509,7 +502,6 @@ struct apic __refdata apic_numaq = { .setup_apic_routing = numaq_setup_apic_routing, .multi_timer_check = numaq_multi_timer_check, .apicid_to_node = numaq_apicid_to_node, - .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, .cpu_present_to_apicid = numaq_cpu_present_to_apicid, .apicid_to_cpu_present = numaq_apicid_to_cpu_present, .setup_portio_remap = numaq_setup_portio_remap, diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 99d2fe0..24a6828 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -131,7 +131,6 @@ struct apic apic_default = { .setup_apic_routing = setup_apic_flat_routing, .multi_timer_check = NULL, .apicid_to_node = default_apicid_to_node, - .cpu_to_logical_apicid = default_cpu_to_logical_apicid, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 82cfc3e..1ef4c14 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -241,18 +241,6 @@ static int summit_apicid_to_node(int logical_apicid) #endif } -/* Mapping from cpu number to logical apicid */ -static inline int summit_cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return early_per_cpu(x86_cpu_to_logical_apicid, cpu); -#else - return logical_smp_processor_id(); -#endif -} - static int summit_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) @@ -286,7 +274,7 @@ static 
unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) * The cpus in the mask must all be on the apic cluster. */ for_each_cpu(cpu, cpumask) { - int new_apicid = summit_cpu_to_logical_apicid(cpu); + int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { printk("%s: Not a valid mask!\n", __func__); @@ -301,7 +289,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { - int apicid = summit_cpu_to_logical_apicid(0); + int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); cpumask_var_t cpumask; if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) @@ -529,7 +517,6 @@ struct apic apic_summit = { .setup_apic_routing = summit_setup_apic_routing, .multi_timer_check = NULL, .apicid_to_node = summit_apicid_to_node, - .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, .cpu_present_to_apicid = summit_cpu_present_to_apicid, .apicid_to_cpu_present = summit_apicid_to_cpu_present, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index cf69c59..badc1fd 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -207,7 +207,6 @@ struct apic apic_x2apic_cluster = { .setup_apic_routing = NULL, .multi_timer_check = NULL, .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 8972f38..f28bf4c 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -196,7 +196,6 @@ struct apic apic_x2apic_phys = { .setup_apic_routing = NULL, .multi_timer_check = NULL, .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index bd16b58..6027620 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -339,7 +339,6 @@ struct apic __refdata apic_x2apic_uv_x = { .setup_apic_routing = NULL, .multi_timer_check = NULL, .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, -- cgit v0.10.2 From acb8bc09c6185e4d3d582d0076aaa6a89f19d8c5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:33 +0100 Subject: x86: Add apic->x86_32_early_logical_apicid() On x86_32, the mapping between cpu and logical apic ID differs depending on the specific apic implementation in use. The mapping is initialized while bringing up CPUs; however, this makes early inits ignore memory topology. Add a x86_32 specific apic->x86_32_early_logical_apicid() which is called early during boot to query the mapping. The mapping is later verified against the result of init_apic_ldr(). The method is allowed to return BAD_APICID if it can't be determined early. noop variant which always returns BAD_APICID is implemented and added to all x86_32 apic implementations. 
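In outline, the resulting flow is (condensed from the hunks below, not additional code):

        /* boot-time CPU enumeration (generic_processor_info): record the
         * apic-specific guess; it may legitimately be BAD_APICID here. */
        early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
                apic->x86_32_early_logical_apicid(cpu);

        /* CPU bring-up (setup_local_APIC), after apic->init_apic_ldr():
         * verify the early guess, then always trust the LDR-derived value. */
        i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
        WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
        early_per_cpu(x86_cpu_to_logical_apicid, cpu) = logical_smp_processor_id();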
Signed-off-by: Tejun Heo Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-8-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index d1aa0c3..efb073b 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -354,6 +354,20 @@ struct apic { void (*icr_write)(u32 low, u32 high); void (*wait_icr_idle)(void); u32 (*safe_wait_icr_idle)(void); + +#ifdef CONFIG_X86_32 + /* + * Called very early during boot from get_smp_config(). It should + * return the logical apicid. x86_[bios]_cpu_to_apicid is + * initialized before this function is called. + * + * If logical apicid can't be determined that early, the function + * may return BAD_APICID. Logical apicid will be configured after + * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity + * won't be applied properly during early boot in this case. + */ + int (*x86_32_early_logical_apicid)(int cpu); +#endif }; /* @@ -501,6 +515,11 @@ extern struct apic apic_noop; extern struct apic apic_default; +static inline int noop_x86_32_early_logical_apicid(int cpu) +{ + return BAD_APICID; +} + /* * Set up the logical destination ID. * diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ae08246..3127079 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1250,8 +1250,13 @@ void __cpuinit setup_local_APIC(void) #ifdef CONFIG_X86_32 /* - * APIC LDR is initialized. Fetch and store logical_apic_id. + * APIC LDR is initialized. If logical_apicid mapping was + * initialized during get_smp_config(), make sure it matches the + * actual value. 
*/ + i = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + WARN_ON(i != BAD_APICID && i != logical_smp_processor_id()); + /* always use the value from LDR */ early_per_cpu(x86_cpu_to_logical_apicid, cpu) = logical_smp_processor_id(); #endif @@ -1991,7 +1996,10 @@ void __cpuinit generic_processor_info(int apicid, int version) early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; #endif - +#ifdef CONFIG_X86_32 + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = + apic->x86_32_early_logical_apicid(cpu); +#endif set_cpu_possible(cpu, true); set_cpu_present(cpu, true); } diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index f3d19b2..0309c58 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -191,4 +191,8 @@ struct apic apic_noop = { .icr_write = noop_apic_icr_write, .wait_icr_idle = noop_apic_wait_icr_idle, .safe_wait_icr_idle = noop_safe_apic_wait_icr_idle, + +#ifdef CONFIG_X86_32 + .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, +#endif }; diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index 4c62592..dd32a9b 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -251,4 +251,6 @@ struct apic apic_bigsmp = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, }; diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 6840681..0ffc1ec 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -682,6 +682,8 @@ struct apic __refdata apic_es7000_cluster = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, }; struct apic __refdata apic_es7000 = { @@ -744,4 +746,6 @@ struct apic __refdata apic_es7000 = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, }; diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 2b434d5..f1a8b12 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -539,4 +539,6 @@ struct apic __refdata apic_numaq = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, }; diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 24a6828..40be7c3 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -166,6 +166,8 @@ struct apic apic_default = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, }; extern struct apic apic_numaq; diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 1ef4c14..172c498 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -552,4 +552,6 @@ struct apic apic_summit = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = 
native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, }; -- cgit v0.10.2 From 3f6f6798889d50ec7ca8eef1d100cda37dc658ea Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:34 +0100 Subject: x86: Implement the default x86_32_early_logical_apicid() Implement x86_32_early_logical_apicid() for the default apic flat routing. Signed-off-by: Tejun Heo Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-9-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 40be7c3..0f9a9ab 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void) apic->setup_apic_routing(); } +static int default_x86_32_early_logical_apicid(int cpu) +{ + return 1 << cpu; +} + static void setup_apic_flat_routing(void) { #ifdef CONFIG_X86_IO_APIC @@ -167,7 +172,7 @@ struct apic apic_default = { .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, - .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, + .x86_32_early_logical_apicid = default_x86_32_early_logical_apicid, }; extern struct apic apic_numaq; -- cgit v0.10.2 From 12bf24a47c1a095233cc8a8b863b509a0d8e0f2c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:35 +0100 Subject: x86: Implement x86_32_early_logical_apicid() for bigsmp_32 Signed-off-by: Tejun Heo Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-10-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index dd32a9b..bc7ed04 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit) return 1; } +static int bigsmp_early_logical_apicid(int cpu) +{ + /* on bigsmp, logical apicid is the same as physical */ + return early_per_cpu(x86_cpu_to_apicid, cpu); +} + static inline unsigned long calculate_ldr(int cpu) { unsigned long val, id; @@ -252,5 +258,5 @@ struct apic apic_bigsmp = { .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, - .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, + .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, }; -- cgit v0.10.2 From 3b39d937843e071c59b3aeecbf7de4750f095b12 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:36 +0100 Subject: x86: Implement x86_32_early_logical_apicid() for summit_32 Factor out logical apic id calculation from summit_init_apic_ldr() and use it for the x86_32_early_logical_apicid() callback. 
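As a rough sketch of the consumer side after the factoring (illustrative only; the function name is hypothetical and the APIC_DFR write is omitted for brevity), init_apic_ldr() simply programs the value that was computed earlier for this CPU:

	/* Illustrative: LDR programming now consumes the precomputed value. */
	static void example_consume_early_apicid(void)
	{
		int cpu = smp_processor_id();
		unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
		unsigned long val;

		val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
		val |= SET_APIC_LOGICAL_ID(id);
		apic_write(APIC_LDR, val);
	}
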
Signed-off-by: Tejun Heo Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-11-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 172c498..8c91473 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit) return 1; } -static void summit_init_apic_ldr(void) +static int summit_early_logical_apicid(int cpu) { - unsigned long val, id; int count = 0; - u8 my_id = (u8)hard_smp_processor_id(); + u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu); u8 my_cluster = APIC_CLUSTER(my_id); #ifdef CONFIG_SMP u8 lid; @@ -214,7 +213,15 @@ static void summit_init_apic_ldr(void) /* We only have a 4 wide bitmap in cluster mode. If a deranged * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); - id = my_cluster | (1UL << count); + return my_cluster | (1UL << count); +} + +static void summit_init_apic_ldr(void) +{ + int cpu = smp_processor_id(); + unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + unsigned long val; + apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; val |= SET_APIC_LOGICAL_ID(id); @@ -553,5 +560,5 @@ struct apic apic_summit = { .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, - .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, + .x86_32_early_logical_apicid = summit_early_logical_apicid, }; -- cgit v0.10.2 From df04cf011b0657ddc782b48d455f7e232b9be41c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:37 +0100 Subject: x86: Implement x86_32_early_logical_apicid() for numaq_32 Signed-off-by: Tejun Heo Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-12-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 0ffc1ec..5c53d05 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit) return physid_isset(bit, phys_cpu_present_map); } +static int es7000_early_logical_apicid(int cpu) +{ + /* on es7000, logical apicid is the same as physical */ + return early_per_cpu(x86_bios_cpu_apicid, cpu); +} + static unsigned long calculate_ldr(int cpu) { unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); @@ -683,7 +689,7 @@ struct apic __refdata apic_es7000_cluster = { .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, - .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, + .x86_32_early_logical_apicid = es7000_early_logical_apicid, }; struct apic __refdata apic_es7000 = { @@ -747,5 +753,5 @@ struct apic __refdata apic_es7000 = { .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, - .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, + .x86_32_early_logical_apicid = es7000_early_logical_apicid, }; -- cgit v0.10.2 From 89e5dc218e084e13a3996db6693b01478912f4ee Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 
14:37:38 +0100 Subject: x86: Replace apic->apicid_to_node() with ->x86_32_numa_cpu_node() apic->apicid_to_node() is 32bit specific apic operation which determines NUMA node for a CPU. Depending on the APIC implementation, it can be easier to determine NUMA node from either physical or logical apicid. Currently, ->apicid_to_node() takes @logical_apicid and calls hard_smp_processor_id() if the physical apicid is needed. This prevents NUMA mapping from being queried from a different CPU, which in turn makes it impossible to initialize NUMA mapping before SMP bringup. This patch replaces apic->apicid_to_node() with ->x86_32_numa_cpu_node() which takes @cpu, from which both logical and physical apicids can easily be determined. While at it, drop duplicate implementations from bigsmp_32 and summit_32, and use the default one. Signed-off-by: Tejun Heo Reviewed-by: Pekka Enberg Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-13-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index efb073b..ad30ca4 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -306,7 +306,6 @@ struct apic { void (*setup_apic_routing)(void); int (*multi_timer_check)(int apic, int irq); - int (*apicid_to_node)(int logical_apicid); int (*cpu_present_to_apicid)(int mps_cpu); void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); void (*setup_portio_remap)(void); @@ -367,6 +366,9 @@ struct apic { * won't be applied properly during early boot in this case. */ int (*x86_32_early_logical_apicid)(int cpu); + + /* determine CPU -> NUMA node mapping */ + int (*x86_32_numa_cpu_node)(int cpu); #endif }; @@ -539,7 +541,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) return cpuid_apic >> index_msb; } -extern int default_apicid_to_node(int logical_apicid); +extern int default_x86_32_numa_cpu_node(int cpu); #endif diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 3127079..0f4f3c1 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2020,10 +2020,14 @@ void default_init_apic_ldr(void) } #ifdef CONFIG_X86_32 -int default_apicid_to_node(int logical_apicid) +int default_x86_32_numa_cpu_node(int cpu) { -#ifdef CONFIG_SMP - return apicid_2_node[hard_smp_processor_id()]; +#ifdef CONFIG_NUMA + int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); + + if (apicid != BAD_APICID) + return apicid_2_node[apicid]; + return NUMA_NO_NODE; #else return 0; #endif diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 5a9d11a..5652d31 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -185,7 +185,6 @@ struct apic apic_flat = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, @@ -336,7 +335,6 @@ struct apic apic_physflat = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index 0309c58..f1baa2d 100644 --- 
a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -108,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask) cpumask_set_cpu(cpu, retmask); } -int noop_apicid_to_node(int logical_apicid) -{ - /* we're always on node 0 */ - return 0; -} - static u32 noop_apic_read(u32 reg) { WARN_ON_ONCE((cpu_has_apic && !disable_apic)); @@ -125,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v) WARN_ON_ONCE(cpu_has_apic && !disable_apic); } +#ifdef CONFIG_X86_32 +static int noop_x86_32_numa_cpu_node(int cpu) +{ + /* we're always on node 0 */ + return 0; +} +#endif + struct apic apic_noop = { .name = "noop", .probe = noop_probe, @@ -148,7 +150,6 @@ struct apic apic_noop = { .ioapic_phys_id_map = default_ioapic_phys_id_map, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = noop_apicid_to_node, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, @@ -194,5 +195,6 @@ struct apic apic_noop = { #ifdef CONFIG_X86_32 .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, + .x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node, #endif }; diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index bc7ed04..541a2e4 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -86,11 +86,6 @@ static void bigsmp_setup_apic_routing(void) nr_ioapics); } -static int bigsmp_apicid_to_node(int logical_apicid) -{ - return apicid_2_node[hard_smp_processor_id()]; -} - static int bigsmp_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) @@ -221,7 +216,6 @@ struct apic apic_bigsmp = { .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, .setup_apic_routing = bigsmp_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = bigsmp_apicid_to_node, .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, .setup_portio_remap = NULL, @@ -259,4 +253,5 @@ struct apic apic_bigsmp = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, + .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, }; diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 5c53d05..3e9de48 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -510,12 +510,11 @@ static void es7000_setup_apic_routing(void) nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); } -static int es7000_apicid_to_node(int logical_apicid) +static int es7000_numa_cpu_node(int cpu) { return 0; } - static int es7000_cpu_present_to_apicid(int mps_cpu) { if (!mps_cpu) @@ -649,7 +648,6 @@ struct apic __refdata apic_es7000_cluster = { .ioapic_phys_id_map = es7000_ioapic_phys_id_map, .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = es7000_apicid_to_node, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, @@ -690,6 +688,7 @@ struct apic __refdata apic_es7000_cluster = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = es7000_early_logical_apicid, + .x86_32_numa_cpu_node = es7000_numa_cpu_node, }; struct apic __refdata apic_es7000 = { @@ -715,7 +714,6 @@ struct apic __refdata apic_es7000 = { .ioapic_phys_id_map = es7000_ioapic_phys_id_map, .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = 
es7000_apicid_to_node, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, @@ -754,4 +752,5 @@ struct apic __refdata apic_es7000 = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = es7000_early_logical_apicid, + .x86_32_numa_cpu_node = es7000_numa_cpu_node, }; diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index f1a8b12..6273eee 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -391,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid) return logical_apicid >> 4; } +static int numaq_numa_cpu_node(int cpu) +{ + int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + + if (logical_apicid != BAD_APICID) + return numaq_apicid_to_node(logical_apicid); + return NUMA_NO_NODE; +} + static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap) { int node = numaq_apicid_to_node(logical_apicid); @@ -501,7 +510,6 @@ struct apic __refdata apic_numaq = { .ioapic_phys_id_map = numaq_ioapic_phys_id_map, .setup_apic_routing = numaq_setup_apic_routing, .multi_timer_check = numaq_multi_timer_check, - .apicid_to_node = numaq_apicid_to_node, .cpu_present_to_apicid = numaq_cpu_present_to_apicid, .apicid_to_cpu_present = numaq_apicid_to_cpu_present, .setup_portio_remap = numaq_setup_portio_remap, @@ -541,4 +549,5 @@ struct apic __refdata apic_numaq = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, + .x86_32_numa_cpu_node = numaq_numa_cpu_node, }; diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 0f9a9ab..fc84c7b 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -135,7 +135,6 @@ struct apic apic_default = { .ioapic_phys_id_map = default_ioapic_phys_id_map, .setup_apic_routing = setup_apic_flat_routing, .multi_timer_check = NULL, - .apicid_to_node = default_apicid_to_node, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, .setup_portio_remap = NULL, @@ -173,6 +172,7 @@ struct apic apic_default = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = default_x86_32_early_logical_apicid, + .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, }; extern struct apic apic_numaq; diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 8c91473..e4b8059 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -239,15 +239,6 @@ static void summit_setup_apic_routing(void) nr_ioapics); } -static int summit_apicid_to_node(int logical_apicid) -{ -#ifdef CONFIG_SMP - return apicid_2_node[hard_smp_processor_id()]; -#else - return 0; -#endif -} - static int summit_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) @@ -523,7 +514,6 @@ struct apic apic_summit = { .ioapic_phys_id_map = summit_ioapic_phys_id_map, .setup_apic_routing = summit_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = summit_apicid_to_node, .cpu_present_to_apicid = summit_cpu_present_to_apicid, .apicid_to_cpu_present = summit_apicid_to_cpu_present, .setup_portio_remap = NULL, @@ -561,4 +551,5 @@ struct apic apic_summit = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = summit_early_logical_apicid, + .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, }; diff --git 
a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index badc1fd..90949bb 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -206,7 +206,6 @@ struct apic apic_x2apic_cluster = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index f28bf4c..c7e6d66 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -195,7 +195,6 @@ struct apic apic_x2apic_phys = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 6027620..3c28928 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -338,7 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ca20f6b..5319cdd 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -168,10 +168,9 @@ static void unmap_cpu_to_node(int cpu) static void map_cpu_to_logical_apicid(void) { int cpu = smp_processor_id(); - int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); int node; - node = apic->apicid_to_node(logical_apicid); + node = apic->x86_32_numa_cpu_node(cpu); if (!node_online(node)) node = first_online_node; -- cgit v0.10.2 From bbc9e2f452d9c4b166d1f9a78d941d80173312fe Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:39 +0100 Subject: x86: Unify cpu/apicid <-> NUMA node mapping between 32 and 64bit The mapping between cpu/apicid and node is done via apicid_to_node[] on 64bit and apicid_2_node[] + apic->x86_32_numa_cpu_node() on 32bit. This difference makes it difficult to further unify 32 and 64bit NUMA handling. This patch unifies it by replacing both apicid_to_node[] and apicid_2_node[] with __apicid_to_node[] array, which is accessed by two accessors - set_apicid_to_node() and numa_cpu_node(). On 64bit, numa_cpu_node() always consults __apicid_to_node[] directly while 32bit goes through apic->numa_cpu_node() method to allow apic implementations to override it. srat_detect_node() for amd cpus contains workaround for broken NUMA configuration which assumes relationship between APIC ID, HT node ID and NUMA topology. Leave it to access __apicid_to_node[] directly as mapping through CPU might result in undesirable behavior change. The comment is reformatted and updated to note the ugliness. 
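A hedged usage sketch of the new accessors (the wrapper function below is illustrative and not part of the patch; the __apicid_to_node[] array itself is meant to be touched only through set_apicid_to_node() and numa_cpu_node()):

	/* Illustrative: resolve a CPU's node via the accessor, with a
	 * fallback when no mapping was recorded. */
	static int example_node_for_cpu(int cpu)
	{
		int nid = numa_cpu_node(cpu);	/* 64bit: __apicid_to_node[] directly;
						 * 32bit: apic->x86_32_numa_cpu_node() */
		return (nid == NUMA_NO_NODE) ? first_online_node : nid;
	}
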
Signed-off-by: Tejun Heo Reviewed-by: Pekka Enberg Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-14-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar Cc: David Rientjes diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index edc2a45..9c7d95f 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -25,7 +25,6 @@ extern int pic_mode; #define MAX_IRQ_SOURCES 256 extern unsigned int def_to_bigsmp; -extern u8 apicid_2_node[]; #ifdef CONFIG_X86_NUMAQ extern int mp_bus_id_to_node[MAX_MP_BUSSES]; diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 27da400..5e01c76 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -1,5 +1,33 @@ +#ifndef _ASM_X86_NUMA_H +#define _ASM_X86_NUMA_H + +#include + +#ifdef CONFIG_NUMA +/* + * __apicid_to_node[] stores the raw mapping between physical apicid and + * node and is used to initialize cpu_to_node mapping. + * + * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus + * should be accessed by the accessors - set_apicid_to_node() and + * numa_cpu_node(). + */ +extern s16 __apicid_to_node[MAX_LOCAL_APIC]; + +static inline void set_apicid_to_node(int apicid, s16 node) +{ + __apicid_to_node[apicid] = node; +} +#else /* CONFIG_NUMA */ +static inline void set_apicid_to_node(int apicid, s16 node) +{ +} +#endif /* CONFIG_NUMA */ + #ifdef CONFIG_X86_32 # include "numa_32.h" #else # include "numa_64.h" #endif + +#endif /* _ASM_X86_NUMA_H */ diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h index b0ef2b4..cdf8043 100644 --- a/arch/x86/include/asm/numa_32.h +++ b/arch/x86/include/asm/numa_32.h @@ -6,6 +6,12 @@ extern int numa_off; extern int pxm_to_nid(int pxm); extern void numa_remove_cpu(int cpu); +#ifdef CONFIG_NUMA +extern int __cpuinit numa_cpu_node(int apicid); +#else /* CONFIG_NUMA */ +static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } +#endif /* CONFIG_NUMA */ + #ifdef CONFIG_HIGHMEM extern void set_highmem_pages_init(void); #else diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 0493be3..4982a9c 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -2,7 +2,6 @@ #define _ASM_X86_NUMA_64_H #include -#include struct bootnode { u64 start; @@ -17,8 +16,6 @@ extern int compute_hash_shift(struct bootnode *nodes, int numblks, extern void numa_init_array(void); extern int numa_off; -extern s16 apicid_to_node[MAX_LOCAL_APIC]; - extern unsigned long numa_free_all_bootmem(void); extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); @@ -32,6 +29,7 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, #define NODE_MIN_SIZE (4*1024*1024) extern void __init init_cpu_to_node(void); +extern int __cpuinit numa_cpu_node(int cpu); extern void __cpuinit numa_set_node(int cpu, int node); extern void __cpuinit numa_clear_node(int cpu); extern void __cpuinit numa_add_cpu(int cpu); @@ -44,6 +42,7 @@ void numa_emu_cmdline(char *); #endif /* CONFIG_NUMA_EMU */ #else static inline void init_cpu_to_node(void) { } +static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } static inline void numa_set_node(int cpu, int node) { } static inline void numa_clear_node(int cpu) { } static inline void numa_add_cpu(int cpu, int node) { } diff --git a/arch/x86/kernel/acpi/boot.c 
b/arch/x86/kernel/acpi/boot.c index b3a7113..a7bca59 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -589,11 +589,10 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) nid = acpi_get_node(handle); if (nid == -1 || !node_online(nid)) return; + set_apicid_to_node(physid, nid); #ifdef CONFIG_X86_64 - apicid_to_node[physid] = nid; numa_set_node(cpu, nid); #else /* CONFIG_X86_32 */ - apicid_2_node[physid] = nid; cpu_to_node_map[cpu] = nid; #endif diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 0f4f3c1..4686ea5 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2026,7 +2026,7 @@ int default_x86_32_numa_cpu_node(int cpu) int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); if (apicid != BAD_APICID) - return apicid_2_node[apicid]; + return __apicid_to_node[apicid]; return NUMA_NO_NODE; #else return 0; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7c7bedb..3cce8f2 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -234,17 +234,21 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) #endif #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +/* + * To workaround broken NUMA config. Read the comment in + * srat_detect_node(). + */ static int __cpuinit nearby_node(int apicid) { int i, node; for (i = apicid - 1; i >= 0; i--) { - node = apicid_to_node[i]; + node = __apicid_to_node[i]; if (node != NUMA_NO_NODE && node_online(node)) return node; } for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { - node = apicid_to_node[i]; + node = __apicid_to_node[i]; if (node != NUMA_NO_NODE && node_online(node)) return node; } @@ -339,26 +343,35 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) int node; unsigned apicid = c->apicid; - node = per_cpu(cpu_llc_id, cpu); + node = numa_cpu_node(cpu); + if (node == NUMA_NO_NODE) + node = per_cpu(cpu_llc_id, cpu); - if (apicid_to_node[apicid] != NUMA_NO_NODE) - node = apicid_to_node[apicid]; if (!node_online(node)) { - /* Two possibilities here: - - The CPU is missing memory and no node was created. - In that case try picking one from a nearby CPU - - The APIC IDs differ from the HyperTransport node IDs - which the K8 northbridge parsing fills in. - Assume they are all increased by a constant offset, - but in the same order as the HT nodeids. - If that doesn't result in a usable node fall back to the - path for the previous case. */ - + /* + * Two possibilities here: + * + * - The CPU is missing memory and no node was created. In + * that case try picking one from a nearby CPU. + * + * - The APIC IDs differ from the HyperTransport node IDs + * which the K8 northbridge parsing fills in. Assume + * they are all increased by a constant offset, but in + * the same order as the HT nodeids. If that doesn't + * result in a usable node fall back to the path for the + * previous case. + * + * This workaround operates directly on the mapping between + * APIC ID and NUMA node, assuming certain relationship + * between APIC ID, HT node ID and NUMA topology. As going + * through CPU mapping may alter the outcome, directly + * access __apicid_to_node[]. 
+ */ int ht_nodeid = c->initial_apicid; if (ht_nodeid >= 0 && - apicid_to_node[ht_nodeid] != NUMA_NO_NODE) - node = apicid_to_node[ht_nodeid]; + __apicid_to_node[ht_nodeid] != NUMA_NO_NODE) + node = __apicid_to_node[ht_nodeid]; /* Pick a nearby node */ if (!node_online(node)) node = nearby_node(apicid); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index d16c2c5..6052004 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -279,11 +279,10 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) unsigned node; int cpu = smp_processor_id(); - int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; /* Don't do the funky fallback heuristics the AMD version employs for now. */ - node = apicid_to_node[apicid]; + node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE || !node_online(node)) { /* reuse the value from init_cpu_to_node() */ node = cpu_to_node(cpu); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 5319cdd..b7cfce5 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -71,10 +71,6 @@ #include #include -#ifdef CONFIG_X86_32 -u8 apicid_2_node[MAX_LOCAL_APIC]; -#endif - /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; @@ -170,7 +166,7 @@ static void map_cpu_to_logical_apicid(void) int cpu = smp_processor_id(); int node; - node = apic->x86_32_numa_cpu_node(cpu); + node = numa_cpu_node(cpu); if (!node_online(node)) node = first_online_node; diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index f21962c..c7fae38 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -247,7 +247,7 @@ void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes) __acpi_map_pxm_to_node(nid, i); #endif } - memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); + memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node)); } #endif /* CONFIG_NUMA_EMU */ @@ -285,7 +285,7 @@ int __init amd_scan_nodes(void) nodes[i].start >> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); for (j = apicid_base; j < cores + apicid_base; j++) - apicid_to_node[(i << bits) + j] = i; + set_apicid_to_node((i << bits) + j, i); setup_node_bootmem(i, nodes[i].start, nodes[i].end); } diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index ebf6d78..480b357 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -26,8 +26,12 @@ static __init int numa_setup(char *opt) early_param("numa", numa_setup); /* - * Which logical CPUs are on which nodes + * apicid, cpu, node mappings */ +s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { + [0 ... 
MAX_LOCAL_APIC-1] = NUMA_NO_NODE +}; + cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 84a3e4c..8d91d22 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -110,6 +110,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); static unsigned long kva_start_pfn; static unsigned long kva_pages; + +int __cpuinit numa_cpu_node(int cpu) +{ + return apic->x86_32_numa_cpu_node(cpu); +} + /* * FLAT - support for basic PC memory model with discontig enabled, essentially * a single node with all available processors in it with a flat diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 95ea155..1e1026f 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -26,10 +26,6 @@ EXPORT_SYMBOL(node_data); struct memnode memnode; -s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { - [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE -}; - static unsigned long __initdata nodemap_addr; static unsigned long __initdata nodemap_size; @@ -716,12 +712,8 @@ void __init init_cpu_to_node(void) BUG_ON(cpu_to_apicid == NULL); for_each_possible_cpu(cpu) { - int node; - u16 apicid = cpu_to_apicid[cpu]; + int node = numa_cpu_node(cpu); - if (apicid == BAD_APICID) - continue; - node = apicid_to_node[apicid]; if (node == NUMA_NO_NODE) continue; if (!node_online(node)) @@ -731,6 +723,14 @@ void __init init_cpu_to_node(void) } #endif +int __cpuinit numa_cpu_node(int cpu) +{ + int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); + + if (apicid != BAD_APICID) + return __apicid_to_node[apicid]; + return NUMA_NO_NODE; +} void __cpuinit numa_set_node(int cpu, int node) { @@ -776,13 +776,9 @@ void __cpuinit numa_remove_cpu(int cpu) void __cpuinit numa_add_cpu(int cpu) { unsigned long addr; - u16 apicid; - int physnid; - int nid = NUMA_NO_NODE; + int physnid, nid; - apicid = early_per_cpu(x86_cpu_to_apicid, cpu); - if (apicid != BAD_APICID) - nid = apicid_to_node[apicid]; + nid = numa_cpu_node(cpu); if (nid == NUMA_NO_NODE) nid = early_cpu_to_node(cpu); BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c index 6027a48..48651c6 100644 --- a/arch/x86/mm/srat_32.c +++ b/arch/x86/mm/srat_32.c @@ -255,7 +255,7 @@ int __init get_memcfg_from_srat(void) num_memory_chunks); for (i = 0; i < MAX_LOCAL_APIC; i++) - apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]); + set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i])); for (j = 0; j < num_memory_chunks; j++){ struct node_memory_chunk_s * chunk = &node_memory_chunk[j]; diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 603d285..9a97261 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -79,7 +79,7 @@ static __init void bad_srat(void) printk(KERN_ERR "SRAT: SRAT not used.\n"); acpi_numa = -1; for (i = 0; i < MAX_LOCAL_APIC; i++) - apicid_to_node[i] = NUMA_NO_NODE; + set_apicid_to_node(i, NUMA_NO_NODE); for (i = 0; i < MAX_NUMNODES; i++) { nodes[i].start = nodes[i].end = 0; nodes_add[i].start = nodes_add[i].end = 0; @@ -138,7 +138,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); return; } - apicid_to_node[apic_id] = node; + set_apicid_to_node(apic_id, node); node_set(node, cpu_nodes_parsed); acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", @@ -178,7 +178,7 @@ acpi_numa_processor_affinity_init(struct 
acpi_srat_cpu_affinity *pa) return; } - apicid_to_node[apic_id] = node; + set_apicid_to_node(apic_id, node); node_set(node, cpu_nodes_parsed); acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", @@ -521,7 +521,7 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) * node, it must now point to the fake node ID. */ for (j = 0; j < MAX_LOCAL_APIC; j++) - if (apicid_to_node[j] == nid && + if (__apicid_to_node[j] == nid && fake_apicid_to_node[j] == NUMA_NO_NODE) fake_apicid_to_node[j] = i; } @@ -532,13 +532,13 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) * value. */ for (i = 0; i < MAX_LOCAL_APIC; i++) - if (apicid_to_node[i] != NUMA_NO_NODE && + if (__apicid_to_node[i] != NUMA_NO_NODE && fake_apicid_to_node[i] == NUMA_NO_NODE) fake_apicid_to_node[i] = 0; for (i = 0; i < num_nodes; i++) __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); - memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); + memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node)); nodes_clear(nodes_parsed); for (i = 0; i < num_nodes; i++) -- cgit v0.10.2 From 645a79195f66eb68ef3ab2b21d9829ac3aa085a9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:40 +0100 Subject: x86: Unify CPU -> NUMA node mapping between 32 and 64bit Unlike 64bit, 32bit has been using its own cpu_to_node_map[] for CPU -> NUMA node mapping. Replace it with early_percpu variable x86_cpu_to_node_map and share the mapping code with 64bit. * USE_PERCPU_NUMA_NODE_ID is now enabled for 32bit too. * x86_cpu_to_node_map and numa_set/clear_node() are moved from numa_64 to numa. For now, on 32bit, x86_cpu_to_node_map is initialized with 0 instead of NUMA_NO_NODE. This is to avoid introducing unexpected behavior change and will be updated once init path is unified. * srat_detect_node() is now enabled for x86_32 too. It calls numa_set_node() and initializes the mapping making explicit cpu_to_node_map[] updates from map/unmap_cpu_to_node() unnecessary. 
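For reference, a minimal sketch of the write/read path that 32 and 64bit now share (the helper name is hypothetical; numa_set_node() and early_cpu_to_node() are the interfaces introduced/used by this series):

	/* Illustrative: the shared mapping is an early-percpu variable,
	 * written via numa_set_node() and readable both before and after
	 * the percpu areas are set up. */
	static void example_record_and_query(int cpu, int node)
	{
		numa_set_node(cpu, node);
		pr_info("cpu %d -> node %d\n", cpu, early_cpu_to_node(cpu));
	}
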
Signed-off-by: Tejun Heo Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-15-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar Cc: David Rientjes diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d5ed94d..95c36c4 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1705,7 +1705,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID depends on NUMA config USE_PERCPU_NUMA_NODE_ID - def_bool X86_64 + def_bool y depends on NUMA menu "Power management and ACPI options" diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 5e01c76..2b21fff 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -30,4 +30,12 @@ static inline void set_apicid_to_node(int apicid, s16 node) # include "numa_64.h" #endif +#ifdef CONFIG_NUMA +extern void __cpuinit numa_set_node(int cpu, int node); +extern void __cpuinit numa_clear_node(int cpu); +#else /* CONFIG_NUMA */ +static inline void numa_set_node(int cpu, int node) { } +static inline void numa_clear_node(int cpu) { } +#endif /* CONFIG_NUMA */ + #endif /* _ASM_X86_NUMA_H */ diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 4982a9c..6ead9f3 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -30,8 +30,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, extern void __init init_cpu_to_node(void); extern int __cpuinit numa_cpu_node(int cpu); -extern void __cpuinit numa_set_node(int cpu, int node); -extern void __cpuinit numa_clear_node(int cpu); extern void __cpuinit numa_add_cpu(int cpu); extern void __cpuinit numa_remove_cpu(int cpu); @@ -43,8 +41,6 @@ void numa_emu_cmdline(char *); #else static inline void init_cpu_to_node(void) { } static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } -static inline void numa_set_node(int cpu, int node) { } -static inline void numa_clear_node(int cpu) { } static inline void numa_add_cpu(int cpu, int node) { } static inline void numa_remove_cpu(int cpu) { } #endif diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 21899cc3..b101c17 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -47,21 +47,6 @@ #include -#ifdef CONFIG_X86_32 - -/* Mappings between logical cpu number and node number */ -extern int cpu_to_node_map[]; - -/* Returns the number of the node containing CPU 'cpu' */ -static inline int __cpu_to_node(int cpu) -{ - return cpu_to_node_map[cpu]; -} -#define early_cpu_to_node __cpu_to_node -#define cpu_to_node __cpu_to_node - -#else /* CONFIG_X86_64 */ - /* Mappings between logical cpu number and node number */ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); @@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu) #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ -#endif /* CONFIG_X86_64 */ - /* Mappings between node number and cpus on that node. 
*/ extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index a7bca59..a2c5121 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -590,12 +590,7 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) if (nid == -1 || !node_online(nid)) return; set_apicid_to_node(physid, nid); -#ifdef CONFIG_X86_64 numa_set_node(cpu, nid); -#else /* CONFIG_X86_32 */ - cpu_to_node_map[cpu] = nid; -#endif - #endif } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 3cce8f2..77858fd 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -233,7 +233,7 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) } #endif -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +#ifdef CONFIG_NUMA /* * To workaround broken NUMA config. Read the comment in * srat_detect_node(). @@ -338,7 +338,7 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id); static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) { -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +#ifdef CONFIG_NUMA int cpu = smp_processor_id(); int node; unsigned apicid = c->apicid; diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 6052004..df86bc8 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -276,7 +276,7 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) { -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +#ifdef CONFIG_NUMA unsigned node; int cpu = smp_processor_id(); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index b5147f0..71f4727 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -233,6 +233,7 @@ void __init setup_per_cpu_areas(void) per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64; +#endif #ifdef CONFIG_NUMA per_cpu(x86_cpu_to_node_map, cpu) = early_per_cpu_map(x86_cpu_to_node_map, cpu); @@ -246,7 +247,6 @@ void __init setup_per_cpu_areas(void) */ set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); #endif -#endif /* * Up to this point, the boot CPU has been using .init.data * area. Reload any changed state for the boot CPU. @@ -263,7 +263,7 @@ void __init setup_per_cpu_areas(void) #ifdef CONFIG_X86_32 early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL; #endif -#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) +#ifdef CONFIG_NUMA early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; #endif diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index b7cfce5..2c20382 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -133,16 +133,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); atomic_t init_deasserted; #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) -/* which node each logical CPU is on */ -int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; -EXPORT_SYMBOL(cpu_to_node_map); - /* set up a mapping between cpu and node. */ static void map_cpu_to_node(int cpu, int node) { printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); cpumask_set_cpu(cpu, node_to_cpumask_map[node]); - cpu_to_node_map[cpu] = node; } /* undo a mapping between cpu and node. 
*/ @@ -153,7 +148,6 @@ static void unmap_cpu_to_node(int cpu) printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); for (node = 0; node < MAX_NUMNODES; node++) cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); - cpu_to_node_map[cpu] = 0; } #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ #define map_cpu_to_node(cpu, node) ({}) diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 480b357..187810b 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -36,6 +36,44 @@ cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); /* + * Map cpu index to node index + */ +#ifdef CONFIG_X86_32 +DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, 0); +#else +DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); +#endif +EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); + +void __cpuinit numa_set_node(int cpu, int node) +{ + int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); + + /* early setting, no percpu area yet */ + if (cpu_to_node_map) { + cpu_to_node_map[cpu] = node; + return; + } + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { + printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); + dump_stack(); + return; + } +#endif + per_cpu(x86_cpu_to_node_map, cpu) = node; + + if (node != NUMA_NO_NODE) + set_cpu_numa_node(cpu, node); +} + +void __cpuinit numa_clear_node(int cpu) +{ + numa_set_node(cpu, NUMA_NO_NODE); +} + +/* * Allocate node_to_cpumask_map based on number of available nodes * Requires node_possible_map to be valid. * @@ -62,6 +100,37 @@ void __init setup_node_to_cpumask_map(void) } #ifdef CONFIG_DEBUG_PER_CPU_MAPS + +int __cpu_to_node(int cpu) +{ + if (early_per_cpu_ptr(x86_cpu_to_node_map)) { + printk(KERN_WARNING + "cpu_to_node(%d): usage too early!\n", cpu); + dump_stack(); + return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; + } + return per_cpu(x86_cpu_to_node_map, cpu); +} +EXPORT_SYMBOL(__cpu_to_node); + +/* + * Same function as cpu_to_node() but used if called before the + * per_cpu areas are setup. + */ +int early_cpu_to_node(int cpu) +{ + if (early_per_cpu_ptr(x86_cpu_to_node_map)) + return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; + + if (!cpu_possible(cpu)) { + printk(KERN_WARNING + "early_cpu_to_node(%d): no per_cpu area!\n", cpu); + dump_stack(); + return NUMA_NO_NODE; + } + return per_cpu(x86_cpu_to_node_map, cpu); +} + /* * Returns a pointer to the bitmask of CPUs on Node 'node'. 
*/ @@ -84,4 +153,5 @@ const struct cpumask *cpumask_of_node(int node) return node_to_cpumask_map[node]; } EXPORT_SYMBOL(cpumask_of_node); -#endif + +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 1e1026f..f545940 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -30,12 +30,6 @@ static unsigned long __initdata nodemap_addr; static unsigned long __initdata nodemap_size; /* - * Map cpu index to node index - */ -DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); -EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); - -/* * Given a shift value, try to populate memnodemap[] * Returns : * 1 if OK @@ -732,34 +726,6 @@ int __cpuinit numa_cpu_node(int cpu) return NUMA_NO_NODE; } -void __cpuinit numa_set_node(int cpu, int node) -{ - int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); - - /* early setting, no percpu area yet */ - if (cpu_to_node_map) { - cpu_to_node_map[cpu] = node; - return; - } - -#ifdef CONFIG_DEBUG_PER_CPU_MAPS - if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { - printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); - dump_stack(); - return; - } -#endif - per_cpu(x86_cpu_to_node_map, cpu) = node; - - if (node != NUMA_NO_NODE) - set_cpu_numa_node(cpu, node); -} - -void __cpuinit numa_clear_node(int cpu) -{ - numa_set_node(cpu, NUMA_NO_NODE); -} - #ifndef CONFIG_DEBUG_PER_CPU_MAPS #ifndef CONFIG_NUMA_EMU @@ -887,37 +853,6 @@ void __cpuinit numa_remove_cpu(int cpu) { numa_set_cpumask(cpu, 0); } - -int __cpu_to_node(int cpu) -{ - if (early_per_cpu_ptr(x86_cpu_to_node_map)) { - printk(KERN_WARNING - "cpu_to_node(%d): usage too early!\n", cpu); - dump_stack(); - return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; - } - return per_cpu(x86_cpu_to_node_map, cpu); -} -EXPORT_SYMBOL(__cpu_to_node); - -/* - * Same function as cpu_to_node() but used if called before the - * per_cpu areas are setup. - */ -int early_cpu_to_node(int cpu) -{ - if (early_per_cpu_ptr(x86_cpu_to_node_map)) - return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; - - if (!cpu_possible(cpu)) { - printk(KERN_WARNING - "early_cpu_to_node(%d): no per_cpu area!\n", cpu); - dump_stack(); - return NUMA_NO_NODE; - } - return per_cpu(x86_cpu_to_node_map, cpu); -} - /* * --------- end of debug versions of the numa functions --------- */ -- cgit v0.10.2 From de2d9445f1627830ed2ebd00ee9d851986c940b5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:41 +0100 Subject: x86: Unify node_to_cpumask_map handling between 32 and 64bit x86_32 has been managing node_to_cpumask_map explicitly from map_cpu_to_node() and friends in a rather ugly way. With previous changes, it's now possible to share the code with 64bit. * When CONFIG_NUMA_EMU is disabled, numa_add/remove_cpu() are implemented in numa.c and shared by 32 and 64bit. CONFIG_NUMA_EMU versions still live in numa_64.c. NUMA_EMU's dependency on 64bit is planned to be removed and the above should go away together. * identify_cpu() now calls numa_add_cpu() for 32bit too. This makes the explicit mask management from map_cpu_to_node() unnecessary. * The whole x86_32 specific map_cpu_to_node() chunk is no longer necessary. Dropped. 
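A minimal sketch of the now-shared bookkeeping, assuming the non-NUMA_EMU, non-debug variant that this patch moves into numa.c (the function name is changed here purely for illustration):

	/* Illustrative: with the node known from early_cpu_to_node(),
	 * adding a CPU is a single update of that node's cpumask. */
	void __cpuinit example_numa_add_cpu(int cpu)
	{
		cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
	}
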
Signed-off-by: Tejun Heo Reviewed-by: Pekka Enberg Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-16-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar Cc: David Rientjes Cc: Shaohui Zheng diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 2b21fff..d3964b2 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -1,6 +1,7 @@ #ifndef _ASM_X86_NUMA_H #define _ASM_X86_NUMA_H +#include #include #ifdef CONFIG_NUMA @@ -33,9 +34,17 @@ static inline void set_apicid_to_node(int apicid, s16 node) #ifdef CONFIG_NUMA extern void __cpuinit numa_set_node(int cpu, int node); extern void __cpuinit numa_clear_node(int cpu); +extern void __cpuinit numa_add_cpu(int cpu); +extern void __cpuinit numa_remove_cpu(int cpu); #else /* CONFIG_NUMA */ static inline void numa_set_node(int cpu, int node) { } static inline void numa_clear_node(int cpu) { } +static inline void numa_add_cpu(int cpu) { } +static inline void numa_remove_cpu(int cpu) { } #endif /* CONFIG_NUMA */ +#ifdef CONFIG_DEBUG_PER_CPU_MAPS +struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable); +#endif + #endif /* _ASM_X86_NUMA_H */ diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h index cdf8043..bc66031 100644 --- a/arch/x86/include/asm/numa_32.h +++ b/arch/x86/include/asm/numa_32.h @@ -4,7 +4,6 @@ extern int numa_off; extern int pxm_to_nid(int pxm); -extern void numa_remove_cpu(int cpu); #ifdef CONFIG_NUMA extern int __cpuinit numa_cpu_node(int apicid); diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 6ead9f3..123f185 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -30,8 +30,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, extern void __init init_cpu_to_node(void); extern int __cpuinit numa_cpu_node(int cpu); -extern void __cpuinit numa_add_cpu(int cpu); -extern void __cpuinit numa_remove_cpu(int cpu); #ifdef CONFIG_NUMA_EMU #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) @@ -41,8 +39,6 @@ void numa_emu_cmdline(char *); #else static inline void init_cpu_to_node(void) { } static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } -static inline void numa_add_cpu(int cpu, int node) { } -static inline void numa_remove_cpu(int cpu) { } #endif #endif /* _ASM_X86_NUMA_64_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 1d59834..a2559c3 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) select_idle_routine(c); -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +#ifdef CONFIG_NUMA numa_add_cpu(smp_processor_id()); #endif } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2c20382..522b217 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -132,49 +132,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); atomic_t init_deasserted; -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) -/* set up a mapping between cpu and node. */ -static void map_cpu_to_node(int cpu, int node) -{ - printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); - cpumask_set_cpu(cpu, node_to_cpumask_map[node]); -} - -/* undo a mapping between cpu and node. 
*/ -static void unmap_cpu_to_node(int cpu) -{ - int node; - - printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); - for (node = 0; node < MAX_NUMNODES; node++) - cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); -} -#else /* !(CONFIG_NUMA && CONFIG_X86_32) */ -#define map_cpu_to_node(cpu, node) ({}) -#define unmap_cpu_to_node(cpu) ({}) -#endif - -#ifdef CONFIG_X86_32 -static void map_cpu_to_logical_apicid(void) -{ - int cpu = smp_processor_id(); - int node; - - node = numa_cpu_node(cpu); - if (!node_online(node)) - node = first_online_node; - - map_cpu_to_node(cpu, node); -} - -void numa_remove_cpu(int cpu) -{ - unmap_cpu_to_node(cpu); -} -#else -#define map_cpu_to_logical_apicid() do {} while (0) -#endif - /* * Report back to the Boot Processor. * Running on AP. @@ -242,7 +199,6 @@ static void __cpuinit smp_callin(void) apic->smp_callin_clear_local_apic(); setup_local_APIC(); end_local_APIC_setup(); - map_cpu_to_logical_apicid(); /* * Need to setup vector mappings before we enable interrupts. @@ -943,7 +899,6 @@ static __init void disable_smp(void) physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); else physid_set_mask_of_physid(0, &phys_cpu_present_map); - map_cpu_to_logical_apicid(); cpumask_set_cpu(0, cpu_sibling_mask(0)); cpumask_set_cpu(0, cpu_core_mask(0)); } @@ -1120,8 +1075,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) end_local_APIC_setup(); - map_cpu_to_logical_apicid(); - if (apic->setup_portio_remap) apic->setup_portio_remap(); diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 187810b..75abecb 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -99,7 +99,21 @@ void __init setup_node_to_cpumask_map(void) pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); } -#ifdef CONFIG_DEBUG_PER_CPU_MAPS +#ifndef CONFIG_DEBUG_PER_CPU_MAPS + +# ifndef CONFIG_NUMA_EMU +void __cpuinit numa_add_cpu(int cpu) +{ + cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); +} +# endif /* !CONFIG_NUMA_EMU */ + +#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ int __cpu_to_node(int cpu) { @@ -131,6 +145,52 @@ int early_cpu_to_node(int cpu) return per_cpu(x86_cpu_to_node_map, cpu); } +struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) +{ + int node = early_cpu_to_node(cpu); + struct cpumask *mask; + char buf[64]; + + mask = node_to_cpumask_map[node]; + if (!mask) { + pr_err("node_to_cpumask_map[%i] NULL\n", node); + dump_stack(); + return NULL; + } + + cpulist_scnprintf(buf, sizeof(buf), mask); + printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", + enable ? "numa_add_cpu" : "numa_remove_cpu", + cpu, node, buf); + return mask; +} + +# ifndef CONFIG_NUMA_EMU +static void __cpuinit numa_set_cpumask(int cpu, int enable) +{ + struct cpumask *mask; + + mask = debug_cpumask_set_cpu(cpu, enable); + if (!mask) + return; + + if (enable) + cpumask_set_cpu(cpu, mask); + else + cpumask_clear_cpu(cpu, mask); +} + +void __cpuinit numa_add_cpu(int cpu) +{ + numa_set_cpumask(cpu, 1); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + numa_set_cpumask(cpu, 0); +} +# endif /* !CONFIG_NUMA_EMU */ + /* * Returns a pointer to the bitmask of CPUs on Node 'node'. 
*/ @@ -154,4 +214,4 @@ const struct cpumask *cpumask_of_node(int node) } EXPORT_SYMBOL(cpumask_of_node); -#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ +#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index f545940..14664f5 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -726,19 +726,18 @@ int __cpuinit numa_cpu_node(int cpu) return NUMA_NO_NODE; } -#ifndef CONFIG_DEBUG_PER_CPU_MAPS - -#ifndef CONFIG_NUMA_EMU -void __cpuinit numa_add_cpu(int cpu) -{ - cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); -} - -void __cpuinit numa_remove_cpu(int cpu) -{ - cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); -} -#else +/* + * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use + * of 64bit specific data structures. The distinction is artificial and + * should be removed. numa_{add|remove}_cpu() are implemented in numa.c + * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when + * enabled. + * + * NUMA emulation is planned to be made generic and the following and other + * related code should be moved to numa.c. + */ +#ifdef CONFIG_NUMA_EMU +# ifndef CONFIG_DEBUG_PER_CPU_MAPS void __cpuinit numa_add_cpu(int cpu) { unsigned long addr; @@ -778,47 +777,7 @@ void __cpuinit numa_remove_cpu(int cpu) for_each_online_node(i) cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); } -#endif /* !CONFIG_NUMA_EMU */ - -#else /* CONFIG_DEBUG_PER_CPU_MAPS */ -static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) -{ - int node = early_cpu_to_node(cpu); - struct cpumask *mask; - char buf[64]; - - mask = node_to_cpumask_map[node]; - if (!mask) { - pr_err("node_to_cpumask_map[%i] NULL\n", node); - dump_stack(); - return NULL; - } - - cpulist_scnprintf(buf, sizeof(buf), mask); - printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", - enable ? "numa_add_cpu" : "numa_remove_cpu", - cpu, node, buf); - return mask; -} - -/* - * --------- debug versions of the numa functions --------- - */ -#ifndef CONFIG_NUMA_EMU -static void __cpuinit numa_set_cpumask(int cpu, int enable) -{ - struct cpumask *mask; - - mask = debug_cpumask_set_cpu(cpu, enable); - if (!mask) - return; - - if (enable) - cpumask_set_cpu(cpu, mask); - else - cpumask_clear_cpu(cpu, mask); -} -#else +# else /* !CONFIG_DEBUG_PER_CPU_MAPS */ static void __cpuinit numa_set_cpumask(int cpu, int enable) { int node = early_cpu_to_node(cpu); @@ -842,7 +801,6 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) cpumask_clear_cpu(cpu, mask); } } -#endif /* CONFIG_NUMA_EMU */ void __cpuinit numa_add_cpu(int cpu) { @@ -853,8 +811,5 @@ void __cpuinit numa_remove_cpu(int cpu) { numa_set_cpumask(cpu, 0); } -/* - * --------- end of debug versions of the numa functions --------- - */ - -#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ +# endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ +#endif /* CONFIG_NUMA_EMU */ -- cgit v0.10.2 From 8db78cc4b4048e3add40bca1bc3e55057c319256 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 23 Jan 2011 14:37:42 +0100 Subject: x86: Unify NUMA initialization between 32 and 64bit Now that everything else is unified, NUMA initialization can be unified too. * numa_init_array() and init_cpu_to_node() are moved from numa_64 to numa. * numa_32::initmem_init() is updated to call numa_init_array() and setup_arch() to call init_cpu_to_node() on 32bit too. * x86_cpu_to_node_map is now initialized to NUMA_NO_NODE on 32bit too. This is safe now as numa_init_array() will initialize it early during boot. 
This makes NUMA mapping fully initialized before setup_per_cpu_areas() on 32bit too and thus makes the first percpu chunk which contains all the static variables and some of dynamic area allocated with NUMA affinity correctly considered. Signed-off-by: Tejun Heo Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <1295789862-25482-17-git-send-email-tj@kernel.org> Signed-off-by: Ingo Molnar Reported-by: Eric Dumazet Reviewed-by: Pekka Enberg diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index d3964b2..26fc6e2 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -34,11 +34,15 @@ static inline void set_apicid_to_node(int apicid, s16 node) #ifdef CONFIG_NUMA extern void __cpuinit numa_set_node(int cpu, int node); extern void __cpuinit numa_clear_node(int cpu); +extern void __init numa_init_array(void); +extern void __init init_cpu_to_node(void); extern void __cpuinit numa_add_cpu(int cpu); extern void __cpuinit numa_remove_cpu(int cpu); #else /* CONFIG_NUMA */ static inline void numa_set_node(int cpu, int node) { } static inline void numa_clear_node(int cpu) { } +static inline void numa_init_array(void) { } +static inline void init_cpu_to_node(void) { } static inline void numa_add_cpu(int cpu) { } static inline void numa_remove_cpu(int cpu) { } #endif /* CONFIG_NUMA */ diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 123f185..2819afa 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -13,7 +13,6 @@ extern int compute_hash_shift(struct bootnode *nodes, int numblks, #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) -extern void numa_init_array(void); extern int numa_off; extern unsigned long numa_free_all_bootmem(void); @@ -28,7 +27,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, */ #define NODE_MIN_SIZE (4*1024*1024) -extern void __init init_cpu_to_node(void); extern int __cpuinit numa_cpu_node(int cpu); #ifdef CONFIG_NUMA_EMU @@ -37,7 +35,6 @@ extern int __cpuinit numa_cpu_node(int cpu); void numa_emu_cmdline(char *); #endif /* CONFIG_NUMA_EMU */ #else -static inline void init_cpu_to_node(void) { } static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } #endif diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d3cfe26c..1202341 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1040,9 +1040,7 @@ void __init setup_arch(char **cmdline_p) prefill_possible_map(); -#ifdef CONFIG_X86_64 init_cpu_to_node(); -#endif init_apic_mappings(); ioapic_and_gsi_init(); diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 75abecb..bf60715 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -38,11 +38,7 @@ EXPORT_SYMBOL(node_to_cpumask_map); /* * Map cpu index to node index */ -#ifdef CONFIG_X86_32 -DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, 0); -#else DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); -#endif EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); void __cpuinit numa_set_node(int cpu, int node) @@ -99,6 +95,78 @@ void __init setup_node_to_cpumask_map(void) pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); } +/* + * There are unfortunately some poorly designed mainboards around that + * only connect memory to a single CPU. This breaks the 1:1 cpu->node + * mapping. To avoid this fill in the mapping for all possible CPUs, + * as the number of CPUs is not known yet. 
We round robin the existing + * nodes. + */ +void __init numa_init_array(void) +{ + int rr, i; + + rr = first_node(node_online_map); + for (i = 0; i < nr_cpu_ids; i++) { + if (early_cpu_to_node(i) != NUMA_NO_NODE) + continue; + numa_set_node(i, rr); + rr = next_node(rr, node_online_map); + if (rr == MAX_NUMNODES) + rr = first_node(node_online_map); + } +} + +static __init int find_near_online_node(int node) +{ + int n, val; + int min_val = INT_MAX; + int best_node = -1; + + for_each_online_node(n) { + val = node_distance(node, n); + + if (val < min_val) { + min_val = val; + best_node = n; + } + } + + return best_node; +} + +/* + * Setup early cpu_to_node. + * + * Populate cpu_to_node[] only if x86_cpu_to_apicid[], + * and apicid_to_node[] tables have valid entries for a CPU. + * This means we skip cpu_to_node[] initialisation for NUMA + * emulation and faking node case (when running a kernel compiled + * for NUMA on a non NUMA box), which is OK as cpu_to_node[] + * is already initialized in a round robin manner at numa_init_array, + * prior to this call, and this initialization is good enough + * for the fake NUMA cases. + * + * Called before the per_cpu areas are setup. + */ +void __init init_cpu_to_node(void) +{ + int cpu; + u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); + + BUG_ON(cpu_to_apicid == NULL); + + for_each_possible_cpu(cpu) { + int node = numa_cpu_node(cpu); + + if (node == NUMA_NO_NODE) + continue; + if (!node_online(node)) + node = find_near_online_node(node); + numa_set_node(cpu, node); + } +} + #ifndef CONFIG_DEBUG_PER_CPU_MAPS # ifndef CONFIG_NUMA_EMU diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 8d91d22..505bb04 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -367,6 +367,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, */ get_memcfg_numa(); + numa_init_array(); kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 14664f5..f548fbf 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -224,28 +224,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) node_set_online(nodeid); } -/* - * There are unfortunately some poorly designed mainboards around that - * only connect memory to a single CPU. This breaks the 1:1 cpu->node - * mapping. To avoid this fill in the mapping for all possible CPUs, - * as the number of CPUs is not known yet. We round robin the existing - * nodes. - */ -void __init numa_init_array(void) -{ - int rr, i; - - rr = first_node(node_online_map); - for (i = 0; i < nr_cpu_ids; i++) { - if (early_cpu_to_node(i) != NUMA_NO_NODE) - continue; - numa_set_node(i, rr); - rr = next_node(rr, node_online_map); - if (rr == MAX_NUMNODES) - rr = first_node(node_online_map); - } -} - #ifdef CONFIG_NUMA_EMU /* Numa emulation */ static struct bootnode nodes[MAX_NUMNODES] __initdata; @@ -664,59 +642,6 @@ unsigned long __init numa_free_all_bootmem(void) return pages; } -#ifdef CONFIG_NUMA - -static __init int find_near_online_node(int node) -{ - int n, val; - int min_val = INT_MAX; - int best_node = -1; - - for_each_online_node(n) { - val = node_distance(node, n); - - if (val < min_val) { - min_val = val; - best_node = n; - } - } - - return best_node; -} - -/* - * Setup early cpu_to_node. - * - * Populate cpu_to_node[] only if x86_cpu_to_apicid[], - * and apicid_to_node[] tables have valid entries for a CPU. 
- * This means we skip cpu_to_node[] initialisation for NUMA - * emulation and faking node case (when running a kernel compiled - * for NUMA on a non NUMA box), which is OK as cpu_to_node[] - * is already initialized in a round robin manner at numa_init_array, - * prior to this call, and this initialization is good enough - * for the fake NUMA cases. - * - * Called before the per_cpu areas are setup. - */ -void __init init_cpu_to_node(void) -{ - int cpu; - u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); - - BUG_ON(cpu_to_apicid == NULL); - - for_each_possible_cpu(cpu) { - int node = numa_cpu_node(cpu); - - if (node == NUMA_NO_NODE) - continue; - if (!node_online(node)) - node = find_near_online_node(node); - numa_set_node(cpu, node); - } -} -#endif - int __cpuinit numa_cpu_node(int cpu) { int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); -- cgit v0.10.2 From 4e62445b90ac4ef708bd11c7ae052b1d5ef765b5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Jan 2011 17:22:48 +0100 Subject: x86: Fix build failure on X86_UP_APIC Commit 4c321ff8 (x86: Replace cpu_2_logical_apicid[] with early percpu variable) and following changes introduced and used x86_cpu_to_logical_apicid percpu variable. It was declared and defined inside CONFIG_SMP && CONFIG_X86_32 but if CONFIG_X86_UP_APIC is set UP configuration makes use of it and build fails. Fix it by declaring and defining it inside CONFIG_X86_LOCAL_APIC && CONFIG_X86_32. Signed-off-by: Tejun Heo Reported-by: Ingo Molnar Cc: eric.dumazet@gmail.com Cc: yinghai@kernel.org Cc: brgerst@gmail.com Cc: gorcunov@gmail.com Cc: penberg@kernel.org Cc: shaohui.zheng@intel.com Cc: rientjes@google.com LKML-Reference: <20110128162248.GA25746@htj.dyndns.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index dc7c46a..7592782 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -38,7 +38,7 @@ static inline struct cpumask *cpu_core_mask(int cpu) DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); -#if defined(CONFIG_SMP) && defined(CONFIG_X86_32) +#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid); #endif diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 4686ea5..1390cf9 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -79,7 +79,6 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); #ifdef CONFIG_X86_32 -#ifdef CONFIG_SMP /* * On x86_32, the mapping between cpu and logical apicid may vary * depending on apic in use. The following early percpu variable is @@ -87,7 +86,6 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); * actually diverge. Let's keep it ugly for now. */ DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID); -#endif /* * Knob to control our willingness to enable the local APIC. -- cgit v0.10.2 From eff9073790e1286aa12bf1c65814d3e0132b12e1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 31 Jan 2011 16:59:05 +0100 Subject: x86: Rename incorrectly named parameter of numa_cpu_node() numa_cpu_node() prototype in numa_32.h has wrongly named parameter @apicid when it actually takes the CPU number. Change it to @cpu. 
Reported-by: Yinghai Lu Signed-off-by: Tejun Heo LKML-Reference: <20110131155905.GM7459@htj.dyndns.org> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h index bc66031..c6beed1 100644 --- a/arch/x86/include/asm/numa_32.h +++ b/arch/x86/include/asm/numa_32.h @@ -6,7 +6,7 @@ extern int numa_off; extern int pxm_to_nid(int pxm); #ifdef CONFIG_NUMA -extern int __cpuinit numa_cpu_node(int apicid); +extern int __cpuinit numa_cpu_node(int cpu); #else /* CONFIG_NUMA */ static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } #endif /* CONFIG_NUMA */ -- cgit v0.10.2 From cabb5bd7ff4d6963ec9e67f958fc30e7815425e6 Mon Sep 17 00:00:00 2001 From: Hans Rosenfeld Date: Mon, 7 Feb 2011 18:10:39 +0100 Subject: x86, amd: Support L3 Cache Partitioning on AMD family 0x15 CPUs L3 Cache Partitioning allows selecting which of the 4 L3 subcaches can be used for evictions by the L2 cache of each compute unit. By writing a 4-bit hexadecimal mask into the the sysfs file /sys/devices/system/cpu/cpuX/cache/index3/subcaches, the user can set the enabled subcaches for a CPU. The settings are directly read from and written to the hardware, so there is no way to have contradicting settings for two CPUs belonging to the same compute unit. Writing will always overwrite any previous setting for a compute unit. Signed-off-by: Hans Rosenfeld Cc: LKML-Reference: <1297098639-431383-1-git-send-email-hans.rosenfeld@amd.com> [ -v3: minor style fixes ] Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 3e70700..423f11c 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -18,6 +18,8 @@ extern int amd_cache_northbridges(void); extern void amd_flush_garts(void); extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn); extern int amd_scan_nodes(void); +extern int amd_get_subcaches(int); +extern int amd_set_subcaches(int, int); #ifdef CONFIG_NUMA_EMU extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes); @@ -38,6 +40,7 @@ extern struct amd_northbridge_info amd_northbridges; #define AMD_NB_GART 0x1 #define AMD_NB_L3_INDEX_DISABLE 0x2 +#define AMD_NB_L3_PARTITIONING 0x4 #ifdef CONFIG_AMD_NB diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 4ae9a96..bf79a4a 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -95,6 +95,10 @@ int amd_cache_northbridges(void) if (boot_cpu_data.x86 == 0x15) amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; + /* L3 cache partitioning is supported on family 0x15 */ + if (boot_cpu_data.x86 == 0x15) + amd_northbridges.flags |= AMD_NB_L3_PARTITIONING; + return 0; } EXPORT_SYMBOL_GPL(amd_cache_northbridges); @@ -112,6 +116,65 @@ int __init early_is_amd_nb(u32 device) return 0; } +int amd_get_subcaches(int cpu) +{ + struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; + unsigned int mask; + int cuid = 0; + + if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + return 0; + + pci_read_config_dword(link, 0x1d4, &mask); + +#ifdef CONFIG_SMP + cuid = cpu_data(cpu).compute_unit_id; +#endif + return (mask >> (4 * cuid)) & 0xf; +} + +int amd_set_subcaches(int cpu, int mask) +{ + static unsigned int reset, ban; + struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu)); + unsigned int reg; + int cuid = 0; + + if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf) + return -EINVAL; + + /* if necessary, collect reset state of L3 partitioning and BAN mode */ + if (reset == 0) { + 
pci_read_config_dword(nb->link, 0x1d4, &reset); + pci_read_config_dword(nb->misc, 0x1b8, &ban); + ban &= 0x180000; + } + + /* deactivate BAN mode if any subcaches are to be disabled */ + if (mask != 0xf) { + pci_read_config_dword(nb->misc, 0x1b8, ®); + pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); + } + +#ifdef CONFIG_SMP + cuid = cpu_data(cpu).compute_unit_id; +#endif + mask <<= 4 * cuid; + mask |= (0xf ^ (1 << cuid)) << 26; + + pci_write_config_dword(nb->link, 0x1d4, mask); + + /* reset BAN mode if L3 partitioning returned to reset state */ + pci_read_config_dword(nb->link, 0x1d4, ®); + if (reg == reset) { + pci_read_config_dword(nb->misc, 0x1b8, ®); + reg &= ~0x180000; + pci_write_config_dword(nb->misc, 0x1b8, reg | ban); + } + + return 0; +} + int amd_cache_gart(void) { int i; diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index ec2c19a..90cc675 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -304,8 +304,9 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, struct _cache_attr { struct attribute attr; - ssize_t (*show)(struct _cpuid4_info *, char *); - ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count); + ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int); + ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count, + unsigned int); }; #ifdef CONFIG_AMD_NB @@ -400,7 +401,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, #define SHOW_CACHE_DISABLE(slot) \ static ssize_t \ -show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf) \ +show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \ + unsigned int cpu) \ { \ return show_cache_disable(this_leaf, buf, slot); \ } @@ -512,7 +514,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, #define STORE_CACHE_DISABLE(slot) \ static ssize_t \ store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ - const char *buf, size_t count) \ + const char *buf, size_t count, \ + unsigned int cpu) \ { \ return store_cache_disable(this_leaf, buf, count, slot); \ } @@ -524,6 +527,39 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, show_cache_disable_1, store_cache_disable_1); +static ssize_t +show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) +{ + if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + return -EINVAL; + + return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); +} + +static ssize_t +store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, + unsigned int cpu) +{ + unsigned long val; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + return -EINVAL; + + if (strict_strtoul(buf, 16, &val) < 0) + return -EINVAL; + + if (amd_set_subcaches(cpu, val)) + return -EINVAL; + + return count; +} + +static struct _cache_attr subcaches = + __ATTR(subcaches, 0644, show_subcaches, store_subcaches); + #else /* CONFIG_AMD_NB */ #define amd_init_l3_cache(x, y) #endif /* CONFIG_AMD_NB */ @@ -532,9 +568,9 @@ static int __cpuinit cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) { - union _cpuid4_leaf_eax eax; - union _cpuid4_leaf_ebx ebx; - union _cpuid4_leaf_ecx ecx; + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; unsigned edx; if 
(boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { @@ -870,8 +906,8 @@ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject); #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) #define show_one_plus(file_name, object, val) \ -static ssize_t show_##file_name \ - (struct _cpuid4_info *this_leaf, char *buf) \ +static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \ + unsigned int cpu) \ { \ return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ } @@ -882,7 +918,8 @@ show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1); show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1); show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); -static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) +static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, + unsigned int cpu) { return sprintf(buf, "%luK\n", this_leaf->size / 1024); } @@ -906,17 +943,20 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, return n; } -static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf) +static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf, + unsigned int cpu) { return show_shared_cpu_map_func(leaf, 0, buf); } -static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf) +static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf, + unsigned int cpu) { return show_shared_cpu_map_func(leaf, 1, buf); } -static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) +static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, + unsigned int cpu) { switch (this_leaf->eax.split.type) { case CACHE_TYPE_DATA: @@ -974,6 +1014,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void) if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) n += 2; + if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + n += 1; + attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); if (attrs == NULL) return attrs = default_attrs; @@ -986,6 +1029,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void) attrs[n++] = &cache_disable_1.attr; } + if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + attrs[n++] = &subcaches.attr; + return attrs; } #endif @@ -998,7 +1044,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) ret = fattr->show ? fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), - buf) : + buf, this_leaf->cpu) : 0; return ret; } @@ -1012,7 +1058,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, ret = fattr->store ? fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), - buf, count) : + buf, count, this_leaf->cpu) : 0; return ret; } -- cgit v0.10.2 From 691269f0d918cd72454c254f97722f194c07b9a8 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 9 Feb 2011 08:26:53 +0000 Subject: x86: Adjust section placement in AMD northbridge related code amd_nb_misc_ids[] can live in .rodata, and enable_pci_io_ecs() can be moved into .cpuinit.text. 
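As a minimal sketch of why no code change is needed beyond the annotations (illustrative names, not code from this patch): const-qualified data may be placed in .rodata by the compiler, and __cpuinit-tagged functions land in .cpuinit.text, which kernels of this era can discard after boot when CPU hotplug is not configured.

#include <linux/pci.h>
#include <linux/init.h>

/* A const-qualified id table can live in .rodata, so it never occupies
 * writable memory. The empty entry terminates it, as the PCI id
 * iterators expect. */
static const struct pci_device_id example_nb_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ }
};

/* __cpuinit tags the function for .cpuinit.text; without CPU hotplug
 * support that section can be freed once boot-time CPU bring-up is done. */
static void __cpuinit example_enable_feature(void *unused)
{
	/* per-CPU setup, e.g. an rdmsrl()/wrmsrl() pair, would go here */
}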
Signed-off-by: Jan Beulich Cc: Hans Rosenfeld Cc: Andreas Herrmann Cc: Borislav Petkov LKML-Reference: <4D525DDD0200007800030F07@vpn.id2.novell.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 423f11c..2b33c4d 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -9,7 +9,7 @@ struct amd_nb_bus_dev_range { u8 dev_limit; }; -extern struct pci_device_id amd_nb_misc_ids[]; +extern const struct pci_device_id amd_nb_misc_ids[]; extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[]; struct bootnode; diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index bf79a4a..ed3c2e5 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -12,7 +12,7 @@ static u32 *flush_words; -struct pci_device_id amd_nb_misc_ids[] = { +const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, @@ -36,7 +36,7 @@ struct amd_northbridge_info amd_northbridges; EXPORT_SYMBOL(amd_northbridges); static struct pci_dev *next_northbridge(struct pci_dev *dev, - struct pci_device_id *ids) + const struct pci_device_id *ids) { do { dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); @@ -107,8 +107,9 @@ EXPORT_SYMBOL_GPL(amd_cache_northbridges); they're useless anyways */ int __init early_is_amd_nb(u32 device) { - struct pci_device_id *id; + const struct pci_device_id *id; u32 vendor = device & 0xffff; + device >>= 16; for (id = amd_nb_misc_ids; id->vendor; id++) if (vendor == id->vendor && device == id->device) diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index e27dffb..026e493 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c @@ -350,7 +350,7 @@ static int __init early_fill_mp_bus_info(void) #define ENABLE_CF8_EXT_CFG (1ULL << 46) -static void enable_pci_io_ecs(void *unused) +static void __cpuinit enable_pci_io_ecs(void *unused) { u64 reg; rdmsrl(MSR_AMD64_NB_CFG, reg); -- cgit v0.10.2 From 60f6e65d7887c257392313755f95540ef5e7ea89 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 17 Jan 2011 10:52:02 +0800 Subject: x86: Cleanup vector usage Cleanup the vector usage and make them continuous if possible. Signed-off-by: Shaohua Li Cc: Andi Kleen Cc: Eric Dumazet LKML-Reference: <1295232722.1949.707.camel@sli10-conroe> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 6af0894..42f0d4a 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -1,6 +1,7 @@ #ifndef _ASM_X86_IRQ_VECTORS_H #define _ASM_X86_IRQ_VECTORS_H +#include /* * Linux IRQ vector layout. * @@ -16,8 +17,8 @@ * Vectors 0 ... 31 : system traps and exceptions - hardcoded events * Vectors 32 ... 127 : device interrupts * Vector 128 : legacy int80 syscall interface - * Vectors 129 ... 237 : device interrupts - * Vectors 238 ... 255 : special interrupts + * Vectors 129 ... 229 : device interrupts + * Vectors 230 ... 255 : special interrupts * * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. 
* @@ -96,37 +97,38 @@ #define THRESHOLD_APIC_VECTOR 0xf9 #define REBOOT_VECTOR 0xf8 -/* f0-f7 used for spreading out TLB flushes: */ -#define INVALIDATE_TLB_VECTOR_END 0xf7 -#define INVALIDATE_TLB_VECTOR_START 0xf0 -#define NUM_INVALIDATE_TLB_VECTORS 8 - -/* - * Local APIC timer IRQ vector is on a different priority level, - * to work around the 'lost local interrupt if more than 2 IRQ - * sources per level' errata. - */ -#define LOCAL_TIMER_VECTOR 0xef - /* * Generic system vector for platform specific use */ -#define X86_PLATFORM_IPI_VECTOR 0xed +#define X86_PLATFORM_IPI_VECTOR 0xf7 /* * IRQ work vector: */ -#define IRQ_WORK_VECTOR 0xec +#define IRQ_WORK_VECTOR 0xf6 -#define UV_BAU_MESSAGE 0xea +#define UV_BAU_MESSAGE 0xf5 /* * Self IPI vector for machine checks */ -#define MCE_SELF_VECTOR 0xeb +#define MCE_SELF_VECTOR 0xf4 /* Xen vector callback to receive events in a HVM domain */ -#define XEN_HVM_EVTCHN_CALLBACK 0xe9 +#define XEN_HVM_EVTCHN_CALLBACK 0xf3 + +/* + * Local APIC timer IRQ vector is on a different priority level, + * to work around the 'lost local interrupt if more than 2 IRQ + * sources per level' errata. + */ +#define LOCAL_TIMER_VECTOR 0xef + +/* f0-f7 used for spreading out TLB flushes: */ +#define NUM_INVALIDATE_TLB_VECTORS 8 +#define INVALIDATE_TLB_VECTOR_END 0xee +#define INVALIDATE_TLB_VECTOR_START \ + (INVALIDATE_TLB_VECTOR_END - NUM_INVALIDATE_TLB_VECTORS + 1) #define NR_VECTORS 256 -- cgit v0.10.2 From 3a09fb4570a1cce11472b8e5da3f6ee409f529d5 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 17 Jan 2011 10:52:05 +0800 Subject: x86: Allocate 32 tlb_invalidate_interrupt handler stubs Add up to 32 invalidate_interrupt handlers. How many handlers are added depends on NUM_INVALIDATE_TLB_VECTORS. So if NUM_INVALIDATE_TLB_VECTORS is smaller than 32, we reduce code size. 
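The allocation side of this (in the irqinit.c hunk below) registers only the first NUM_INVALIDATE_TLB_VECTORS stubs by deliberately falling through a switch. A stand-alone sketch of that idiom, with a hypothetical register_stub() standing in for alloc_intr_gate():

#include <stdio.h>

#define NUM_VECTORS 4			/* pretend NUM_INVALIDATE_TLB_VECTORS is 4 */

static void register_stub(int nr)	/* stand-in for alloc_intr_gate() */
{
	printf("stub %d registered\n", nr);
}

int main(void)
{
	/* no break until the end: entering at case N registers N-1 ... 0 */
	switch (NUM_VECTORS) {
	default:
		register_stub(7);
	case 7:
		register_stub(6);
	case 6:
		register_stub(5);
	case 5:
		register_stub(4);
	case 4:
		register_stub(3);
	case 3:
		register_stub(2);
	case 2:
		register_stub(1);
	case 1:
		register_stub(0);
		break;
	}
	return 0;			/* prints stubs 3, 2, 1, 0 */
}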
Signed-off-by: Shaohua Li Cc: Andi Kleen Cc: Eric Dumazet LKML-Reference: <1295232725.1949.708.camel@sli10-conroe> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index 57650ab..1cd6d26 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -16,10 +16,13 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) -.irpc idx, "01234567" +.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ + 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +.if NUM_INVALIDATE_TLB_VECTORS > \idx BUILD_INTERRUPT3(invalidate_interrupt\idx, (INVALIDATE_TLB_VECTOR_START)+\idx, smp_invalidate_interrupt) +.endif .endr #endif diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 0274ec5..bb9efe8 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -45,6 +45,30 @@ extern void invalidate_interrupt4(void); extern void invalidate_interrupt5(void); extern void invalidate_interrupt6(void); extern void invalidate_interrupt7(void); +extern void invalidate_interrupt8(void); +extern void invalidate_interrupt9(void); +extern void invalidate_interrupt10(void); +extern void invalidate_interrupt11(void); +extern void invalidate_interrupt12(void); +extern void invalidate_interrupt13(void); +extern void invalidate_interrupt14(void); +extern void invalidate_interrupt15(void); +extern void invalidate_interrupt16(void); +extern void invalidate_interrupt17(void); +extern void invalidate_interrupt18(void); +extern void invalidate_interrupt19(void); +extern void invalidate_interrupt20(void); +extern void invalidate_interrupt21(void); +extern void invalidate_interrupt22(void); +extern void invalidate_interrupt23(void); +extern void invalidate_interrupt24(void); +extern void invalidate_interrupt25(void); +extern void invalidate_interrupt26(void); +extern void invalidate_interrupt27(void); +extern void invalidate_interrupt28(void); +extern void invalidate_interrupt29(void); +extern void invalidate_interrupt30(void); +extern void invalidate_interrupt31(void); extern void irq_move_cleanup_interrupt(void); extern void reboot_interrupt(void); diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index aed1ffb..891268c 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -975,9 +975,12 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \ x86_platform_ipi smp_x86_platform_ipi #ifdef CONFIG_SMP -.irpc idx, "01234567" +.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ + 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +.if NUM_INVALIDATE_TLB_VECTORS > \idx apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \ invalidate_interrupt\idx smp_invalidate_interrupt +.endif .endr #endif diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index c752e97..7aad10a 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -164,14 +164,77 @@ static void __init smp_intr_init(void) alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); /* IPIs for invalidation */ - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); - 
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); +#define ALLOC_INVTLB_VEC(NR) \ + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \ + invalidate_interrupt##NR) + + switch (NUM_INVALIDATE_TLB_VECTORS) { + default: + ALLOC_INVTLB_VEC(31); + case 31: + ALLOC_INVTLB_VEC(30); + case 30: + ALLOC_INVTLB_VEC(29); + case 29: + ALLOC_INVTLB_VEC(28); + case 28: + ALLOC_INVTLB_VEC(27); + case 27: + ALLOC_INVTLB_VEC(26); + case 26: + ALLOC_INVTLB_VEC(25); + case 25: + ALLOC_INVTLB_VEC(24); + case 24: + ALLOC_INVTLB_VEC(23); + case 23: + ALLOC_INVTLB_VEC(22); + case 22: + ALLOC_INVTLB_VEC(21); + case 21: + ALLOC_INVTLB_VEC(20); + case 20: + ALLOC_INVTLB_VEC(19); + case 19: + ALLOC_INVTLB_VEC(18); + case 18: + ALLOC_INVTLB_VEC(17); + case 17: + ALLOC_INVTLB_VEC(16); + case 16: + ALLOC_INVTLB_VEC(15); + case 15: + ALLOC_INVTLB_VEC(14); + case 14: + ALLOC_INVTLB_VEC(13); + case 13: + ALLOC_INVTLB_VEC(12); + case 12: + ALLOC_INVTLB_VEC(11); + case 11: + ALLOC_INVTLB_VEC(10); + case 10: + ALLOC_INVTLB_VEC(9); + case 9: + ALLOC_INVTLB_VEC(8); + case 8: + ALLOC_INVTLB_VEC(7); + case 7: + ALLOC_INVTLB_VEC(6); + case 6: + ALLOC_INVTLB_VEC(5); + case 5: + ALLOC_INVTLB_VEC(4); + case 4: + ALLOC_INVTLB_VEC(3); + case 3: + ALLOC_INVTLB_VEC(2); + case 2: + ALLOC_INVTLB_VEC(1); + case 1: + ALLOC_INVTLB_VEC(0); + break; + } /* IPI for generic function call */ alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); -- cgit v0.10.2 From 70e4a369733a21e3d16b059a6ccdad22a344bf57 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 17 Jan 2011 10:52:07 +0800 Subject: x86: Scale up the number of TLB invalidate vectors with NR_CPUs, up to 32 Make the maximum number of TLB invalidate vectors depend on NR_CPUS linearly, with a maximum of 32 vectors. We currently only have 8 vectors for TLB invalidation and that is clearly inadequate. If we have a lot of CPUs, the CPUs need to share the 8 vectors, and tlbstate_lock is used to protect them. flush_tlb_page() is heavily used in page reclaim, which will cause a lot of lock contention for tlbstate_lock. Andi Kleen suggested increasing the number of vectors to 32, which should be good enough for current typical systems to reduce the tlbstate_lock contention. My test system has 4 sockets, 64G of memory, and 64 CPUs. My workload creates 64 processes. Each process mmaps and reads a big empty sparse file. The total size of the files is 2*total_mem, so this will cause a lot of page reclaim. Below is the result I get from perf call-graph profiling: without the patch: ------------------ 24.25% usemem [kernel] [k] _raw_spin_lock | --- _raw_spin_lock | |--42.15%-- native_flush_tlb_others with the patch: ------------------ 14.96% usemem [kernel] [k] _raw_spin_lock | --- _raw_spin_lock |--13.89%-- native_flush_tlb_others So this heavily reduces the tlbstate_lock contention. Suggested-by: Andi Kleen Signed-off-by: Shaohua Li Cc: Eric Dumazet Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra LKML-Reference: <1295232727.1949.709.camel@sli10-conroe> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 42f0d4a..4980f48 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -17,8 +17,8 @@ * Vectors 0 ... 31 : system traps and exceptions - hardcoded events * Vectors 32 ...
127 : device interrupts * Vector 128 : legacy int80 syscall interface - * Vectors 129 ... 229 : device interrupts - * Vectors 230 ... 255 : special interrupts + * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts + * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts * * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. * @@ -124,8 +124,13 @@ */ #define LOCAL_TIMER_VECTOR 0xef -/* f0-f7 used for spreading out TLB flushes: */ -#define NUM_INVALIDATE_TLB_VECTORS 8 +/* up to 32 vectors used for spreading out TLB flushes: */ +#if NR_CPUS <= 32 +# define NUM_INVALIDATE_TLB_VECTORS NR_CPUS +#else +# define NUM_INVALIDATE_TLB_VECTORS 32 +#endif + #define INVALIDATE_TLB_VECTOR_END 0xee #define INVALIDATE_TLB_VECTOR_START \ (INVALIDATE_TLB_VECTOR_END - NUM_INVALIDATE_TLB_VECTORS + 1) -- cgit v0.10.2 From 7064d865af804b9b841e7b9a3e9b653e40c3e5ca Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 17 Jan 2011 10:52:10 +0800 Subject: x86: Avoid tlbstate lock if not enough cpus This one isn't related to the previous patch. If the number of online cpus is below NUM_INVALIDATE_TLB_VECTORS, we don't need the lock. The comment in the code declares that we don't need the check, but a hot lock still needs an atomic operation and is expensive, so add the check here. nr_cpu_ids is used here, as suggested by Eric Dumazet. Signed-off-by: Shaohua Li Acked-by: Eric Dumazet Cc: Andi Kleen LKML-Reference: <1295232730.1949.710.camel@sli10-conroe> Signed-off-by: Ingo Molnar diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 6acc724..55272d7 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -179,12 -179,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, sender = this_cpu_read(tlb_vector_offset); f = &flush_state[sender]; - /* - * Could avoid this lock when - * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is - * probably not worth checking this for a cache-hot lock. - */ - raw_spin_lock(&f->tlbstate_lock); + if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS) + raw_spin_lock(&f->tlbstate_lock); f->flush_mm = mm; f->flush_va = va; @@ -202,7 +198,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, f->flush_mm = NULL; f->flush_va = 0; - raw_spin_unlock(&f->tlbstate_lock); + if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS) + raw_spin_unlock(&f->tlbstate_lock); } void native_flush_tlb_others(const struct cpumask *cpumask, -- cgit v0.10.2 From 77eed821accf5dd962b1f13bed0680e217e49112 Mon Sep 17 00:00:00 2001 From: Kamal Mostafa Date: Thu, 3 Feb 2011 17:38:04 -0800 Subject: x86: Fix panic when handling "mem={invalid}" param Avoid removing all of memory and panicking when "mem={invalid}" is specified, e.g. mem=blahblah, mem=0, or mem=nopentium (on platforms other than x86_32). Signed-off-by: Kamal Mostafa BugLink: http://bugs.launchpad.net/bugs/553464 Cc: Yinghai Lu Cc: Len Brown Cc: Rafael J.
Wysocki Cc: # .3x: as far back as it applies LKML-Reference: <1296783486-23033-1-git-send-email-kamal@canonical.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 294f26d..55a59d8 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -856,6 +856,9 @@ static int __init parse_memopt(char *p) userdef = 1; mem_size = memparse(p, &p); + /* don't remove all of memory when handling "mem={invalid}" param */ + if (mem_size == 0) + return -EINVAL; e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); return 0; -- cgit v0.10.2 From 9a6d44b9adb777ca9549e88cd55bd8f2673c52a2 Mon Sep 17 00:00:00 2001 From: Kamal Mostafa Date: Thu, 3 Feb 2011 17:38:05 -0800 Subject: x86: Emit "mem=nopentium ignored" warning when not supported Emit a warning when "mem=nopentium" is specified on any arch other than x86_32 (the only arch that supports it). Signed-off-by: Kamal Mostafa BugLink: http://bugs.launchpad.net/bugs/553464 Cc: Yinghai Lu Cc: Len Brown Cc: Rafael J. Wysocki LKML-Reference: <1296783486-23033-2-git-send-email-kamal@canonical.com> Signed-off-by: Ingo Molnar Cc: diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 55a59d8..0b5e2b5 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -847,12 +847,15 @@ static int __init parse_memopt(char *p) if (!p) return -EINVAL; -#ifdef CONFIG_X86_32 if (!strcmp(p, "nopentium")) { +#ifdef CONFIG_X86_32 setup_clear_cpu_cap(X86_FEATURE_PSE); return 0; - } +#else + printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); + return -EINVAL; #endif + } userdef = 1; mem_size = memparse(p, &p); -- cgit v0.10.2 From 14392fd329eca9b59d51c0aa5d0acfb4965424d1 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 7 Feb 2011 14:08:53 -0800 Subject: x86, numa: Add error handling for bad cpu-to-node mappings With CONFIG_DEBUG_PER_CPU_MAPS, early_cpu_to_node() may return NUMA_NO_NODE when a mapping hasn't been initialized. In such a case, it emits a warning and continues without an issue, but callers may try to use the return value to index into an array. We can catch those errors and fail silently since a warning has already been emitted. No current user of numa_add_cpu() requires this error checking to avoid a crash, but it's better to be proactive in case a future user happens to have a bug and a user tries to diagnose it with CONFIG_DEBUG_PER_CPU_MAPS.
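The shape of the check being added is simply: treat NUMA_NO_NODE as a soft failure before a node id is used as an array index. A small self-contained sketch of that pattern, with hypothetical helpers rather than the kernel's own functions:

#include <stddef.h>

#define NUMA_NO_NODE	(-1)
#define MAX_NUMNODES	64

struct cpumask;
extern struct cpumask *node_masks[MAX_NUMNODES];
extern int node_of_cpu(int cpu);	/* may return NUMA_NO_NODE */

struct cpumask *mask_of_cpu(int cpu)
{
	int node = node_of_cpu(cpu);

	/* an uninitialized mapping must not be used as an index;
	 * the lookup already warned, so just fail quietly here */
	if (node == NUMA_NO_NODE)
		return NULL;

	return node_masks[node];
}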
Reported-by: Jesper Juhl Signed-off-by: David Rientjes Cc: Tejun Heo LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index bf60715..9559d36 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -219,6 +219,10 @@ struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) struct cpumask *mask; char buf[64]; + if (node == NUMA_NO_NODE) { + /* early_cpu_to_node() already emits a warning and trace */ + return NULL; + } mask = node_to_cpumask_map[node]; if (!mask) { pr_err("node_to_cpumask_map[%i] NULL\n", node); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index f548fbf..3f9411e 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -709,6 +709,10 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) struct cpumask *mask; int i; + if (node == NUMA_NO_NODE) { + /* early_cpu_to_node() already emits a warning and trace */ + return; + } for_each_online_node(i) { unsigned long addr; -- cgit v0.10.2 From 9e81509efc4fefcdd75cc6a4121672fa71ae8745 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 14 Feb 2011 18:14:51 +0100 Subject: x86, amd: Initialize variable properly Commit d518573de63f ("x86, amd: Normalize compute unit IDs on multi-node processors") introduced compute unit normalization but causes a compiler warning: arch/x86/kernel/cpu/amd.c: In function 'amd_detect_cmp': arch/x86/kernel/cpu/amd.c:268: warning: 'cores_per_cu' may be used uninitialized in this function arch/x86/kernel/cpu/amd.c:268: note: 'cores_per_cu' was declared here The compiler is right - initialize it with a proper value. Also, fix up a comment while at it. Reported-by: Andrew Morton Signed-off-by: Borislav Petkov Cc: Andreas Herrmann LKML-Reference: <20110214171451.GB10076@kryptos.osrc.amd.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 990cc48..589bdd7 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -261,7 +261,7 @@ static int __cpuinit nearby_node(int apicid) #ifdef CONFIG_X86_HT static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) { - u32 nodes, cores_per_cu; + u32 nodes, cores_per_cu = 1; u8 node_id; int cpu = smp_processor_id(); @@ -276,7 +276,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) /* get compute unit information */ smp_num_siblings = ((ebx >> 8) & 3) + 1; c->compute_unit_id = ebx & 0xff; - cores_per_cu = ((ebx >> 8) & 3) + 1; + cores_per_cu += ((ebx >> 8) & 3); } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { u64 value; @@ -298,7 +298,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) /* store NodeID, use llc_shared_map to store sibling info */ per_cpu(cpu_llc_id, cpu) = node_id; - /* core id to be in range from 0 to (cores_per_node - 1) */ + /* core id has to be in the [0 .. cores_per_node - 1] range */ c->cpu_core_id %= cores_per_node; c->compute_unit_id %= cus_per_node; } -- cgit v0.10.2 From 7d36b7bc9022f35f95cd85cdf441846298e8f9fb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:06 +0100 Subject: x86-64, NUMA: Make dummy node initialization path similar to non-dummy ones Dummy node initialization in initmem_init() didn't initialize apicid to node mapping and set cpu to node mapping directly by caling numa_set_node(), which is different from non-dummy init paths. Update it such that they behave similarly. Initialize apicid to node mapping and call numa_init_array(). The actual cpu to node mapping is handled by init_cpu_to_node() later. 
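The two-stage assignment this relies on - seed every CPU with a provisional node by round robin, then let a later pass overwrite entries once firmware tables give a real answer - looks roughly like this in isolation (a toy user-space sketch with made-up names, not kernel code):

#include <stdio.h>

#define NR_CPUS		8
#define NO_NODE		(-1)

static const int online_nodes[] = { 0 };	/* dummy setup: a single node */
static const int nr_online = 1;

/* stand-in for the firmware-derived lookup a later pass would use */
static int firmware_node_of(int cpu)
{
	(void)cpu;
	return NO_NODE;				/* dummy path: nothing known */
}

int main(void)
{
	int cpu_to_node[NR_CPUS], cpu, rr = 0;

	/* stage 1: provisional round-robin mapping over the online nodes */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpu_to_node[cpu] = online_nodes[rr];
		rr = (rr + 1) % nr_online;
	}

	/* stage 2: overwrite with real answers where available */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (firmware_node_of(cpu) != NO_NODE)
			cpu_to_node[cpu] = firmware_node_of(cpu);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d -> node %d\n", cpu, cpu_to_node[cpu]);
	return 0;
}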
Signed-off-by: Tejun Heo Acked-by: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 43ad327..b7d78d7 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -624,11 +624,12 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, memnodemap[0] = 0; node_set_online(0); node_set(0, node_possible_map); - for (i = 0; i < nr_cpu_ids; i++) - numa_set_node(i, 0); + for (i = 0; i < MAX_LOCAL_APIC; i++) + set_apicid_to_node(i, NUMA_NO_NODE); memblock_x86_register_active_regions(0, start_pfn, last_pfn); init_memory_mapping_high(); setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); + numa_init_array(); } unsigned long __init numa_free_all_bootmem(void) -- cgit v0.10.2 From 13081df5dd6eae1951a3c398fa17d3ed2037a78f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:06 +0100 Subject: x86-64, NUMA: Simplify hotplug node handling in acpi_numa_memory_affinity_init() Hotplug node handling in acpi_numa_memory_affinity_init() was unnecessarily complicated with storing the original nodes[] entry and restoring it afterwards. Simplify it by not modifying the nodes[] entry for hotplug nodes from the beginning. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 23498f8..988b0b70 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -251,7 +251,7 @@ update_nodes_add(int node, unsigned long start, unsigned long end) void __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { - struct bootnode *nd, oldnode; + struct bootnode *nd; unsigned long start, end; int node, pxm; int i; @@ -289,28 +289,23 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) bad_srat(); return; } - nd = &nodes[node]; - oldnode = *nd; - if (!node_test_and_set(node, nodes_parsed)) { - nd->start = start; - nd->end = end; - } else { - if (start < nd->start) - nd->start = start; - if (nd->end < end) - nd->end = end; - } printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm, start, end); - if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) { + if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) { + nd = &nodes[node]; + if (!node_test_and_set(node, nodes_parsed)) { + nd->start = start; + nd->end = end; + } else { + if (start < nd->start) + nd->start = start; + if (nd->end < end) + nd->end = end; + } + } else update_nodes_add(node, start, end); - /* restore nodes[node] */ - *nd = oldnode; - if ((nd->start | nd->end) == 0) - node_clear(node, nodes_parsed); - } node_memblk_range[num_node_memblks].start = start; node_memblk_range[num_node_memblks].end = end; -- cgit v0.10.2 From 86ef4dbf1f736bb1a4d567e043e3dd81b8b7860c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:06 +0100 Subject: x86, NUMA: Drop @start/last_pfn from initmem_init() initmem_init() extensively accesses and modifies global data structures and the parameters aren't even followed depending on which path is being used. Drop @start/last_pfn and let it deal with @max_pfn directly. This is in preparation for further NUMA init cleanups. - v2: x86-32 initmem_init() weren't updated breaking 32bit builds. Fixed. Found by Yinghai. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
Peter Anvin diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 731d211..eb9ed00 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -56,8 +56,7 @@ extern unsigned long init_memory_mapping(unsigned long start, void init_memory_mapping_high(void); -extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn, - int acpi, int k8); +extern void initmem_init(int acpi, int k8); extern void free_initmem(void); #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index ac909ba..756d640 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1003,7 +1003,7 @@ void __init setup_arch(char **cmdline_p) amd = !amd_numa_init(0, max_pfn); #endif - initmem_init(0, max_pfn, acpi, amd); + initmem_init(acpi, amd); memblock_find_dma_reserve(); dma32_reserve_bootmem(); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index c821074..16adb66 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -644,8 +644,7 @@ void __init find_low_pfn_range(void) } #ifndef CONFIG_NEED_MULTIPLE_NODES -void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, - int acpi, int k8) +void __init initmem_init(int acpi, int k8) { #ifdef CONFIG_HIGHMEM highstart_pfn = highend_pfn = max_pfn; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 194f273..04cc027 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -603,10 +603,9 @@ kernel_physical_mapping_init(unsigned long start, } #ifndef CONFIG_NUMA -void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, - int acpi, int k8) +void __init initmem_init(int acpi, int k8) { - memblock_x86_register_active_regions(0, start_pfn, end_pfn); + memblock_x86_register_active_regions(0, 0, max_pfn); init_memory_mapping_high(); } #endif diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 505bb04..3249b37 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -352,8 +352,7 @@ static void init_remap_allocator(int nid) (ulong) node_remap_end_vaddr[nid]); } -void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, - int acpi, int k8) +void __init initmem_init(int acpi, int k8) { int nid; long kva_target_pfn; diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index b7d78d7..d7e4aaf 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -579,8 +579,7 @@ static int __init numa_emulation(unsigned long start_pfn, } #endif /* CONFIG_NUMA_EMU */ -void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, - int acpi, int amd) +void __init initmem_init(int acpi, int amd) { int i; @@ -588,19 +587,16 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, nodes_clear(node_online_map); #ifdef CONFIG_NUMA_EMU - setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT, - acpi, amd); - if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd)) + setup_physnodes(0, max_pfn << PAGE_SHIFT, acpi, amd); + if (cmdline && !numa_emulation(0, max_pfn, acpi, amd)) return; - setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT, - acpi, amd); + setup_physnodes(0, max_pfn << PAGE_SHIFT, acpi, amd); nodes_clear(node_possible_map); nodes_clear(node_online_map); #endif #ifdef CONFIG_ACPI_NUMA - if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT, - last_pfn << PAGE_SHIFT)) + if (!numa_off && acpi && !acpi_scan_nodes(0, max_pfn << PAGE_SHIFT)) return; nodes_clear(node_possible_map); 
nodes_clear(node_online_map); @@ -616,8 +612,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, numa_off ? "NUMA turned off" : "No NUMA configuration found"); printk(KERN_INFO "Faking a node at %016lx-%016lx\n", - start_pfn << PAGE_SHIFT, - last_pfn << PAGE_SHIFT); + 0LU, max_pfn << PAGE_SHIFT); /* setup dummy node covering all memory */ memnode_shift = 63; memnodemap = memnode.embedded_map; @@ -626,9 +621,9 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, node_set(0, node_possible_map); for (i = 0; i < MAX_LOCAL_APIC; i++) set_apicid_to_node(i, NUMA_NO_NODE); - memblock_x86_register_active_regions(0, start_pfn, last_pfn); + memblock_x86_register_active_regions(0, 0, max_pfn); init_memory_mapping_high(); - setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); + setup_node_bootmem(0, 0, max_pfn << PAGE_SHIFT); numa_init_array(); } -- cgit v0.10.2 From 940fed2e79a15cf0d006c860d7811adbe5c19882 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:06 +0100 Subject: x86-64, NUMA: Unify {acpi|amd}_{numa_init|scan_nodes}() arguments and return values The functions used during NUMA initialization - *_numa_init() and *_scan_nodes() - have different arguments and return values. Unify them such that they all take no argument and return 0 on success and -errno on failure. This is in preparation for further NUMA init cleanups. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 211ca3f..4e5dff9 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -187,7 +187,7 @@ struct bootnode; extern int acpi_numa; extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start, unsigned long end); -extern int acpi_scan_nodes(unsigned long start, unsigned long end); +extern int acpi_scan_nodes(void); #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) #ifdef CONFIG_NUMA_EMU diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 2b33c4d..dc3c6e3 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -16,7 +16,7 @@ struct bootnode; extern int early_is_amd_nb(u32 value); extern int amd_cache_northbridges(void); extern void amd_flush_garts(void); -extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn); +extern int amd_numa_init(void); extern int amd_scan_nodes(void); extern int amd_get_subcaches(int); extern int amd_set_subcaches(int, int); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 756d640..96810a3 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -995,12 +995,12 @@ void __init setup_arch(char **cmdline_p) /* * Parse SRAT to discover nodes. 
*/ - acpi = acpi_numa_init(); + acpi = !acpi_numa_init(); #endif #ifdef CONFIG_AMD_NUMA if (!acpi) - amd = !amd_numa_init(0, max_pfn); + amd = !amd_numa_init(); #endif initmem_init(acpi, amd); diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index 2523c35..655ccff 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -51,7 +51,7 @@ static __init int find_northbridge(void) return num; } - return -1; + return -ENOENT; } static __init void early_get_boot_cpu_id(void) @@ -69,17 +69,17 @@ static __init void early_get_boot_cpu_id(void) #endif } -int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) +int __init amd_numa_init(void) { - unsigned long start = PFN_PHYS(start_pfn); - unsigned long end = PFN_PHYS(end_pfn); + unsigned long start = PFN_PHYS(0); + unsigned long end = PFN_PHYS(max_pfn); unsigned numnodes; unsigned long prevbase; int i, nb, found = 0; u32 nodeid, reg; if (!early_pci_allowed()) - return -1; + return -EINVAL; nb = find_northbridge(); if (nb < 0) @@ -90,7 +90,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) reg = read_pci_config(0, nb, 0, 0x60); numnodes = ((reg >> 4) & 0xF) + 1; if (numnodes <= 1) - return -1; + return -ENOENT; pr_info("Number of physical nodes %d\n", numnodes); @@ -121,7 +121,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) if ((base >> 8) & 3 || (limit >> 8) & 3) { pr_err("Node %d using interleaving mode %lx/%lx\n", nodeid, (base >> 8) & 3, (limit >> 8) & 3); - return -1; + return -EINVAL; } if (node_isset(nodeid, nodes_parsed)) { pr_info("Node %d already present, skipping\n", @@ -160,7 +160,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) if (prevbase > base) { pr_err("Node map not sorted %lx,%lx\n", prevbase, base); - return -1; + return -EINVAL; } pr_info("Node %d MemBase %016lx Limit %016lx\n", @@ -177,7 +177,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) } if (!found) - return -1; + return -ENOENT; return 0; } diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index d7e4aaf..a083f51 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -596,7 +596,7 @@ void __init initmem_init(int acpi, int amd) #endif #ifdef CONFIG_ACPI_NUMA - if (!numa_off && acpi && !acpi_scan_nodes(0, max_pfn << PAGE_SHIFT)) + if (!numa_off && acpi && !acpi_scan_nodes()) return; nodes_clear(node_possible_map); nodes_clear(node_online_map); diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 988b0b70..4f9dbf0 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -359,7 +359,7 @@ void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start, #endif /* CONFIG_NUMA_EMU */ /* Use the information discovered above to actually set up the nodes. 
*/ -int __init acpi_scan_nodes(unsigned long start, unsigned long end) +int __init acpi_scan_nodes(void) { int i; @@ -368,7 +368,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) /* First clean up the node list */ for (i = 0; i < MAX_NUMNODES; i++) - cutoff_node(i, start, end); + cutoff_node(i, 0, max_pfn << PAGE_SHIFT); /* * Join together blocks on the same node, holes between diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 5eb25eb..3b5c318 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -274,7 +274,7 @@ acpi_table_parse_srat(enum acpi_srat_type id, int __init acpi_numa_init(void) { - int ret = 0; + int cnt = 0; /* * Should not limit number with cpu num that is from NR_CPUS or nr_cpus= @@ -288,7 +288,7 @@ int __init acpi_numa_init(void) acpi_parse_x2apic_affinity, 0); acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, acpi_parse_processor_affinity, 0); - ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, + cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); } @@ -297,7 +297,10 @@ int __init acpi_numa_init(void) acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit); acpi_numa_arch_fixup(); - return ret; + + if (cnt <= 0) + return cnt ?: -ENOENT; + return 0; } int acpi_get_pxm(acpi_handle h) -- cgit v0.10.2 From a9aec56afac238e4ed3980bd10b22121b83866dd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:06 +0100 Subject: x86-64, NUMA: Wrap acpi_numa_init() so that failure can be indicated by return value Because of the way ACPI tables are parsed, the generic acpi_numa_init() couldn't return failure when error was detected by arch hooks. Instead, the failure state was recorded and later arch dependent init hook - acpi_scan_nodes() - would fail. Wrap acpi_numa_init() with x86_acpi_numa_init() so that failure can be indicated as return value immediately. This is in preparation for further NUMA init cleanups. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 4e5dff9..06fb786 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -187,6 +187,7 @@ struct bootnode; extern int acpi_numa; extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start, unsigned long end); +extern int x86_acpi_numa_init(void); extern int acpi_scan_nodes(void); #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 96810a3..c9a139c 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -995,7 +995,7 @@ void __init setup_arch(char **cmdline_p) /* * Parse SRAT to discover nodes. */ - acpi = !acpi_numa_init(); + acpi = !x86_acpi_numa_init(); #endif #ifdef CONFIG_AMD_NUMA diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 4f9dbf0..56b9263 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -358,6 +358,16 @@ void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start, } #endif /* CONFIG_NUMA_EMU */ +int __init x86_acpi_numa_init(void) +{ + int ret; + + ret = acpi_numa_init(); + if (ret < 0) + return ret; + return srat_disabled() ? -EINVAL : 0; +} + /* Use the information discovered above to actually set up the nodes. 
*/ int __init acpi_scan_nodes(void) { -- cgit v0.10.2 From d8fc3afc49bb226c20e37f48a4ddd493cd092837 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:06 +0100 Subject: x86, NUMA: Move *_numa_init() invocations into initmem_init() There's no reason for these to live in setup_arch(). Move them inside initmem_init(). - v2: x86-32 initmem_init() weren't updated breaking 32bit builds. Fixed. Found by Ankita. Signed-off-by: Tejun Heo Cc: Ankita Garg Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index eb9ed00..97e6007 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -56,7 +56,7 @@ extern unsigned long init_memory_mapping(unsigned long start, void init_memory_mapping_high(void); -extern void initmem_init(int acpi, int k8); +extern void initmem_init(void); extern void free_initmem(void); #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c9a139c..46e684f 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -719,8 +719,6 @@ early_param("reservelow", parse_reservelow); void __init setup_arch(char **cmdline_p) { - int acpi = 0; - int amd = 0; unsigned long flags; #ifdef CONFIG_X86_32 @@ -991,19 +989,7 @@ void __init setup_arch(char **cmdline_p) early_acpi_boot_init(); -#ifdef CONFIG_ACPI_NUMA - /* - * Parse SRAT to discover nodes. - */ - acpi = !x86_acpi_numa_init(); -#endif - -#ifdef CONFIG_AMD_NUMA - if (!acpi) - amd = !amd_numa_init(); -#endif - - initmem_init(acpi, amd); + initmem_init(); memblock_find_dma_reserve(); dma32_reserve_bootmem(); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 16adb66..5d43fa5 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -644,7 +644,7 @@ void __init find_low_pfn_range(void) } #ifndef CONFIG_NEED_MULTIPLE_NODES -void __init initmem_init(int acpi, int k8) +void __init initmem_init(void) { #ifdef CONFIG_HIGHMEM highstart_pfn = highend_pfn = max_pfn; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 04cc027..4f1f461 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -603,7 +603,7 @@ kernel_physical_mapping_init(unsigned long start, } #ifndef CONFIG_NUMA -void __init initmem_init(int acpi, int k8) +void __init initmem_init(void) { memblock_x86_register_active_regions(0, 0, max_pfn); init_memory_mapping_high(); diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 3249b37..bde3906 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -352,7 +352,7 @@ static void init_remap_allocator(int nid) (ulong) node_remap_end_vaddr[nid]); } -void __init initmem_init(int acpi, int k8) +void __init initmem_init(void) { int nid; long kva_target_pfn; diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a083f51..656b0cf 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -579,10 +580,23 @@ static int __init numa_emulation(unsigned long start_pfn, } #endif /* CONFIG_NUMA_EMU */ -void __init initmem_init(int acpi, int amd) +void __init initmem_init(void) { + int acpi = 0, amd = 0; int i; +#ifdef CONFIG_ACPI_NUMA + /* + * Parse SRAT to discover nodes. 
+ */ acpi = !x86_acpi_numa_init(); #endif +#ifdef CONFIG_AMD_NUMA + if (!acpi) + amd = !amd_numa_init(); +#endif + nodes_clear(node_possible_map); nodes_clear(node_online_map); -- cgit v0.10.2 From ffe77a4605fb2588f8666850ad3e3b196241658f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:06 +0100 Subject: x86-64, NUMA: Restructure initmem_init() Reorganize initmem_init() such that: * Different NUMA init methods are iterated in a consistent way. * Each iteration re-initializes all the parameters and a different method can be tried after a failure. * Dummy init is handled the same as other methods. Apart from how failures are retried, this patch doesn't change the behavior. The call sequences are kept equivalent across the conversion. After the change, bad_srat() doesn't need to clear apic to node mapping or worry about numa_off. Simplified accordingly. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 656b0cf..c984e34 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -580,65 +580,73 @@ static int __init numa_emulation(unsigned long start_pfn, } #endif /* CONFIG_NUMA_EMU */ -void __init initmem_init(void) +static int dummy_numa_init(void) { - int acpi = 0, amd = 0; - int i; - -#ifdef CONFIG_ACPI_NUMA - /* - * Parse SRAT to discover nodes. - */ - acpi = !x86_acpi_numa_init(); -#endif - -#ifdef CONFIG_AMD_NUMA - if (!acpi) - amd = !amd_numa_init(); -#endif - - nodes_clear(node_possible_map); - nodes_clear(node_online_map); - -#ifdef CONFIG_NUMA_EMU - setup_physnodes(0, max_pfn << PAGE_SHIFT, acpi, amd); - if (cmdline && !numa_emulation(0, max_pfn, acpi, amd)) - return; - setup_physnodes(0, max_pfn << PAGE_SHIFT, acpi, amd); - nodes_clear(node_possible_map); - nodes_clear(node_online_map); -#endif - -#ifdef CONFIG_ACPI_NUMA - if (!numa_off && acpi && !acpi_scan_nodes()) - return; - nodes_clear(node_possible_map); - nodes_clear(node_online_map); -#endif + return 0; +} -#ifdef CONFIG_AMD_NUMA - if (!numa_off && amd && !amd_scan_nodes()) - return; - nodes_clear(node_possible_map); - nodes_clear(node_online_map); -#endif +static int dummy_scan_nodes(void) +{ printk(KERN_INFO "%s\n", numa_off ?
"NUMA turned off" : "No NUMA configuration found"); - printk(KERN_INFO "Faking a node at %016lx-%016lx\n", 0LU, max_pfn << PAGE_SHIFT); + /* setup dummy node covering all memory */ memnode_shift = 63; memnodemap = memnode.embedded_map; memnodemap[0] = 0; node_set_online(0); node_set(0, node_possible_map); - for (i = 0; i < MAX_LOCAL_APIC; i++) - set_apicid_to_node(i, NUMA_NO_NODE); memblock_x86_register_active_regions(0, 0, max_pfn); init_memory_mapping_high(); setup_node_bootmem(0, 0, max_pfn << PAGE_SHIFT); numa_init_array(); + + return 0; +} + +void __init initmem_init(void) +{ + int (*numa_init[])(void) = { [2] = dummy_numa_init }; + int (*scan_nodes[])(void) = { [2] = dummy_scan_nodes }; + int i, j; + + if (!numa_off) { +#ifdef CONFIG_ACPI_NUMA + numa_init[0] = x86_acpi_numa_init; + scan_nodes[0] = acpi_scan_nodes; +#endif +#ifdef CONFIG_AMD_NUMA + numa_init[1] = amd_numa_init; + scan_nodes[1] = amd_scan_nodes; +#endif + } + + for (i = 0; i < ARRAY_SIZE(numa_init); i++) { + if (!numa_init[i]) + continue; + + for (j = 0; j < MAX_LOCAL_APIC; j++) + set_apicid_to_node(j, NUMA_NO_NODE); + + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + + if (numa_init[i]() < 0) + continue; +#ifdef CONFIG_NUMA_EMU + setup_physnodes(0, max_pfn << PAGE_SHIFT, i == 0, i == 1); + if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1)) + return; + setup_physnodes(0, max_pfn << PAGE_SHIFT, i == 0, i == 1); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); +#endif + if (!scan_nodes[i]()) + return; + } + BUG(); } unsigned long __init numa_free_all_bootmem(void) diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 56b9263..597e011 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -78,8 +78,6 @@ static __init void bad_srat(void) int i; printk(KERN_ERR "SRAT: SRAT not used.\n"); acpi_numa = -1; - for (i = 0; i < MAX_LOCAL_APIC; i++) - set_apicid_to_node(i, NUMA_NO_NODE); for (i = 0; i < MAX_NUMNODES; i++) { nodes[i].start = nodes[i].end = 0; nodes_add[i].start = nodes_add[i].end = 0; @@ -89,7 +87,7 @@ static __init void bad_srat(void) static __init inline int srat_disabled(void) { - return numa_off || acpi_numa < 0; + return acpi_numa < 0; } /* Callback for SLIT parsing */ -- cgit v0.10.2 From ec8cf29b1d39aeb6ef98bc589f0c9a33a8f94c49 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:07 +0100 Subject: x86-64, NUMA: Use common {cpu|mem}_nodes_parsed ACPI and amd are using separate nodes_parsed masks. Add {cpu|mem}_nodes_parsed and use them in all NUMA init methods. Initialization of the masks and building node_possible_map are now handled commonly by initmem_init(). dummy_numa_init() is updated to set node 0 on both masks. While at it, move the info messages from scan to init. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
Peter Anvin diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 2819afa..de45936 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -27,6 +27,9 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, */ #define NODE_MIN_SIZE (4*1024*1024) +extern nodemask_t cpu_nodes_parsed __initdata; +extern nodemask_t mem_nodes_parsed __initdata; + extern int __cpuinit numa_cpu_node(int cpu); #ifdef CONFIG_NUMA_EMU diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index 655ccff..4f822a2 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -28,7 +28,6 @@ static struct bootnode __initdata nodes[8]; static unsigned char __initdata nodeids[8]; -static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE; static __init int find_northbridge(void) { @@ -123,7 +122,7 @@ int __init amd_numa_init(void) nodeid, (base >> 8) & 3, (limit >> 8) & 3); return -EINVAL; } - if (node_isset(nodeid, nodes_parsed)) { + if (node_isset(nodeid, mem_nodes_parsed)) { pr_info("Node %d already present, skipping\n", nodeid); continue; @@ -173,7 +172,8 @@ int __init amd_numa_init(void) prevbase = base; - node_set(nodeid, nodes_parsed); + node_set(nodeid, mem_nodes_parsed); + node_set(nodeid, cpu_nodes_parsed); } if (!found) @@ -190,7 +190,7 @@ void __init amd_get_nodes(struct bootnode *physnodes) { int i; - for_each_node_mask(i, nodes_parsed) { + for_each_node_mask(i, mem_nodes_parsed) { physnodes[i].start = nodes[i].start; physnodes[i].end = nodes[i].end; } @@ -258,8 +258,6 @@ int __init amd_scan_nodes(void) unsigned int apicid_base; int i; - BUG_ON(nodes_empty(nodes_parsed)); - node_possible_map = nodes_parsed; memnode_shift = compute_hash_shift(nodes, 8, NULL); if (memnode_shift < 0) { pr_err("No NUMA node hash function found. Contact maintainer\n"); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index c984e34..4404e1d 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -25,6 +25,9 @@ struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); +nodemask_t cpu_nodes_parsed __initdata; +nodemask_t mem_nodes_parsed __initdata; + struct memnode memnode; static unsigned long __initdata nodemap_addr; @@ -582,22 +585,23 @@ static int __init numa_emulation(unsigned long start_pfn, static int dummy_numa_init(void) { - return 0; -} - -static int dummy_scan_nodes(void) -{ printk(KERN_INFO "%s\n", numa_off ? 
"NUMA turned off" : "No NUMA configuration found"); printk(KERN_INFO "Faking a node at %016lx-%016lx\n", 0LU, max_pfn << PAGE_SHIFT); + node_set(0, cpu_nodes_parsed); + node_set(0, mem_nodes_parsed); + + return 0; +} + +static int dummy_scan_nodes(void) +{ /* setup dummy node covering all memory */ memnode_shift = 63; memnodemap = memnode.embedded_map; memnodemap[0] = 0; - node_set_online(0); - node_set(0, node_possible_map); memblock_x86_register_active_regions(0, 0, max_pfn); init_memory_mapping_high(); setup_node_bootmem(0, 0, max_pfn << PAGE_SHIFT); @@ -630,6 +634,8 @@ void __init initmem_init(void) for (j = 0; j < MAX_LOCAL_APIC; j++) set_apicid_to_node(j, NUMA_NO_NODE); + nodes_clear(cpu_nodes_parsed); + nodes_clear(mem_nodes_parsed); nodes_clear(node_possible_map); nodes_clear(node_online_map); @@ -643,6 +649,11 @@ void __init initmem_init(void) nodes_clear(node_possible_map); nodes_clear(node_online_map); #endif + /* Account for nodes with cpus and no memory */ + nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed); + if (WARN_ON(nodes_empty(node_possible_map))) + continue; + if (!scan_nodes[i]()) return; } diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 597e011..33e72ec 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -28,8 +28,6 @@ int acpi_numa __initdata; static struct acpi_table_slit *acpi_slit; -static nodemask_t nodes_parsed __initdata; -static nodemask_t cpu_nodes_parsed __initdata; static struct bootnode nodes[MAX_NUMNODES] __initdata; static struct bootnode nodes_add[MAX_NUMNODES]; @@ -293,7 +291,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) { nd = &nodes[node]; - if (!node_test_and_set(node, nodes_parsed)) { + if (!node_test_and_set(node, mem_nodes_parsed)) { nd->start = start; nd->end = end; } else { @@ -319,7 +317,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) unsigned long pxmram, e820ram; pxmram = 0; - for_each_node_mask(i, nodes_parsed) { + for_each_node_mask(i, mem_nodes_parsed) { unsigned long s = nodes[i].start >> PAGE_SHIFT; unsigned long e = nodes[i].end >> PAGE_SHIFT; pxmram += e - s; @@ -348,7 +346,7 @@ void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start, { int i; - for_each_node_mask(i, nodes_parsed) { + for_each_node_mask(i, mem_nodes_parsed) { cutoff_node(i, start, end); physnodes[i].start = nodes[i].start; physnodes[i].end = nodes[i].end; @@ -449,9 +447,6 @@ int __init acpi_scan_nodes(void) init_memory_mapping_high(); - /* Account for nodes with cpus and no memory */ - nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed); - /* Finally register nodes */ for_each_node_mask(i, node_possible_map) setup_node_bootmem(i, nodes[i].start, nodes[i].end); @@ -485,7 +480,7 @@ static int __init find_node_by_addr(unsigned long addr) int ret = NUMA_NO_NODE; int i; - for_each_node_mask(i, nodes_parsed) { + for_each_node_mask(i, mem_nodes_parsed) { /* * Find the real node that this emulated node appears on. 
For * the sake of simplicity, we only use a real node's starting @@ -545,10 +540,10 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node)); - nodes_clear(nodes_parsed); + nodes_clear(mem_nodes_parsed); for (i = 0; i < num_nodes; i++) if (fake_nodes[i].start != fake_nodes[i].end) - node_set(i, nodes_parsed); + node_set(i, mem_nodes_parsed); } static int null_slit_node_compare(int a, int b) -- cgit v0.10.2 From 99df738cd28cc39054cd1a77685d4a94ed2193a4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:07 +0100 Subject: x86-64, NUMA: Remove local variable found from amd_numa_init() Use weight count on mem_nodes_parsed instead. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index 4f822a2..7d85cf7 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -74,7 +74,7 @@ int __init amd_numa_init(void) unsigned long end = PFN_PHYS(max_pfn); unsigned numnodes; unsigned long prevbase; - int i, nb, found = 0; + int i, nb; u32 nodeid, reg; if (!early_pci_allowed()) @@ -165,8 +165,6 @@ int __init amd_numa_init(void) pr_info("Node %d MemBase %016lx Limit %016lx\n", nodeid, base, limit); - found++; - nodes[nodeid].start = base; nodes[nodeid].end = limit; @@ -176,7 +174,7 @@ int __init amd_numa_init(void) node_set(nodeid, cpu_nodes_parsed); } - if (!found) + if (!nodes_weight(mem_nodes_parsed)) return -ENOENT; return 0; } -- cgit v0.10.2 From 45fe6c78c4ccc384044d1b4877eebe7acf359e76 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:07 +0100 Subject: x86-64, NUMA: Move apicid to numa mapping initialization from amd_scan_nodes() to amd_numa_init() This brings amd initialization behavior closer to that of acpi. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index 7d85cf7..b6029a6 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -74,8 +74,9 @@ int __init amd_numa_init(void) unsigned long end = PFN_PHYS(max_pfn); unsigned numnodes; unsigned long prevbase; - int i, nb; + int i, j, nb; u32 nodeid, reg; + unsigned int bits, cores, apicid_base; if (!early_pci_allowed()) return -EINVAL; @@ -176,6 +177,26 @@ int __init amd_numa_init(void) if (!nodes_weight(mem_nodes_parsed)) return -ENOENT; + + /* + * We seem to have valid NUMA configuration. Map apicids to nodes + * using the coreid bits from early_identify_cpu. 
+ */ + bits = boot_cpu_data.x86_coreid_bits; + cores = 1 << bits; + apicid_base = 0; + + /* get the APIC ID of the BSP early for systems with apicid lifting */ + early_get_boot_cpu_id(); + if (boot_cpu_physical_apicid > 0) { + pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid); + apicid_base = boot_cpu_physical_apicid; + } + + for_each_node_mask(i, cpu_nodes_parsed) + for (j = apicid_base; j < cores + apicid_base; j++) + set_apicid_to_node((i << bits) + j, i); + return 0; } @@ -251,9 +272,6 @@ void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes) int __init amd_scan_nodes(void) { - unsigned int bits; - unsigned int cores; - unsigned int apicid_base; int i; memnode_shift = compute_hash_shift(nodes, 8, NULL); @@ -264,28 +282,13 @@ int __init amd_scan_nodes(void) pr_info("Using node hash shift of %d\n", memnode_shift); /* use the coreid bits from early_identify_cpu */ - bits = boot_cpu_data.x86_coreid_bits; - cores = (1< 0) { - pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid); - apicid_base = boot_cpu_physical_apicid; - } - for_each_node_mask(i, node_possible_map) memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); init_memory_mapping_high(); - for_each_node_mask(i, node_possible_map) { - int j; - - for (j = apicid_base; j < cores + apicid_base; j++) - set_apicid_to_node((i << bits) + j, i); + for_each_node_mask(i, node_possible_map) setup_node_bootmem(i, nodes[i].start, nodes[i].end); - } numa_init_array(); return 0; -- cgit v0.10.2 From 206e42087a037fa3adca8908fd318a0cb64d4dee Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:07 +0100 Subject: x86-64, NUMA: Use common numa_nodes[] ACPI and amd are using separate nodes[] array. Add numa_nodes[] and use them in all NUMA init methods. cutoff_node() cleanup is moved from srat_64.c to numa_64.c and applied in initmem_init() regardless of init methods. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
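[Illustration] The apicid-to-node hunk above derives each core's APIC ID from its node number shifted by the core-ID bit width, plus an optional base taken from the boot CPU's APIC ID. A small standalone sketch of that arithmetic, using made-up values for the bit width, node count, and base:

#include <stdio.h>

int main(void)
{
        /* example values: 2 core-ID bits => 4 cores per node, 2 parsed nodes */
        unsigned int bits = 2, cores = 1u << bits, apicid_base = 0;
        unsigned int nodes = 2;

        /* mirrors set_apicid_to_node((i << bits) + j, i) from the patch */
        for (unsigned int i = 0; i < nodes; i++)
                for (unsigned int j = apicid_base; j < cores + apicid_base; j++)
                        printf("apicid %2u -> node %u\n", (i << bits) + j, i);
        return 0;
}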
Peter Anvin diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index de45936..d3a4514 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -29,6 +29,7 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, extern nodemask_t cpu_nodes_parsed __initdata; extern nodemask_t mem_nodes_parsed __initdata; +extern struct bootnode numa_nodes[MAX_NUMNODES] __initdata; extern int __cpuinit numa_cpu_node(int cpu); diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index b6029a6..f049fa6 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -26,7 +26,6 @@ #include #include -static struct bootnode __initdata nodes[8]; static unsigned char __initdata nodeids[8]; static __init int find_northbridge(void) @@ -166,8 +165,8 @@ int __init amd_numa_init(void) pr_info("Node %d MemBase %016lx Limit %016lx\n", nodeid, base, limit); - nodes[nodeid].start = base; - nodes[nodeid].end = limit; + numa_nodes[nodeid].start = base; + numa_nodes[nodeid].end = limit; prevbase = base; @@ -210,8 +209,8 @@ void __init amd_get_nodes(struct bootnode *physnodes) int i; for_each_node_mask(i, mem_nodes_parsed) { - physnodes[i].start = nodes[i].start; - physnodes[i].end = nodes[i].end; + physnodes[i].start = numa_nodes[i].start; + physnodes[i].end = numa_nodes[i].end; } } @@ -221,7 +220,7 @@ static int __init find_node_by_addr(unsigned long addr) int i; for (i = 0; i < 8; i++) - if (addr >= nodes[i].start && addr < nodes[i].end) { + if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) { ret = i; break; } @@ -274,7 +273,7 @@ int __init amd_scan_nodes(void) { int i; - memnode_shift = compute_hash_shift(nodes, 8, NULL); + memnode_shift = compute_hash_shift(numa_nodes, 8, NULL); if (memnode_shift < 0) { pr_err("No NUMA node hash function found. 
Contact maintainer\n"); return -1; @@ -284,11 +283,11 @@ int __init amd_scan_nodes(void) /* use the coreid bits from early_identify_cpu */ for_each_node_mask(i, node_possible_map) memblock_x86_register_active_regions(i, - nodes[i].start >> PAGE_SHIFT, - nodes[i].end >> PAGE_SHIFT); + numa_nodes[i].start >> PAGE_SHIFT, + numa_nodes[i].end >> PAGE_SHIFT); init_memory_mapping_high(); for_each_node_mask(i, node_possible_map) - setup_node_bootmem(i, nodes[i].start, nodes[i].end); + setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end); numa_init_array(); return 0; diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 4404e1d..a6b899f 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -33,6 +33,8 @@ struct memnode memnode; static unsigned long __initdata nodemap_addr; static unsigned long __initdata nodemap_size; +struct bootnode numa_nodes[MAX_NUMNODES] __initdata; + /* * Given a shift value, try to populate memnodemap[] * Returns : @@ -182,6 +184,22 @@ static void * __init early_node_mem(int nodeid, unsigned long start, return NULL; } +static __init void cutoff_node(int i, unsigned long start, unsigned long end) +{ + struct bootnode *nd = &numa_nodes[i]; + + if (nd->start < start) { + nd->start = start; + if (nd->end < nd->start) + nd->start = nd->end; + } + if (nd->end > end) { + nd->end = end; + if (nd->start > nd->end) + nd->start = nd->end; + } +} + /* Initialize bootmem allocator for a node */ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) @@ -638,9 +656,15 @@ void __init initmem_init(void) nodes_clear(mem_nodes_parsed); nodes_clear(node_possible_map); nodes_clear(node_online_map); + memset(numa_nodes, 0, sizeof(numa_nodes)); if (numa_init[i]() < 0) continue; + + /* clean up the node list */ + for (j = 0; j < MAX_NUMNODES; j++) + cutoff_node(j, 0, max_pfn << PAGE_SHIFT); + #ifdef CONFIG_NUMA_EMU setup_physnodes(0, max_pfn << PAGE_SHIFT, i == 0, i == 1); if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1)) diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 33e72ec..bfa4a6a 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -28,7 +28,6 @@ int acpi_numa __initdata; static struct acpi_table_slit *acpi_slit; -static struct bootnode nodes[MAX_NUMNODES] __initdata; static struct bootnode nodes_add[MAX_NUMNODES]; static int num_node_memblks __initdata; @@ -55,29 +54,13 @@ static __init int conflicting_memblks(unsigned long start, unsigned long end) return -1; } -static __init void cutoff_node(int i, unsigned long start, unsigned long end) -{ - struct bootnode *nd = &nodes[i]; - - if (nd->start < start) { - nd->start = start; - if (nd->end < nd->start) - nd->start = nd->end; - } - if (nd->end > end) { - nd->end = end; - if (nd->start > nd->end) - nd->start = nd->end; - } -} - static __init void bad_srat(void) { int i; printk(KERN_ERR "SRAT: SRAT not used.\n"); acpi_numa = -1; for (i = 0; i < MAX_NUMNODES; i++) { - nodes[i].start = nodes[i].end = 0; + numa_nodes[i].start = numa_nodes[i].end = 0; nodes_add[i].start = nodes_add[i].end = 0; } remove_all_active_ranges(); @@ -276,12 +259,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) if (i == node) { printk(KERN_WARNING "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n", - pxm, start, end, nodes[i].start, nodes[i].end); + pxm, start, end, numa_nodes[i].start, numa_nodes[i].end); } else if (i >= 0) { printk(KERN_ERR "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n", pxm, start, end, node_to_pxm(i), - 
nodes[i].start, nodes[i].end); + numa_nodes[i].start, numa_nodes[i].end); bad_srat(); return; } @@ -290,7 +273,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) start, end); if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) { - nd = &nodes[node]; + nd = &numa_nodes[node]; if (!node_test_and_set(node, mem_nodes_parsed)) { nd->start = start; nd->end = end; @@ -347,9 +330,8 @@ void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start, int i; for_each_node_mask(i, mem_nodes_parsed) { - cutoff_node(i, start, end); - physnodes[i].start = nodes[i].start; - physnodes[i].end = nodes[i].end; + physnodes[i].start = numa_nodes[i].start; + physnodes[i].end = numa_nodes[i].end; } } #endif /* CONFIG_NUMA_EMU */ @@ -372,10 +354,6 @@ int __init acpi_scan_nodes(void) if (acpi_numa <= 0) return -1; - /* First clean up the node list */ - for (i = 0; i < MAX_NUMNODES; i++) - cutoff_node(i, 0, max_pfn << PAGE_SHIFT); - /* * Join together blocks on the same node, holes between * which don't overlap with memory on other nodes. @@ -440,7 +418,7 @@ int __init acpi_scan_nodes(void) /* for out of order entries in SRAT */ sort_node_map(); - if (!nodes_cover_memory(nodes)) { + if (!nodes_cover_memory(numa_nodes)) { bad_srat(); return -1; } @@ -449,12 +427,13 @@ int __init acpi_scan_nodes(void) /* Finally register nodes */ for_each_node_mask(i, node_possible_map) - setup_node_bootmem(i, nodes[i].start, nodes[i].end); + setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end); /* Try again in case setup_node_bootmem missed one due to missing bootmem */ for_each_node_mask(i, node_possible_map) if (!node_online(i)) - setup_node_bootmem(i, nodes[i].start, nodes[i].end); + setup_node_bootmem(i, numa_nodes[i].start, + numa_nodes[i].end); for (i = 0; i < nr_cpu_ids; i++) { int node = early_cpu_to_node(i); @@ -486,7 +465,7 @@ static int __init find_node_by_addr(unsigned long addr) * the sake of simplicity, we only use a real node's starting * address to determine which emulated node it appears on. */ - if (addr >= nodes[i].start && addr < nodes[i].end) { + if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) { ret = i; break; } -- cgit v0.10.2 From 19095548704ecd0f32fd5deba01d56430ad7a344 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 12:13:07 +0100 Subject: x86-64, NUMA: Kill {acpi|amd}_get_nodes() With common numa_nodes[], common code in numa_64.c can access it directly. Copy directly and kill {acpi|amd}_get_nodes(). Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
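[Illustration] With the shared numa_nodes[] array in place, initmem_init() clips every entry with the common cutoff_node() helper shown a couple of hunks above before any method-specific scanning runs. The following userspace sketch reuses that clamping logic on a local struct; the node range and the limit standing in for max_pfn << PAGE_SHIFT are invented.

#include <stdio.h>
#include <stdint.h>

struct bootnode { uint64_t start, end; };

/* same clamping logic as the kernel's cutoff_node(), on a local struct */
static void cutoff_node(struct bootnode *nd, uint64_t start, uint64_t end)
{
        if (nd->start < start) {
                nd->start = start;
                if (nd->end < nd->start)
                        nd->start = nd->end;
        }
        if (nd->end > end) {
                nd->end = end;
                if (nd->start > nd->end)
                        nd->start = nd->end;
        }
}

int main(void)
{
        /* made-up node that extends past the end of usable memory */
        struct bootnode nd = { .start = 0x100000000ULL, .end = 0x300000000ULL };
        uint64_t limit = 0x200000000ULL;        /* stand-in for max_pfn << PAGE_SHIFT */

        cutoff_node(&nd, 0, limit);
        printf("clipped node: %#llx-%#llx\n",
               (unsigned long long)nd.start, (unsigned long long)nd.end);
        return 0;
}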
Peter Anvin diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 06fb786..446a5b9 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -185,8 +185,6 @@ struct bootnode; #ifdef CONFIG_ACPI_NUMA extern int acpi_numa; -extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start, - unsigned long end); extern int x86_acpi_numa_init(void); extern int acpi_scan_nodes(void); #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index dc3c6e3..246cdc6 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -23,7 +23,6 @@ extern int amd_set_subcaches(int, int); #ifdef CONFIG_NUMA_EMU extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes); -extern void amd_get_nodes(struct bootnode *nodes); #endif struct amd_northbridge { diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index f049fa6..cf29527 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -204,16 +204,6 @@ static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE }; -void __init amd_get_nodes(struct bootnode *physnodes) -{ - int i; - - for_each_node_mask(i, mem_nodes_parsed) { - physnodes[i].start = numa_nodes[i].start; - physnodes[i].end = numa_nodes[i].end; - } -} - static int __init find_node_by_addr(unsigned long addr) { int ret = NUMA_NO_NODE; diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a6b899f..82ee308 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -257,21 +257,18 @@ void __init numa_emu_cmdline(char *str) cmdline = str; } -static int __init setup_physnodes(unsigned long start, unsigned long end, - int acpi, int amd) +static int __init setup_physnodes(unsigned long start, unsigned long end) { int ret = 0; int i; memset(physnodes, 0, sizeof(physnodes)); -#ifdef CONFIG_ACPI_NUMA - if (acpi) - acpi_get_nodes(physnodes, start, end); -#endif -#ifdef CONFIG_AMD_NUMA - if (amd) - amd_get_nodes(physnodes); -#endif + + for_each_node_mask(i, mem_nodes_parsed) { + physnodes[i].start = numa_nodes[i].start; + physnodes[i].end = numa_nodes[i].end; + } + /* * Basic sanity checking on the physical node map: there may be errors * if the SRAT or AMD code incorrectly reported the topology or the mem= @@ -594,7 +591,7 @@ static int __init numa_emulation(unsigned long start_pfn, init_memory_mapping_high(); for_each_node_mask(i, node_possible_map) setup_node_bootmem(i, nodes[i].start, nodes[i].end); - setup_physnodes(addr, max_addr, acpi, amd); + setup_physnodes(addr, max_addr); fake_physnodes(acpi, amd, num_nodes); numa_init_array(); return 0; @@ -666,10 +663,10 @@ void __init initmem_init(void) cutoff_node(j, 0, max_pfn << PAGE_SHIFT); #ifdef CONFIG_NUMA_EMU - setup_physnodes(0, max_pfn << PAGE_SHIFT, i == 0, i == 1); + setup_physnodes(0, max_pfn << PAGE_SHIFT); if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1)) return; - setup_physnodes(0, max_pfn << PAGE_SHIFT, i == 0, i == 1); + setup_physnodes(0, max_pfn << PAGE_SHIFT); nodes_clear(node_possible_map); nodes_clear(node_online_map); #endif diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index bfa4a6a..82b1087 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -323,19 +323,6 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) void __init acpi_numa_arch_fixup(void) {} -#ifdef CONFIG_NUMA_EMU -void __init acpi_get_nodes(struct bootnode *physnodes, unsigned 
long start, - unsigned long end) -{ - int i; - - for_each_node_mask(i, mem_nodes_parsed) { - physnodes[i].start = numa_nodes[i].start; - physnodes[i].end = numa_nodes[i].end; - } -} -#endif /* CONFIG_NUMA_EMU */ - int __init x86_acpi_numa_init(void) { int ret; -- cgit v0.10.2 From ef396ec96c1a8ffd2b0bc67f1f79c7274de02b95 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:07 +0100 Subject: x86-64, NUMA: Factor out memblk handling into numa_{add|register}_memblk() Factor out memblk handling from srat_64.c into two functions in numa_64.c. This patch doesn't introduce any behavior change. The next patch will make all init methods use these functions. - v2: Fixed build failure on 32bit due to misplaced NR_NODE_MEMBLKS. Reported by Ingo. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 446a5b9..12bd1fd 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -187,7 +187,6 @@ struct bootnode; extern int acpi_numa; extern int x86_acpi_numa_init(void); extern int acpi_scan_nodes(void); -#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) #ifdef CONFIG_NUMA_EMU extern void acpi_fake_nodes(const struct bootnode *fake_nodes, diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 26fc6e2..3d4dab4 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -5,6 +5,9 @@ #include #ifdef CONFIG_NUMA + +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) + /* * __apicid_to_node[] stores the raw mapping between physical apicid and * node and is used to initialize cpu_to_node mapping. diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index d3a4514..3306a2b 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -32,6 +32,8 @@ extern nodemask_t mem_nodes_parsed __initdata; extern struct bootnode numa_nodes[MAX_NUMNODES] __initdata; extern int __cpuinit numa_cpu_node(int cpu); +extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); +extern int __init numa_register_memblks(void); #ifdef CONFIG_NUMA_EMU #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 82ee308..a1d702d 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -33,6 +33,10 @@ struct memnode memnode; static unsigned long __initdata nodemap_addr; static unsigned long __initdata nodemap_size; +static int num_node_memblks __initdata; +static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata; +static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata; + struct bootnode numa_nodes[MAX_NUMNODES] __initdata; /* @@ -184,6 +188,43 @@ static void * __init early_node_mem(int nodeid, unsigned long start, return NULL; } +static __init int conflicting_memblks(unsigned long start, unsigned long end) +{ + int i; + for (i = 0; i < num_node_memblks; i++) { + struct bootnode *nd = &node_memblk_range[i]; + if (nd->start == nd->end) + continue; + if (nd->end > start && nd->start < end) + return memblk_nodeid[i]; + if (nd->end == end && nd->start == start) + return memblk_nodeid[i]; + } + return -1; +} + +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + int i; + + i = conflicting_memblks(start, end); + if (i == nid) { + printk(KERN_WARNING "NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", + nid, start, end, numa_nodes[i].start, numa_nodes[i].end); + } else if (i >= 0) { + 
printk(KERN_ERR "NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", + nid, start, end, i, + numa_nodes[i].start, numa_nodes[i].end); + return -EINVAL; + } + + node_memblk_range[num_node_memblks].start = start; + node_memblk_range[num_node_memblks].end = end; + memblk_nodeid[num_node_memblks] = nid; + num_node_memblks++; + return 0; +} + static __init void cutoff_node(int i, unsigned long start, unsigned long end) { struct bootnode *nd = &numa_nodes[i]; @@ -246,6 +287,71 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) node_set_online(nodeid); } +int __init numa_register_memblks(void) +{ + int i; + + /* + * Join together blocks on the same node, holes between + * which don't overlap with memory on other nodes. + */ + for (i = 0; i < num_node_memblks; ++i) { + int j, k; + + for (j = i + 1; j < num_node_memblks; ++j) { + unsigned long start, end; + + if (memblk_nodeid[i] != memblk_nodeid[j]) + continue; + start = min(node_memblk_range[i].end, + node_memblk_range[j].end); + end = max(node_memblk_range[i].start, + node_memblk_range[j].start); + for (k = 0; k < num_node_memblks; ++k) { + if (memblk_nodeid[i] == memblk_nodeid[k]) + continue; + if (start < node_memblk_range[k].end && + end > node_memblk_range[k].start) + break; + } + if (k < num_node_memblks) + continue; + start = min(node_memblk_range[i].start, + node_memblk_range[j].start); + end = max(node_memblk_range[i].end, + node_memblk_range[j].end); + printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", + memblk_nodeid[i], + node_memblk_range[i].start, + node_memblk_range[i].end, + node_memblk_range[j].start, + node_memblk_range[j].end, + start, end); + node_memblk_range[i].start = start; + node_memblk_range[i].end = end; + k = --num_node_memblks - j; + memmove(memblk_nodeid + j, memblk_nodeid + j+1, + k * sizeof(*memblk_nodeid)); + memmove(node_memblk_range + j, node_memblk_range + j+1, + k * sizeof(*node_memblk_range)); + --j; + } + } + + memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks, + memblk_nodeid); + if (memnode_shift < 0) { + printk(KERN_ERR "NUMA: No NUMA node hash function found. 
Contact maintainer\n"); + return -EINVAL; + } + + for (i = 0; i < num_node_memblks; i++) + memblock_x86_register_active_regions(memblk_nodeid[i], + node_memblk_range[i].start >> PAGE_SHIFT, + node_memblk_range[i].end >> PAGE_SHIFT); + return 0; +} + #ifdef CONFIG_NUMA_EMU /* Numa emulation */ static struct bootnode nodes[MAX_NUMNODES] __initdata; @@ -653,6 +759,9 @@ void __init initmem_init(void) nodes_clear(mem_nodes_parsed); nodes_clear(node_possible_map); nodes_clear(node_online_map); + num_node_memblks = 0; + memset(node_memblk_range, 0, sizeof(node_memblk_range)); + memset(memblk_nodeid, 0, sizeof(memblk_nodeid)); memset(numa_nodes, 0, sizeof(numa_nodes)); if (numa_init[i]() < 0) diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 82b1087..341b371 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -30,30 +30,11 @@ static struct acpi_table_slit *acpi_slit; static struct bootnode nodes_add[MAX_NUMNODES]; -static int num_node_memblks __initdata; -static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata; -static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata; - static __init int setup_node(int pxm) { return acpi_map_pxm_to_node(pxm); } -static __init int conflicting_memblks(unsigned long start, unsigned long end) -{ - int i; - for (i = 0; i < num_node_memblks; i++) { - struct bootnode *nd = &node_memblk_range[i]; - if (nd->start == nd->end) - continue; - if (nd->end > start && nd->start < end) - return memblk_nodeid[i]; - if (nd->end == end && nd->start == start) - return memblk_nodeid[i]; - } - return -1; -} - static __init void bad_srat(void) { int i; @@ -233,7 +214,6 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) struct bootnode *nd; unsigned long start, end; int node, pxm; - int i; if (srat_disabled()) return; @@ -255,16 +235,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) bad_srat(); return; } - i = conflicting_memblks(start, end); - if (i == node) { - printk(KERN_WARNING - "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n", - pxm, start, end, numa_nodes[i].start, numa_nodes[i].end); - } else if (i >= 0) { - printk(KERN_ERR - "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n", - pxm, start, end, node_to_pxm(i), - numa_nodes[i].start, numa_nodes[i].end); + + if (numa_add_memblk(node, start, end) < 0) { bad_srat(); return; } @@ -285,11 +257,6 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) } } else update_nodes_add(node, start, end); - - node_memblk_range[num_node_memblks].start = start; - node_memblk_range[num_node_memblks].end = end; - memblk_nodeid[num_node_memblks] = node; - num_node_memblks++; } /* Sanity check to catch more bad SRATs (they are amazingly common). @@ -341,68 +308,11 @@ int __init acpi_scan_nodes(void) if (acpi_numa <= 0) return -1; - /* - * Join together blocks on the same node, holes between - * which don't overlap with memory on other nodes. 
- */ - for (i = 0; i < num_node_memblks; ++i) { - int j, k; - - for (j = i + 1; j < num_node_memblks; ++j) { - unsigned long start, end; - - if (memblk_nodeid[i] != memblk_nodeid[j]) - continue; - start = min(node_memblk_range[i].end, - node_memblk_range[j].end); - end = max(node_memblk_range[i].start, - node_memblk_range[j].start); - for (k = 0; k < num_node_memblks; ++k) { - if (memblk_nodeid[i] == memblk_nodeid[k]) - continue; - if (start < node_memblk_range[k].end && - end > node_memblk_range[k].start) - break; - } - if (k < num_node_memblks) - continue; - start = min(node_memblk_range[i].start, - node_memblk_range[j].start); - end = max(node_memblk_range[i].end, - node_memblk_range[j].end); - printk(KERN_INFO "SRAT: Node %d " - "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", - memblk_nodeid[i], - node_memblk_range[i].start, - node_memblk_range[i].end, - node_memblk_range[j].start, - node_memblk_range[j].end, - start, end); - node_memblk_range[i].start = start; - node_memblk_range[i].end = end; - k = --num_node_memblks - j; - memmove(memblk_nodeid + j, memblk_nodeid + j+1, - k * sizeof(*memblk_nodeid)); - memmove(node_memblk_range + j, node_memblk_range + j+1, - k * sizeof(*node_memblk_range)); - --j; - } - } - - memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks, - memblk_nodeid); - if (memnode_shift < 0) { - printk(KERN_ERR - "SRAT: No NUMA node hash function found. Contact maintainer\n"); + if (numa_register_memblks() < 0) { bad_srat(); return -1; } - for (i = 0; i < num_node_memblks; i++) - memblock_x86_register_active_regions(memblk_nodeid[i], - node_memblk_range[i].start >> PAGE_SHIFT, - node_memblk_range[i].end >> PAGE_SHIFT); - /* for out of order entries in SRAT */ sort_node_map(); if (!nodes_cover_memory(numa_nodes)) { -- cgit v0.10.2 From 43a662f04f731c331706456c9852ef7146ba5d85 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:08 +0100 Subject: x86-64, NUMA: Unify use of memblk in all init methods Make both amd and dummy use numa_add_memblk() to describe the detected memory blocks. This allows initmem_init() to call numa_register_memblk() regardless of init method in use. Drop custom memory registration codes from amd and dummy. After this change, memblk merge/cleanup in numa_register_memblks() is applied to all init methods. As this makes compute_hash_shift() and numa_register_memblks() used only inside numa_64.c, make them static. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
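[Illustration] numa_add_memblk(), which every init method now calls to describe its memory blocks, rejects a block whose range overlaps one already recorded for a different node. Below is a simplified standalone model of that interval-overlap check; the node layout and the offending range are invented, and the exact-duplicate special case handled by the kernel version is omitted.

#include <stdio.h>
#include <stdint.h>

struct memblk { uint64_t start, end; int nid; };

/* returns the nid of an existing block that overlaps [start, end), or -1 */
static int conflicting_memblk(const struct memblk *blk, int n,
                              uint64_t start, uint64_t end)
{
        for (int i = 0; i < n; i++) {
                if (blk[i].start == blk[i].end)
                        continue;               /* empty block, ignore */
                if (blk[i].end > start && blk[i].start < end)
                        return blk[i].nid;      /* ranges intersect */
        }
        return -1;
}

int main(void)
{
        /* invented layout: node 0 owns [0, 4G), node 1 owns [4G, 8G) */
        struct memblk blks[] = {
                { 0x000000000ULL, 0x100000000ULL, 0 },
                { 0x100000000ULL, 0x200000000ULL, 1 },
        };

        /* a new block claimed for node 1 that strays into node 0's range */
        int nid = conflicting_memblk(blks, 2, 0x0c0000000ULL, 0x180000000ULL);
        if (nid >= 0 && nid != 1)
                printf("rejected: overlaps a block of node %d\n", nid);
        return 0;
}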
Peter Anvin diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 3306a2b..e925605 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -8,9 +8,6 @@ struct bootnode { u64 end; }; -extern int compute_hash_shift(struct bootnode *nodes, int numblks, - int *nodeids); - #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) extern int numa_off; @@ -33,7 +30,6 @@ extern struct bootnode numa_nodes[MAX_NUMNODES] __initdata; extern int __cpuinit numa_cpu_node(int cpu); extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); -extern int __init numa_register_memblks(void); #ifdef CONFIG_NUMA_EMU #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index cf29527..d6d7aa4 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -167,6 +167,7 @@ int __init amd_numa_init(void) numa_nodes[nodeid].start = base; numa_nodes[nodeid].end = limit; + numa_add_memblk(nodeid, base, limit); prevbase = base; @@ -263,18 +264,6 @@ int __init amd_scan_nodes(void) { int i; - memnode_shift = compute_hash_shift(numa_nodes, 8, NULL); - if (memnode_shift < 0) { - pr_err("No NUMA node hash function found. Contact maintainer\n"); - return -1; - } - pr_info("Using node hash shift of %d\n", memnode_shift); - - /* use the coreid bits from early_identify_cpu */ - for_each_node_mask(i, node_possible_map) - memblock_x86_register_active_regions(i, - numa_nodes[i].start >> PAGE_SHIFT, - numa_nodes[i].end >> PAGE_SHIFT); init_memory_mapping_high(); for_each_node_mask(i, node_possible_map) setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a1d702d..552080e 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -131,8 +131,8 @@ static int __init extract_lsb_from_nodes(const struct bootnode *nodes, return i; } -int __init compute_hash_shift(struct bootnode *nodes, int numnodes, - int *nodeids) +static int __init compute_hash_shift(struct bootnode *nodes, int numnodes, + int *nodeids) { int shift; @@ -287,7 +287,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) node_set_online(nodeid); } -int __init numa_register_memblks(void) +static int __init numa_register_memblks(void) { int i; @@ -713,17 +713,13 @@ static int dummy_numa_init(void) node_set(0, cpu_nodes_parsed); node_set(0, mem_nodes_parsed); + numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); return 0; } static int dummy_scan_nodes(void) { - /* setup dummy node covering all memory */ - memnode_shift = 63; - memnodemap = memnode.embedded_map; - memnodemap[0] = 0; - memblock_x86_register_active_regions(0, 0, max_pfn); init_memory_mapping_high(); setup_node_bootmem(0, 0, max_pfn << PAGE_SHIFT); numa_init_array(); @@ -784,6 +780,9 @@ void __init initmem_init(void) if (WARN_ON(nodes_empty(node_possible_map))) continue; + if (numa_register_memblks() < 0) + continue; + if (!scan_nodes[i]()) return; } diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 341b371..69f1471 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -308,11 +308,6 @@ int __init acpi_scan_nodes(void) if (acpi_numa <= 0) return -1; - if (numa_register_memblks() < 0) { - bad_srat(); - return -1; - } - /* for out of order entries in SRAT */ sort_node_map(); if (!nodes_cover_memory(numa_nodes)) { -- cgit v0.10.2 From fd0435d8fb1d4e5771f9ae3af71f2a77c1f4bd09 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:08 +0100 
Subject: x86-64, NUMA: Unify the rest of memblk registration Move the remaining memblk registration logic from acpi_scan_nodes() to numa_register_memblks() and initmem_init(). This applies nodes_cover_memory() sanity check, memory node sorting and node_online() checking, which were only applied to acpi, to all init methods. As all memblk registration is moved to common code, active range clearing is moved to initmem_init() too and removed from bad_srat(). Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index d6d7aa4..9c9f46a 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -262,12 +262,5 @@ void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes) int __init amd_scan_nodes(void) { - int i; - - init_memory_mapping_high(); - for_each_node_mask(i, node_possible_map) - setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end); - - numa_init_array(); return 0; } diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 552080e..748c6b5 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -287,6 +287,37 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) node_set_online(nodeid); } +/* + * Sanity check to catch more bad NUMA configurations (they are amazingly + * common). Make sure the nodes cover all memory. + */ +static int __init nodes_cover_memory(const struct bootnode *nodes) +{ + unsigned long numaram, e820ram; + int i; + + numaram = 0; + for_each_node_mask(i, mem_nodes_parsed) { + unsigned long s = nodes[i].start >> PAGE_SHIFT; + unsigned long e = nodes[i].end >> PAGE_SHIFT; + numaram += e - s; + numaram -= __absent_pages_in_range(i, s, e); + if ((long)numaram < 0) + numaram = 0; + } + + e820ram = max_pfn - + (memblock_x86_hole_size(0, max_pfn<> PAGE_SHIFT); + /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ + if ((long)(e820ram - numaram) >= (1<<(20 - PAGE_SHIFT))) { + printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", + (numaram << PAGE_SHIFT) >> 20, + (e820ram << PAGE_SHIFT) >> 20); + return 0; + } + return 1; +} + static int __init numa_register_memblks(void) { int i; @@ -349,6 +380,27 @@ static int __init numa_register_memblks(void) memblock_x86_register_active_regions(memblk_nodeid[i], node_memblk_range[i].start >> PAGE_SHIFT, node_memblk_range[i].end >> PAGE_SHIFT); + + /* for out of order entries */ + sort_node_map(); + if (!nodes_cover_memory(numa_nodes)) + return -EINVAL; + + init_memory_mapping_high(); + + /* Finally register nodes. */ + for_each_node_mask(i, node_possible_map) + setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end); + + /* + * Try again in case setup_node_bootmem missed one due to missing + * bootmem. 
+ */ + for_each_node_mask(i, node_possible_map) + if (!node_online(i)) + setup_node_bootmem(i, numa_nodes[i].start, + numa_nodes[i].end); + return 0; } @@ -714,16 +766,14 @@ static int dummy_numa_init(void) node_set(0, cpu_nodes_parsed); node_set(0, mem_nodes_parsed); numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); + numa_nodes[0].start = 0; + numa_nodes[0].end = (u64)max_pfn << PAGE_SHIFT; return 0; } static int dummy_scan_nodes(void) { - init_memory_mapping_high(); - setup_node_bootmem(0, 0, max_pfn << PAGE_SHIFT); - numa_init_array(); - return 0; } @@ -759,6 +809,7 @@ void __init initmem_init(void) memset(node_memblk_range, 0, sizeof(node_memblk_range)); memset(memblk_nodeid, 0, sizeof(memblk_nodeid)); memset(numa_nodes, 0, sizeof(numa_nodes)); + remove_all_active_ranges(); if (numa_init[i]() < 0) continue; @@ -783,8 +834,19 @@ void __init initmem_init(void) if (numa_register_memblks() < 0) continue; - if (!scan_nodes[i]()) - return; + if (scan_nodes[i]() < 0) + continue; + + for (j = 0; j < nr_cpu_ids; j++) { + int nid = early_cpu_to_node(j); + + if (nid == NUMA_NO_NODE) + continue; + if (!node_online(nid)) + numa_clear_node(j); + } + numa_init_array(); + return; } BUG(); } diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 69f1471..4a2c33b 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -44,7 +44,6 @@ static __init void bad_srat(void) numa_nodes[i].start = numa_nodes[i].end = 0; nodes_add[i].start = nodes_add[i].end = 0; } - remove_all_active_ranges(); } static __init inline int srat_disabled(void) @@ -259,35 +258,6 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) update_nodes_add(node, start, end); } -/* Sanity check to catch more bad SRATs (they are amazingly common). - Make sure the PXMs cover all memory. */ -static int __init nodes_cover_memory(const struct bootnode *nodes) -{ - int i; - unsigned long pxmram, e820ram; - - pxmram = 0; - for_each_node_mask(i, mem_nodes_parsed) { - unsigned long s = nodes[i].start >> PAGE_SHIFT; - unsigned long e = nodes[i].end >> PAGE_SHIFT; - pxmram += e - s; - pxmram -= __absent_pages_in_range(i, s, e); - if ((long)pxmram < 0) - pxmram = 0; - } - - e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<>PAGE_SHIFT); - /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ - if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) { - printk(KERN_ERR - "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n", - (pxmram << PAGE_SHIFT) >> 20, - (e820ram << PAGE_SHIFT) >> 20); - return 0; - } - return 1; -} - void __init acpi_numa_arch_fixup(void) {} int __init x86_acpi_numa_init(void) @@ -303,39 +273,8 @@ int __init x86_acpi_numa_init(void) /* Use the information discovered above to actually set up the nodes. 
*/ int __init acpi_scan_nodes(void) { - int i; - if (acpi_numa <= 0) return -1; - - /* for out of order entries in SRAT */ - sort_node_map(); - if (!nodes_cover_memory(numa_nodes)) { - bad_srat(); - return -1; - } - - init_memory_mapping_high(); - - /* Finally register nodes */ - for_each_node_mask(i, node_possible_map) - setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end); - /* Try again in case setup_node_bootmem missed one due - to missing bootmem */ - for_each_node_mask(i, node_possible_map) - if (!node_online(i)) - setup_node_bootmem(i, numa_nodes[i].start, - numa_nodes[i].end); - - for (i = 0; i < nr_cpu_ids; i++) { - int node = early_cpu_to_node(i); - - if (node == NUMA_NO_NODE) - continue; - if (!node_online(node)) - numa_clear_node(i); - } - numa_init_array(); return 0; } -- cgit v0.10.2 From 5d371b08fea80c4fb7450d31e5a4e35b438ef850 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:08 +0100 Subject: x86-64, NUMA: Kill {acpi|amd|dummy}_scan_nodes() They are empty now. Kill them. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 12bd1fd..cfa3d5c 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -186,7 +186,6 @@ struct bootnode; #ifdef CONFIG_ACPI_NUMA extern int acpi_numa; extern int x86_acpi_numa_init(void); -extern int acpi_scan_nodes(void); #ifdef CONFIG_NUMA_EMU extern void acpi_fake_nodes(const struct bootnode *fake_nodes, diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 246cdc6..384d118 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -17,7 +17,6 @@ extern int early_is_amd_nb(u32 value); extern int amd_cache_northbridges(void); extern void amd_flush_garts(void); extern int amd_numa_init(void); -extern int amd_scan_nodes(void); extern int amd_get_subcaches(int); extern int amd_set_subcaches(int, int); diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index 9c9f46a..90cf297 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -259,8 +259,3 @@ void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes) memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node)); } #endif /* CONFIG_NUMA_EMU */ - -int __init amd_scan_nodes(void) -{ - return 0; -} diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 748c6b5..e211c00 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -772,25 +772,17 @@ static int dummy_numa_init(void) return 0; } -static int dummy_scan_nodes(void) -{ - return 0; -} - void __init initmem_init(void) { int (*numa_init[])(void) = { [2] = dummy_numa_init }; - int (*scan_nodes[])(void) = { [2] = dummy_scan_nodes }; int i, j; if (!numa_off) { #ifdef CONFIG_ACPI_NUMA numa_init[0] = x86_acpi_numa_init; - scan_nodes[0] = acpi_scan_nodes; #endif #ifdef CONFIG_AMD_NUMA numa_init[1] = amd_numa_init; - scan_nodes[1] = amd_scan_nodes; #endif } @@ -834,9 +826,6 @@ void __init initmem_init(void) if (numa_register_memblks() < 0) continue; - if (scan_nodes[i]() < 0) - continue; - for (j = 0; j < nr_cpu_ids; j++) { int nid = early_cpu_to_node(j); diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 4a2c33b..d56eff8 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -270,14 +270,6 @@ int __init x86_acpi_numa_init(void) return srat_disabled() ? 
-EINVAL : 0; } -/* Use the information discovered above to actually set up the nodes. */ -int __init acpi_scan_nodes(void) -{ - if (acpi_numa <= 0) - return -1; - return 0; -} - #ifdef CONFIG_NUMA_EMU static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = { [0 ... MAX_NUMNODES-1] = PXM_INVAL -- cgit v0.10.2 From 8968dab8ad90ea16ef92f2406868354ea3ab6bb9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:08 +0100 Subject: x86-64, NUMA: Remove %NULL @nodeids handling from compute_hash_shift() numa_emulation() called compute_hash_shift() with %NULL @nodeids which meant identity mapping between index and nodeid. Make numa_emulation() build identity array and drop %NULL @nodeids handling from populate_memnodemap() and thus from compute_hash_shift(). This is to prepare for transition to using memblks instead. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index e211c00..243d18d 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -63,12 +63,7 @@ static int __init populate_memnodemap(const struct bootnode *nodes, do { if (memnodemap[addr >> shift] != NUMA_NO_NODE) return -1; - - if (!nodeids) - memnodemap[addr >> shift] = i; - else - memnodemap[addr >> shift] = nodeids[i]; - + memnodemap[addr >> shift] = nodeids[i]; addr += (1UL << shift); } while (addr < end); res = 1; @@ -706,6 +701,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) static int __init numa_emulation(unsigned long start_pfn, unsigned long last_pfn, int acpi, int amd) { + static int nodeid[NR_NODE_MEMBLKS] __initdata; u64 addr = start_pfn << PAGE_SHIFT; u64 max_addr = last_pfn << PAGE_SHIFT; int num_nodes; @@ -730,7 +726,11 @@ static int __init numa_emulation(unsigned long start_pfn, if (num_nodes < 0) return num_nodes; - memnode_shift = compute_hash_shift(nodes, num_nodes, NULL); + + for (i = 0; i < ARRAY_SIZE(nodeid); i++) + nodeid[i] = i; + + memnode_shift = compute_hash_shift(nodes, num_nodes, nodeid); if (memnode_shift < 0) { memnode_shift = 0; printk(KERN_ERR "No NUMA hash function found. NUMA emulation " -- cgit v0.10.2 From 97e7b78d0674882a0aae043fda428c583dbb225d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:08 +0100 Subject: x86-64, NUMA: Introduce struct numa_meminfo Arrays for memblks and nodeids and their length lived in separate variables making things unnecessarily cumbersome. Introduce struct numa_meminfo which contains all memory configuration info. This patch doesn't cause any behavior change. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 243d18d..c3496e2 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -22,6 +22,17 @@ #include #include +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; + struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); @@ -33,9 +44,7 @@ struct memnode memnode; static unsigned long __initdata nodemap_addr; static unsigned long __initdata nodemap_size; -static int num_node_memblks __initdata; -static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata; -static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata; +static struct numa_meminfo numa_meminfo __initdata; struct bootnode numa_nodes[MAX_NUMNODES] __initdata; @@ -46,16 +55,15 @@ struct bootnode numa_nodes[MAX_NUMNODES] __initdata; * 0 if memnodmap[] too small (of shift too small) * -1 if node overlap or lost ram (shift too big) */ -static int __init populate_memnodemap(const struct bootnode *nodes, - int numnodes, int shift, int *nodeids) +static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift) { unsigned long addr, end; int i, res = -1; memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize); - for (i = 0; i < numnodes; i++) { - addr = nodes[i].start; - end = nodes[i].end; + for (i = 0; i < mi->nr_blks; i++) { + addr = mi->blk[i].start; + end = mi->blk[i].end; if (addr >= end) continue; if ((end >> shift) >= memnodemapsize) @@ -63,7 +71,7 @@ static int __init populate_memnodemap(const struct bootnode *nodes, do { if (memnodemap[addr >> shift] != NUMA_NO_NODE) return -1; - memnodemap[addr >> shift] = nodeids[i]; + memnodemap[addr >> shift] = mi->blk[i].nid; addr += (1UL << shift); } while (addr < end); res = 1; @@ -101,16 +109,15 @@ static int __init allocate_cachealigned_memnodemap(void) * The LSB of all start and end addresses in the node map is the value of the * maximum possible shift. 
*/ -static int __init extract_lsb_from_nodes(const struct bootnode *nodes, - int numnodes) +static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi) { int i, nodes_used = 0; unsigned long start, end; unsigned long bitfield = 0, memtop = 0; - for (i = 0; i < numnodes; i++) { - start = nodes[i].start; - end = nodes[i].end; + for (i = 0; i < mi->nr_blks; i++) { + start = mi->blk[i].start; + end = mi->blk[i].end; if (start >= end) continue; bitfield |= start; @@ -126,18 +133,17 @@ static int __init extract_lsb_from_nodes(const struct bootnode *nodes, return i; } -static int __init compute_hash_shift(struct bootnode *nodes, int numnodes, - int *nodeids) +static int __init compute_hash_shift(const struct numa_meminfo *mi) { int shift; - shift = extract_lsb_from_nodes(nodes, numnodes); + shift = extract_lsb_from_nodes(mi); if (allocate_cachealigned_memnodemap()) return -1; printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift); - if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) { + if (populate_memnodemap(mi, shift) != 1) { printk(KERN_INFO "Your memory is not aligned you need to " "rebuild your kernel with a bigger NODEMAPSIZE " "shift=%d\n", shift); @@ -185,21 +191,25 @@ static void * __init early_node_mem(int nodeid, unsigned long start, static __init int conflicting_memblks(unsigned long start, unsigned long end) { + struct numa_meminfo *mi = &numa_meminfo; int i; - for (i = 0; i < num_node_memblks; i++) { - struct bootnode *nd = &node_memblk_range[i]; - if (nd->start == nd->end) + + for (i = 0; i < mi->nr_blks; i++) { + struct numa_memblk *blk = &mi->blk[i]; + + if (blk->start == blk->end) continue; - if (nd->end > start && nd->start < end) - return memblk_nodeid[i]; - if (nd->end == end && nd->start == start) - return memblk_nodeid[i]; + if (blk->end > start && blk->start < end) + return blk->nid; + if (blk->end == end && blk->start == start) + return blk->nid; } return -1; } int __init numa_add_memblk(int nid, u64 start, u64 end) { + struct numa_meminfo *mi = &numa_meminfo; int i; i = conflicting_memblks(start, end); @@ -213,10 +223,10 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) return -EINVAL; } - node_memblk_range[num_node_memblks].start = start; - node_memblk_range[num_node_memblks].end = end; - memblk_nodeid[num_node_memblks] = nid; - num_node_memblks++; + mi->blk[mi->nr_blks].start = start; + mi->blk[mi->nr_blks].end = end; + mi->blk[mi->nr_blks].nid = nid; + mi->nr_blks++; return 0; } @@ -315,66 +325,59 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) static int __init numa_register_memblks(void) { + struct numa_meminfo *mi = &numa_meminfo; int i; /* * Join together blocks on the same node, holes between * which don't overlap with memory on other nodes. 
*/ - for (i = 0; i < num_node_memblks; ++i) { + for (i = 0; i < mi->nr_blks; ++i) { + struct numa_memblk *bi = &mi->blk[i]; int j, k; - for (j = i + 1; j < num_node_memblks; ++j) { + for (j = i + 1; j < mi->nr_blks; ++j) { + struct numa_memblk *bj = &mi->blk[j]; unsigned long start, end; - if (memblk_nodeid[i] != memblk_nodeid[j]) + if (bi->nid != bj->nid) continue; - start = min(node_memblk_range[i].end, - node_memblk_range[j].end); - end = max(node_memblk_range[i].start, - node_memblk_range[j].start); - for (k = 0; k < num_node_memblks; ++k) { - if (memblk_nodeid[i] == memblk_nodeid[k]) + start = min(bi->end, bj->end); + end = max(bi->start, bj->start); + for (k = 0; k < mi->nr_blks; ++k) { + struct numa_memblk *bk = &mi->blk[k]; + + if (bi->nid == bk->nid) continue; - if (start < node_memblk_range[k].end && - end > node_memblk_range[k].start) + if (start < bk->end && end > bk->start) break; } - if (k < num_node_memblks) + if (k < mi->nr_blks) continue; - start = min(node_memblk_range[i].start, - node_memblk_range[j].start); - end = max(node_memblk_range[i].end, - node_memblk_range[j].end); + start = min(bi->start, bj->start); + end = max(bi->end, bj->end); printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", - memblk_nodeid[i], - node_memblk_range[i].start, - node_memblk_range[i].end, - node_memblk_range[j].start, - node_memblk_range[j].end, + bi->nid, bi->start, bi->end, bj->start, bj->end, start, end); - node_memblk_range[i].start = start; - node_memblk_range[i].end = end; - k = --num_node_memblks - j; - memmove(memblk_nodeid + j, memblk_nodeid + j+1, - k * sizeof(*memblk_nodeid)); - memmove(node_memblk_range + j, node_memblk_range + j+1, - k * sizeof(*node_memblk_range)); + bi->start = start; + bi->end = end; + k = --mi->nr_blks - j; + memmove(mi->blk + j, mi->blk + j + 1, + k * sizeof(mi->blk[0])); --j; } } - memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks, - memblk_nodeid); + memnode_shift = compute_hash_shift(mi); if (memnode_shift < 0) { printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n"); return -EINVAL; } - for (i = 0; i < num_node_memblks; i++) - memblock_x86_register_active_regions(memblk_nodeid[i], - node_memblk_range[i].start >> PAGE_SHIFT, - node_memblk_range[i].end >> PAGE_SHIFT); + for (i = 0; i < mi->nr_blks; i++) + memblock_x86_register_active_regions(mi->blk[i].nid, + mi->blk[i].start >> PAGE_SHIFT, + mi->blk[i].end >> PAGE_SHIFT); /* for out of order entries */ sort_node_map(); @@ -701,7 +704,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) static int __init numa_emulation(unsigned long start_pfn, unsigned long last_pfn, int acpi, int amd) { - static int nodeid[NR_NODE_MEMBLKS] __initdata; + static struct numa_meminfo ei __initdata; u64 addr = start_pfn << PAGE_SHIFT; u64 max_addr = last_pfn << PAGE_SHIFT; int num_nodes; @@ -727,10 +730,14 @@ static int __init numa_emulation(unsigned long start_pfn, if (num_nodes < 0) return num_nodes; - for (i = 0; i < ARRAY_SIZE(nodeid); i++) - nodeid[i] = i; + ei.nr_blks = num_nodes; + for (i = 0; i < ei.nr_blks; i++) { + ei.blk[i].start = nodes[i].start; + ei.blk[i].end = nodes[i].end; + ei.blk[i].nid = i; + } - memnode_shift = compute_hash_shift(nodes, num_nodes, nodeid); + memnode_shift = compute_hash_shift(&ei); if (memnode_shift < 0) { memnode_shift = 0; printk(KERN_ERR "No NUMA hash function found. 
NUMA emulation " @@ -797,9 +804,7 @@ void __init initmem_init(void) nodes_clear(mem_nodes_parsed); nodes_clear(node_possible_map); nodes_clear(node_online_map); - num_node_memblks = 0; - memset(node_memblk_range, 0, sizeof(node_memblk_range)); - memset(memblk_nodeid, 0, sizeof(memblk_nodeid)); + memset(&numa_meminfo, 0, sizeof(numa_meminfo)); memset(numa_nodes, 0, sizeof(numa_nodes)); remove_all_active_ranges(); -- cgit v0.10.2 From f9c60251c3d08777db6758cafd959a55a838abd6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:09 +0100 Subject: x86-64, NUMA: Separate out numa_cleanup_meminfo() Separate out numa_cleanup_meminfo() from numa_register_memblks(). node_possible_map initialization is moved to the top of the split numa_register_memblks(). This patch doesn't cause behavior change. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index c3496e2..f2721de 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -292,40 +292,8 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) node_set_online(nodeid); } -/* - * Sanity check to catch more bad NUMA configurations (they are amazingly - * common). Make sure the nodes cover all memory. - */ -static int __init nodes_cover_memory(const struct bootnode *nodes) +static int __init numa_cleanup_meminfo(struct numa_meminfo *mi) { - unsigned long numaram, e820ram; - int i; - - numaram = 0; - for_each_node_mask(i, mem_nodes_parsed) { - unsigned long s = nodes[i].start >> PAGE_SHIFT; - unsigned long e = nodes[i].end >> PAGE_SHIFT; - numaram += e - s; - numaram -= __absent_pages_in_range(i, s, e); - if ((long)numaram < 0) - numaram = 0; - } - - e820ram = max_pfn - - (memblock_x86_hole_size(0, max_pfn<> PAGE_SHIFT); - /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ - if ((long)(e820ram - numaram) >= (1<<(20 - PAGE_SHIFT))) { - printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", - (numaram << PAGE_SHIFT) >> 20, - (e820ram << PAGE_SHIFT) >> 20); - return 0; - } - return 1; -} - -static int __init numa_register_memblks(void) -{ - struct numa_meminfo *mi = &numa_meminfo; int i; /* @@ -368,6 +336,49 @@ static int __init numa_register_memblks(void) } } + return 0; +} + +/* + * Sanity check to catch more bad NUMA configurations (they are amazingly + * common). Make sure the nodes cover all memory. + */ +static int __init nodes_cover_memory(const struct bootnode *nodes) +{ + unsigned long numaram, e820ram; + int i; + + numaram = 0; + for_each_node_mask(i, mem_nodes_parsed) { + unsigned long s = nodes[i].start >> PAGE_SHIFT; + unsigned long e = nodes[i].end >> PAGE_SHIFT; + numaram += e - s; + numaram -= __absent_pages_in_range(i, s, e); + if ((long)numaram < 0) + numaram = 0; + } + + e820ram = max_pfn - (memblock_x86_hole_size(0, + max_pfn << PAGE_SHIFT) >> PAGE_SHIFT); + /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ + if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { + printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. 
Not used.\n", + (numaram << PAGE_SHIFT) >> 20, + (e820ram << PAGE_SHIFT) >> 20); + return 0; + } + return 1; +} + +static int __init numa_register_memblks(struct numa_meminfo *mi) +{ + int i; + + /* Account for nodes with cpus and no memory */ + nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed); + if (WARN_ON(nodes_empty(node_possible_map))) + return -EINVAL; + memnode_shift = compute_hash_shift(mi); if (memnode_shift < 0) { printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n"); @@ -823,12 +834,10 @@ void __init initmem_init(void) nodes_clear(node_possible_map); nodes_clear(node_online_map); #endif - /* Account for nodes with cpus and no memory */ - nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed); - if (WARN_ON(nodes_empty(node_possible_map))) + if (numa_cleanup_meminfo(&numa_meminfo) < 0) continue; - if (numa_register_memblks() < 0) + if (numa_register_memblks(&numa_meminfo) < 0) continue; for (j = 0; j < nr_cpu_ids; j++) { -- cgit v0.10.2 From 2e756be44714d0ec2f9827e4f4797c60876167a1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:09 +0100 Subject: x86-64, NUMA: make numa_cleanup_meminfo() prettier * Factor out numa_remove_memblk_from(). * Hole detection doesn't need separate start/end. Calculate start/end once. * Relocate comment. * Define iterators at the top and remove unnecessary prefix increments. This prepares for further improvements to the function. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index f2721de..4fd3368 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -230,6 +230,13 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) return 0; } +static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) +{ + mi->nr_blks--; + memmove(&mi->blk[idx], &mi->blk[idx + 1], + (mi->nr_blks - idx) * sizeof(mi->blk[0])); +} + static __init void cutoff_node(int i, unsigned long start, unsigned long end) { struct bootnode *nd = &numa_nodes[i]; @@ -294,25 +301,25 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) static int __init numa_cleanup_meminfo(struct numa_meminfo *mi) { - int i; + int i, j, k; - /* - * Join together blocks on the same node, holes between - * which don't overlap with memory on other nodes. - */ - for (i = 0; i < mi->nr_blks; ++i) { + for (i = 0; i < mi->nr_blks; i++) { struct numa_memblk *bi = &mi->blk[i]; - int j, k; - for (j = i + 1; j < mi->nr_blks; ++j) { + for (j = i + 1; j < mi->nr_blks; j++) { struct numa_memblk *bj = &mi->blk[j]; unsigned long start, end; + /* + * Join together blocks on the same node, holes + * between which don't overlap with memory on other + * nodes. 
+ */ if (bi->nid != bj->nid) continue; - start = min(bi->end, bj->end); - end = max(bi->start, bj->start); - for (k = 0; k < mi->nr_blks; ++k) { + start = min(bi->start, bj->start); + end = max(bi->end, bj->end); + for (k = 0; k < mi->nr_blks; k++) { struct numa_memblk *bk = &mi->blk[k]; if (bi->nid == bk->nid) @@ -322,17 +329,12 @@ static int __init numa_cleanup_meminfo(struct numa_meminfo *mi) } if (k < mi->nr_blks) continue; - start = min(bi->start, bj->start); - end = max(bi->end, bj->end); printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", bi->nid, bi->start, bi->end, bj->start, bj->end, start, end); bi->start = start; bi->end = end; - k = --mi->nr_blks - j; - memmove(mi->blk + j, mi->blk + j + 1, - k * sizeof(mi->blk[0])); - --j; + numa_remove_memblk_from(j--, mi); } } -- cgit v0.10.2 From 56e827fbde9a3cb886a2fe138db0d99e98efbfb1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:09 +0100 Subject: x86-64, NUMA: consolidate and improve memblk sanity checks memblk sanity check was scattered around and incomplete. Consolidate and improve. * Confliction detection and cutoff_node() logic are moved to numa_cleanup_meminfo(). * numa_cleanup_meminfo() clears the unused memblks before returning. * Check and warn about invalid input parameters in numa_add_memblk(). * Check the maximum number of memblk isn't exceeded in numa_add_memblk(). * numa_cleanup_meminfo() is now called before numa_emulation() so that the emulation code also uses the cleaned up version. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 4fd3368..20aa1d3 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -189,37 +189,23 @@ static void * __init early_node_mem(int nodeid, unsigned long start, return NULL; } -static __init int conflicting_memblks(unsigned long start, unsigned long end) +int __init numa_add_memblk(int nid, u64 start, u64 end) { struct numa_meminfo *mi = &numa_meminfo; - int i; - for (i = 0; i < mi->nr_blks; i++) { - struct numa_memblk *blk = &mi->blk[i]; + /* ignore zero length blks */ + if (start == end) + return 0; - if (blk->start == blk->end) - continue; - if (blk->end > start && blk->start < end) - return blk->nid; - if (blk->end == end && blk->start == start) - return blk->nid; + /* whine about and ignore invalid blks */ + if (start > end || nid < 0 || nid >= MAX_NUMNODES) { + pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", + nid, start, end); + return 0; } - return -1; -} - -int __init numa_add_memblk(int nid, u64 start, u64 end) -{ - struct numa_meminfo *mi = &numa_meminfo; - int i; - i = conflicting_memblks(start, end); - if (i == nid) { - printk(KERN_WARNING "NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", - nid, start, end, numa_nodes[i].start, numa_nodes[i].end); - } else if (i >= 0) { - printk(KERN_ERR "NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", - nid, start, end, i, - numa_nodes[i].start, numa_nodes[i].end); + if (mi->nr_blks >= NR_NODE_MEMBLKS) { + pr_err("NUMA: too many memblk ranges\n"); return -EINVAL; } @@ -237,22 +223,6 @@ static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) (mi->nr_blks - idx) * sizeof(mi->blk[0])); } -static __init void cutoff_node(int i, unsigned long start, unsigned long end) -{ - struct bootnode *nd = &numa_nodes[i]; - - if (nd->start < start) { - nd->start = start; - if (nd->end 
< nd->start) - nd->start = nd->end; - } - if (nd->end > end) { - nd->end = end; - if (nd->start > nd->end) - nd->start = nd->end; - } -} - /* Initialize bootmem allocator for a node */ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) @@ -301,24 +271,53 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) static int __init numa_cleanup_meminfo(struct numa_meminfo *mi) { + const u64 low = 0; + const u64 high = (u64)max_pfn << PAGE_SHIFT; int i, j, k; for (i = 0; i < mi->nr_blks; i++) { struct numa_memblk *bi = &mi->blk[i]; + /* make sure all blocks are inside the limits */ + bi->start = max(bi->start, low); + bi->end = min(bi->end, high); + + /* and there's no empty block */ + if (bi->start == bi->end) { + numa_remove_memblk_from(i--, mi); + continue; + } + for (j = i + 1; j < mi->nr_blks; j++) { struct numa_memblk *bj = &mi->blk[j]; unsigned long start, end; /* + * See whether there are overlapping blocks. Whine + * about but allow overlaps of the same nid. They + * will be merged below. + */ + if (bi->end > bj->start && bi->start < bj->end) { + if (bi->nid != bj->nid) { + pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", + bi->nid, bi->start, bi->end, + bj->nid, bj->start, bj->end); + return -EINVAL; + } + pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", + bi->nid, bi->start, bi->end, + bj->start, bj->end); + } + + /* * Join together blocks on the same node, holes * between which don't overlap with memory on other * nodes. */ if (bi->nid != bj->nid) continue; - start = min(bi->start, bj->start); - end = max(bi->end, bj->end); + start = max(min(bi->start, bj->start), low); + end = min(max(bi->end, bj->end), high); for (k = 0; k < mi->nr_blks; k++) { struct numa_memblk *bk = &mi->blk[k]; @@ -338,6 +337,11 @@ static int __init numa_cleanup_meminfo(struct numa_meminfo *mi) } } + for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { + mi->blk[i].start = mi->blk[i].end = 0; + mi->blk[i].nid = NUMA_NO_NODE; + } + return 0; } @@ -824,10 +828,8 @@ void __init initmem_init(void) if (numa_init[i]() < 0) continue; - /* clean up the node list */ - for (j = 0; j < MAX_NUMNODES; j++) - cutoff_node(j, 0, max_pfn << PAGE_SHIFT); - + if (numa_cleanup_meminfo(&numa_meminfo) < 0) + continue; #ifdef CONFIG_NUMA_EMU setup_physnodes(0, max_pfn << PAGE_SHIFT); if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1)) @@ -836,9 +838,6 @@ void __init initmem_init(void) nodes_clear(node_possible_map); nodes_clear(node_online_map); #endif - if (numa_cleanup_meminfo(&numa_meminfo) < 0) - continue; - if (numa_register_memblks(&numa_meminfo) < 0) continue; -- cgit v0.10.2 From a844ef46fa3055165c28feede6114a711b8375ad Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:09 +0100 Subject: x86-64, NUMA: Add common find_node_by_addr() srat_64.c and amdtopology_64.c had their own versions of find_node_by_addr() which were basically the same. Add common one in numa_64.c and remove the duplicates. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
Peter Anvin diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index e925605..925ade9 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -35,6 +35,7 @@ extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) void numa_emu_cmdline(char *); +int __init find_node_by_addr(unsigned long addr); #endif /* CONFIG_NUMA_EMU */ #else static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index 90cf297..8f7a5eb 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -205,19 +205,6 @@ static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE }; -static int __init find_node_by_addr(unsigned long addr) -{ - int ret = NUMA_NO_NODE; - int i; - - for (i = 0; i < 8; i++) - if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) { - ret = i; - break; - } - return ret; -} - /* * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be * setup to represent the physical topology but reflect the emulated diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 20aa1d3..681bc0d 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -430,6 +430,25 @@ void __init numa_emu_cmdline(char *str) cmdline = str; } +int __init find_node_by_addr(unsigned long addr) +{ + int ret = NUMA_NO_NODE; + int i; + + for_each_node_mask(i, mem_nodes_parsed) { + /* + * Find the real node that this emulated node appears on. For + * the sake of simplicity, we only use a real node's starting + * address to determine which emulated node it appears on. + */ + if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) { + ret = i; + break; + } + } + return ret; +} + static int __init setup_physnodes(unsigned long start, unsigned long end) { int ret = 0; diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index d56eff8..51d0733 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -277,24 +277,6 @@ static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = { static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE }; -static int __init find_node_by_addr(unsigned long addr) -{ - int ret = NUMA_NO_NODE; - int i; - - for_each_node_mask(i, mem_nodes_parsed) { - /* - * Find the real node that this emulated node appears on. For - * the sake of simplicity, we only use a real node's starting - * address to determine which emulated node it appears on. - */ - if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) { - ret = i; - break; - } - } - return ret; -} /* * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID -- cgit v0.10.2 From 91556237ec872e1029e3036174bae3b1a8df65eb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:09 +0100 Subject: x86-64, NUMA: Kill numa_nodes[] numa_nodes[] doesn't carry any information which isn't present in numa_meminfo. Each entry is simply min/max range of all the memblks for the node. This is not only redundant but also inaccurate when memblks for different nodes interleave - for example, find_node_by_addr() can return the wrong nodeid. Kill numa_nodes[] and always use numa_meminfo instead. * nodes_cover_memory() is renamed to numa_meminfo_cover_memory() and now operations on numa_meminfo and returns bool. 
* setup_node_bootmem() needs min/max range. Compute the range on the fly. setup_node_bootmem() invocation is restructured to use outer loop instead of hardcoding the double invocations. * find_node_by_addr() now operates on numa_meminfo. * setup_physnodes() builds physnodes[] from memblks. This will go away when emulation code is updated to use struct numa_meminfo. This patch also makes the following misc changes. * Clearing of nodes_add[] clearing is converted to memset(). * numa_add_memblk() in amd_numa_init() is moved down a bit for consistency. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 925ade9..20b69a9 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -26,7 +26,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, extern nodemask_t cpu_nodes_parsed __initdata; extern nodemask_t mem_nodes_parsed __initdata; -extern struct bootnode numa_nodes[MAX_NUMNODES] __initdata; extern int __cpuinit numa_cpu_node(int cpu); extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index 8f7a5eb..0cb59e5 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -165,12 +165,8 @@ int __init amd_numa_init(void) pr_info("Node %d MemBase %016lx Limit %016lx\n", nodeid, base, limit); - numa_nodes[nodeid].start = base; - numa_nodes[nodeid].end = limit; - numa_add_memblk(nodeid, base, limit); - prevbase = base; - + numa_add_memblk(nodeid, base, limit); node_set(nodeid, mem_nodes_parsed); node_set(nodeid, cpu_nodes_parsed); } diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 681bc0d..c490448 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -46,8 +46,6 @@ static unsigned long __initdata nodemap_size; static struct numa_meminfo numa_meminfo __initdata; -struct bootnode numa_nodes[MAX_NUMNODES] __initdata; - /* * Given a shift value, try to populate memnodemap[] * Returns : @@ -349,17 +347,17 @@ static int __init numa_cleanup_meminfo(struct numa_meminfo *mi) * Sanity check to catch more bad NUMA configurations (they are amazingly * common). Make sure the nodes cover all memory. */ -static int __init nodes_cover_memory(const struct bootnode *nodes) +static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) { unsigned long numaram, e820ram; int i; numaram = 0; - for_each_node_mask(i, mem_nodes_parsed) { - unsigned long s = nodes[i].start >> PAGE_SHIFT; - unsigned long e = nodes[i].end >> PAGE_SHIFT; + for (i = 0; i < mi->nr_blks; i++) { + unsigned long s = mi->blk[i].start >> PAGE_SHIFT; + unsigned long e = mi->blk[i].end >> PAGE_SHIFT; numaram += e - s; - numaram -= __absent_pages_in_range(i, s, e); + numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); if ((long)numaram < 0) numaram = 0; } @@ -371,14 +369,14 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. 
Not used.\n", (numaram << PAGE_SHIFT) >> 20, (e820ram << PAGE_SHIFT) >> 20); - return 0; + return false; } - return 1; + return true; } static int __init numa_register_memblks(struct numa_meminfo *mi) { - int i; + int i, j, nid; /* Account for nodes with cpus and no memory */ nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed); @@ -398,23 +396,34 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) /* for out of order entries */ sort_node_map(); - if (!nodes_cover_memory(numa_nodes)) + if (!numa_meminfo_cover_memory(mi)) return -EINVAL; init_memory_mapping_high(); - /* Finally register nodes. */ - for_each_node_mask(i, node_possible_map) - setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end); - /* - * Try again in case setup_node_bootmem missed one due to missing - * bootmem. + * Finally register nodes. Do it twice in case setup_node_bootmem + * missed one due to missing bootmem. */ - for_each_node_mask(i, node_possible_map) - if (!node_online(i)) - setup_node_bootmem(i, numa_nodes[i].start, - numa_nodes[i].end); + for (i = 0; i < 2; i++) { + for_each_node_mask(nid, node_possible_map) { + u64 start = (u64)max_pfn << PAGE_SHIFT; + u64 end = 0; + + if (node_online(nid)) + continue; + + for (j = 0; j < mi->nr_blks; j++) { + if (nid != mi->blk[j].nid) + continue; + start = min(mi->blk[j].start, start); + end = max(mi->blk[j].end, end); + } + + if (start < end) + setup_node_bootmem(nid, start, end); + } + } return 0; } @@ -432,33 +441,41 @@ void __init numa_emu_cmdline(char *str) int __init find_node_by_addr(unsigned long addr) { - int ret = NUMA_NO_NODE; + const struct numa_meminfo *mi = &numa_meminfo; int i; - for_each_node_mask(i, mem_nodes_parsed) { + for (i = 0; i < mi->nr_blks; i++) { /* * Find the real node that this emulated node appears on. For * the sake of simplicity, we only use a real node's starting * address to determine which emulated node it appears on. 
*/ - if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) { - ret = i; - break; - } + if (addr >= mi->blk[i].start && addr < mi->blk[i].end) + return mi->blk[i].nid; } - return ret; + return NUMA_NO_NODE; } static int __init setup_physnodes(unsigned long start, unsigned long end) { + const struct numa_meminfo *mi = &numa_meminfo; int ret = 0; int i; memset(physnodes, 0, sizeof(physnodes)); - for_each_node_mask(i, mem_nodes_parsed) { - physnodes[i].start = numa_nodes[i].start; - physnodes[i].end = numa_nodes[i].end; + for (i = 0; i < mi->nr_blks; i++) { + int nid = mi->blk[i].nid; + + if (physnodes[nid].start == physnodes[nid].end) { + physnodes[nid].start = mi->blk[i].start; + physnodes[nid].end = mi->blk[i].end; + } else { + physnodes[nid].start = min(physnodes[nid].start, + mi->blk[i].start); + physnodes[nid].end = max(physnodes[nid].end, + mi->blk[i].end); + } } /* @@ -809,8 +826,6 @@ static int dummy_numa_init(void) node_set(0, cpu_nodes_parsed); node_set(0, mem_nodes_parsed); numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); - numa_nodes[0].start = 0; - numa_nodes[0].end = (u64)max_pfn << PAGE_SHIFT; return 0; } @@ -841,7 +856,6 @@ void __init initmem_init(void) nodes_clear(node_possible_map); nodes_clear(node_online_map); memset(&numa_meminfo, 0, sizeof(numa_meminfo)); - memset(numa_nodes, 0, sizeof(numa_nodes)); remove_all_active_ranges(); if (numa_init[i]() < 0) diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 51d0733..e8b3b3c 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -37,13 +37,9 @@ static __init int setup_node(int pxm) static __init void bad_srat(void) { - int i; printk(KERN_ERR "SRAT: SRAT not used.\n"); acpi_numa = -1; - for (i = 0; i < MAX_NUMNODES; i++) { - numa_nodes[i].start = numa_nodes[i].end = 0; - nodes_add[i].start = nodes_add[i].end = 0; - } + memset(nodes_add, 0, sizeof(nodes_add)); } static __init inline int srat_disabled(void) @@ -210,7 +206,6 @@ update_nodes_add(int node, unsigned long start, unsigned long end) void __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { - struct bootnode *nd; unsigned long start, end; int node, pxm; @@ -243,18 +238,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm, start, end); - if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) { - nd = &numa_nodes[node]; - if (!node_test_and_set(node, mem_nodes_parsed)) { - nd->start = start; - nd->end = end; - } else { - if (start < nd->start) - nd->start = start; - if (nd->end < end) - nd->end = end; - } - } else + if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) + node_set(node, mem_nodes_parsed); + else update_nodes_add(node, start, end); } -- cgit v0.10.2 From 92d4a4371eeb89e1e12b9ebbed0956f499b6c2c0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:09 +0100 Subject: x86-64, NUMA: Rename cpu_nodes_parsed to numa_nodes_parsed It's no longer necessary to keep both cpu_nodes_parsed and mem_nodes_parsed. In preparation for merge, rename cpu_nodes_parsed to numa_nodes_parsed. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
Peter Anvin diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 20b69a9..6e94469 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -24,7 +24,7 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, */ #define NODE_MIN_SIZE (4*1024*1024) -extern nodemask_t cpu_nodes_parsed __initdata; +extern nodemask_t numa_nodes_parsed __initdata; extern nodemask_t mem_nodes_parsed __initdata; extern int __cpuinit numa_cpu_node(int cpu); diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index 0cb59e5..e76bffa 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -168,7 +168,7 @@ int __init amd_numa_init(void) prevbase = base; numa_add_memblk(nodeid, base, limit); node_set(nodeid, mem_nodes_parsed); - node_set(nodeid, cpu_nodes_parsed); + node_set(nodeid, numa_nodes_parsed); } if (!nodes_weight(mem_nodes_parsed)) @@ -189,7 +189,7 @@ int __init amd_numa_init(void) apicid_base = boot_cpu_physical_apicid; } - for_each_node_mask(i, cpu_nodes_parsed) + for_each_node_mask(i, numa_nodes_parsed) for (j = apicid_base; j < cores + apicid_base; j++) set_apicid_to_node((i << bits) + j, i); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index c490448..6e4fbd7 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -36,7 +36,7 @@ struct numa_meminfo { struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); -nodemask_t cpu_nodes_parsed __initdata; +nodemask_t numa_nodes_parsed __initdata; nodemask_t mem_nodes_parsed __initdata; struct memnode memnode; @@ -379,7 +379,7 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) int i, j, nid; /* Account for nodes with cpus and no memory */ - nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed); + nodes_or(node_possible_map, mem_nodes_parsed, numa_nodes_parsed); if (WARN_ON(nodes_empty(node_possible_map))) return -EINVAL; @@ -823,7 +823,7 @@ static int dummy_numa_init(void) printk(KERN_INFO "Faking a node at %016lx-%016lx\n", 0LU, max_pfn << PAGE_SHIFT); - node_set(0, cpu_nodes_parsed); + node_set(0, numa_nodes_parsed); node_set(0, mem_nodes_parsed); numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); @@ -851,7 +851,7 @@ void __init initmem_init(void) for (j = 0; j < MAX_LOCAL_APIC; j++) set_apicid_to_node(j, NUMA_NO_NODE); - nodes_clear(cpu_nodes_parsed); + nodes_clear(numa_nodes_parsed); nodes_clear(mem_nodes_parsed); nodes_clear(node_possible_map); nodes_clear(node_online_map); diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index e8b3b3c..8185189 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -94,7 +94,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) return; } set_apicid_to_node(apic_id, node); - node_set(node, cpu_nodes_parsed); + node_set(node, numa_nodes_parsed); acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", pxm, apic_id, node); @@ -134,7 +134,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) } set_apicid_to_node(apic_id, node); - node_set(node, cpu_nodes_parsed); + node_set(node, numa_nodes_parsed); acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", pxm, apic_id, node); @@ -196,7 +196,7 @@ update_nodes_add(int node, unsigned long start, unsigned long end) } if (changed) { - node_set(node, cpu_nodes_parsed); + node_set(node, numa_nodes_parsed); printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end); } -- cgit v0.10.2 
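[Note: minimal illustrative sketch, not part of the patch series.] The commits above move all memory configuration into struct numa_meminfo, whose same-node memblks are joined by numa_cleanup_meminfo() before registration. The self-contained C99 sketch below mirrors that merge step; structure and field names follow the kernel code, while the main() harness and sample ranges are made up for illustration, and the kernel's additional check that the hole between merged blocks does not overlap memory on other nodes is omitted for brevity.

/* Illustrative sketch of the numa_meminfo same-node merge (not kernel code). */
#include <stdio.h>
#include <string.h>

typedef unsigned long long u64;

struct numa_memblk { u64 start; u64 end; int nid; };
struct numa_meminfo { int nr_blks; struct numa_memblk blk[16]; };

/* Drop entry idx and close the gap, as numa_remove_memblk_from() does. */
static void remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/* Join blocks that belong to the same node into a single covering range. */
static void merge_same_nid(struct numa_meminfo *mi)
{
	for (int i = 0; i < mi->nr_blks; i++) {
		for (int j = i + 1; j < mi->nr_blks; j++) {
			if (mi->blk[i].nid != mi->blk[j].nid)
				continue;
			if (mi->blk[j].start < mi->blk[i].start)
				mi->blk[i].start = mi->blk[j].start;
			if (mi->blk[j].end > mi->blk[i].end)
				mi->blk[i].end = mi->blk[j].end;
			remove_memblk_from(j--, mi);
		}
	}
}

int main(void)
{
	/* Two blocks on node 0 separated in the array, one block on node 1. */
	struct numa_meminfo mi = {
		.nr_blks = 3,
		.blk = {
			{ 0x0000, 0x1000, 0 },
			{ 0x2000, 0x3000, 1 },
			{ 0x1000, 0x2000, 0 },
		},
	};

	merge_same_nid(&mi);
	for (int i = 0; i < mi.nr_blks; i++)
		printf("nid %d: %#llx-%#llx\n",
		       mi.blk[i].nid, mi.blk[i].start, mi.blk[i].end);
	return 0;
}

Built with any C99 compiler, this prints node 0 as a single range covering 0-0x2000 and leaves node 1 untouched, which is the same joining behaviour numa_register_memblks() relies on before registering active ranges.
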
From 4697bdcc945c094d2c8a4876a24faeaf31a283e0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:09 +0100 Subject: x86-64, NUMA: Kill mem_nodes_parsed With all memory configuration information now carried in numa_meminfo, there's no need to keep mem_nodes_parsed separate. Drop it and use numa_nodes_parsed for CPU / memory-less nodes. A new helper numa_nodemask_from_meminfo() is added to calculate memnode mask on the fly which is currently used to set node_possible_map. This simplifies NUMA init methods a bit and removes a source of possible inconsistencies. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 6e94469..f42710b 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -25,7 +25,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, #define NODE_MIN_SIZE (4*1024*1024) extern nodemask_t numa_nodes_parsed __initdata; -extern nodemask_t mem_nodes_parsed __initdata; extern int __cpuinit numa_cpu_node(int cpu); extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index e76bffa..fd7b609 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -122,7 +122,7 @@ int __init amd_numa_init(void) nodeid, (base >> 8) & 3, (limit >> 8) & 3); return -EINVAL; } - if (node_isset(nodeid, mem_nodes_parsed)) { + if (node_isset(nodeid, numa_nodes_parsed)) { pr_info("Node %d already present, skipping\n", nodeid); continue; @@ -167,11 +167,10 @@ int __init amd_numa_init(void) prevbase = base; numa_add_memblk(nodeid, base, limit); - node_set(nodeid, mem_nodes_parsed); node_set(nodeid, numa_nodes_parsed); } - if (!nodes_weight(mem_nodes_parsed)) + if (!nodes_weight(numa_nodes_parsed)) return -ENOENT; /* diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 6e4fbd7..8b1f178 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -37,7 +37,6 @@ struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); nodemask_t numa_nodes_parsed __initdata; -nodemask_t mem_nodes_parsed __initdata; struct memnode memnode; @@ -344,6 +343,20 @@ static int __init numa_cleanup_meminfo(struct numa_meminfo *mi) } /* + * Set nodes, which have memory in @mi, in *@nodemask. + */ +static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, + const struct numa_meminfo *mi) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mi->blk); i++) + if (mi->blk[i].start != mi->blk[i].end && + mi->blk[i].nid != NUMA_NO_NODE) + node_set(mi->blk[i].nid, *nodemask); +} + +/* * Sanity check to catch more bad NUMA configurations (they are amazingly * common). Make sure the nodes cover all memory. 
*/ @@ -379,7 +392,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) int i, j, nid; /* Account for nodes with cpus and no memory */ - nodes_or(node_possible_map, mem_nodes_parsed, numa_nodes_parsed); + node_possible_map = numa_nodes_parsed; + numa_nodemask_from_meminfo(&node_possible_map, mi); if (WARN_ON(nodes_empty(node_possible_map))) return -EINVAL; @@ -824,7 +838,6 @@ static int dummy_numa_init(void) 0LU, max_pfn << PAGE_SHIFT); node_set(0, numa_nodes_parsed); - node_set(0, mem_nodes_parsed); numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); return 0; @@ -852,7 +865,6 @@ void __init initmem_init(void) set_apicid_to_node(j, NUMA_NO_NODE); nodes_clear(numa_nodes_parsed); - nodes_clear(mem_nodes_parsed); nodes_clear(node_possible_map); nodes_clear(node_online_map); memset(&numa_meminfo, 0, sizeof(numa_meminfo)); diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 8185189..4f8e6cd 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -238,9 +238,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm, start, end); - if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) - node_set(node, mem_nodes_parsed); - else + if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) update_nodes_add(node, start, end); } @@ -310,10 +308,9 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node)); - nodes_clear(mem_nodes_parsed); for (i = 0; i < num_nodes; i++) if (fake_nodes[i].start != fake_nodes[i].end) - node_set(i, mem_nodes_parsed); + node_set(i, numa_nodes_parsed); } static int null_slit_node_compare(int a, int b) -- cgit v0.10.2 From ac7136b611ee8f8bd6231ce2e1dbdd31ae3d39bc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:09 +0100 Subject: x86-64, NUMA: Implement generic node distance handling Node distance either used direct node comparison, ACPI PXM comparison or ACPI SLIT table lookup. This patch implements generic node distance handling. NUMA init methods can call numa_set_distance() to set distance between nodes and the common __node_distance() implementation will report the set distance. Due to the way NUMA emulation is implemented, the generic node distance handling is used only when emulation is not used. Later patches will update NUMA emulation to use the generic distance mechanism. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
Peter Anvin diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index cfa3d5c..9c9fe1b 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -190,6 +190,7 @@ extern int x86_acpi_numa_init(void); #ifdef CONFIG_NUMA_EMU extern void acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes); +extern int acpi_emu_node_distance(int a, int b); #endif #endif /* CONFIG_ACPI_NUMA */ diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index f42710b..5361c59 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -28,6 +28,7 @@ extern nodemask_t numa_nodes_parsed __initdata; extern int __cpuinit numa_cpu_node(int cpu); extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); +extern void __init numa_set_distance(int from, int to, int distance); #ifdef CONFIG_NUMA_EMU #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index b101c17..910a708 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -138,7 +138,7 @@ extern unsigned long node_remap_size[]; .balance_interval = 1, \ } -#ifdef CONFIG_X86_64_ACPI_NUMA +#ifdef CONFIG_X86_64 extern int __node_distance(int, int); #define node_distance(a, b) __node_distance(a, b) #endif diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 8b1f178..a3621f2 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -45,6 +45,13 @@ static unsigned long __initdata nodemap_size; static struct numa_meminfo numa_meminfo __initdata; +static int numa_distance_cnt; +static u8 *numa_distance; + +#ifdef CONFIG_NUMA_EMU +static bool numa_emu_dist; +#endif + /* * Given a shift value, try to populate memnodemap[] * Returns : @@ -357,6 +364,92 @@ static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, } /* + * Reset distance table. The current table is freed. The next + * numa_set_distance() call will create a new one. + */ +static void __init numa_reset_distance(void) +{ + size_t size; + + size = numa_distance_cnt * sizeof(numa_distance[0]); + memblock_x86_free_range(__pa(numa_distance), + __pa(numa_distance) + size); + numa_distance = NULL; + numa_distance_cnt = 0; +} + +/* + * Set the distance between node @from to @to to @distance. If distance + * table doesn't exist, one which is large enough to accomodate all the + * currently known nodes will be created. + */ +void __init numa_set_distance(int from, int to, int distance) +{ + if (!numa_distance) { + nodemask_t nodes_parsed; + size_t size; + int i, j, cnt = 0; + u64 phys; + + /* size the new table and allocate it */ + nodes_parsed = numa_nodes_parsed; + numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); + + for_each_node_mask(i, nodes_parsed) + cnt = i; + size = ++cnt * sizeof(numa_distance[0]); + + phys = memblock_find_in_range(0, + (u64)max_pfn_mapped << PAGE_SHIFT, + size, PAGE_SIZE); + if (phys == MEMBLOCK_ERROR) { + pr_warning("NUMA: Warning: can't allocate distance table!\n"); + /* don't retry until explicitly reset */ + numa_distance = (void *)1LU; + return; + } + memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); + + numa_distance = __va(phys); + numa_distance_cnt = cnt; + + /* fill with the default distances */ + for (i = 0; i < cnt; i++) + for (j = 0; j < cnt; j++) + numa_distance[i * cnt + j] = i == j ? 
+ LOCAL_DISTANCE : REMOTE_DISTANCE; + printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); + } + + if (from >= numa_distance_cnt || to >= numa_distance_cnt) { + printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + if ((u8)distance != distance || + (from == to && distance != LOCAL_DISTANCE)) { + pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + numa_distance[from * numa_distance_cnt + to] = distance; +} + +int __node_distance(int from, int to) +{ +#if defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA_EMU) + if (numa_emu_dist) + return acpi_emu_node_distance(from, to); +#endif + if (from >= numa_distance_cnt || to >= numa_distance_cnt) + return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; + return numa_distance[from * numa_distance_cnt + to]; +} +EXPORT_SYMBOL(__node_distance); + +/* * Sanity check to catch more bad NUMA configurations (they are amazingly * common). Make sure the nodes cover all memory. */ @@ -826,6 +919,7 @@ static int __init numa_emulation(unsigned long start_pfn, setup_physnodes(addr, max_addr); fake_physnodes(acpi, amd, num_nodes); numa_init_array(); + numa_emu_dist = true; return 0; } #endif /* CONFIG_NUMA_EMU */ @@ -869,6 +963,7 @@ void __init initmem_init(void) nodes_clear(node_online_map); memset(&numa_meminfo, 0, sizeof(numa_meminfo)); remove_all_active_ranges(); + numa_reset_distance(); if (numa_init[i]() < 0) continue; diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 4f8e6cd..d2f53f3 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -50,9 +50,16 @@ static __init inline int srat_disabled(void) /* Callback for SLIT parsing */ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) { + int i, j; unsigned length; unsigned long phys; + for (i = 0; i < slit->locality_count; i++) + for (j = 0; j < slit->locality_count; j++) + numa_set_distance(pxm_to_node(i), pxm_to_node(j), + slit->entry[slit->locality_count * i + j]); + + /* acpi_slit is used only by emulation */ length = slit->header.length; phys = memblock_find_in_range(0, max_pfn_mapped<locality_count * node_to_pxm(a); return acpi_slit->entry[index + node_to_pxm(b)]; } - -EXPORT_SYMBOL(__node_distance); +#endif /* CONFIG_NUMA_EMU */ #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) int memory_add_physaddr_to_nid(u64 start) -- cgit v0.10.2 From d9c515eacb3bde73f7a5ecb7e35ea6e660ad421d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:10 +0100 Subject: x86-64, NUMA: Trivial changes to prepare for emulation updates * Separate out numa_add_memblk_to() from numa_add_memblk() so that different numa_meminfo can be used. * Rename cmdline to emu_cmdline. * Drop @start/last_pfn from numa_emulation() and use max_pfn directly. This patch doesn't introduce any behavior change. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a3621f2..20e2cfe 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -193,10 +193,9 @@ static void * __init early_node_mem(int nodeid, unsigned long start, return NULL; } -int __init numa_add_memblk(int nid, u64 start, u64 end) +static int __init numa_add_memblk_to(int nid, u64 start, u64 end, + struct numa_meminfo *mi) { - struct numa_meminfo *mi = &numa_meminfo; - /* ignore zero length blks */ if (start == end) return 0; @@ -227,6 +226,11 @@ static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) (mi->nr_blks - idx) * sizeof(mi->blk[0])); } +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + return numa_add_memblk_to(nid, start, end, &numa_meminfo); +} + /* Initialize bootmem allocator for a node */ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) @@ -539,11 +543,11 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) /* Numa emulation */ static struct bootnode nodes[MAX_NUMNODES] __initdata; static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata; -static char *cmdline __initdata; +static char *emu_cmdline __initdata; void __init numa_emu_cmdline(char *str) { - cmdline = str; + emu_cmdline = str; } int __init find_node_by_addr(unsigned long addr) @@ -861,12 +865,10 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) * Sets up the system RAM area from start_pfn to last_pfn according to the * numa=fake command-line option. */ -static int __init numa_emulation(unsigned long start_pfn, - unsigned long last_pfn, int acpi, int amd) +static int __init numa_emulation(int acpi, int amd) { static struct numa_meminfo ei __initdata; - u64 addr = start_pfn << PAGE_SHIFT; - u64 max_addr = last_pfn << PAGE_SHIFT; + const u64 max_addr = max_pfn << PAGE_SHIFT; int num_nodes; int i; @@ -875,16 +877,16 @@ static int __init numa_emulation(unsigned long start_pfn, * the fixed node size. Otherwise, if it is just a single number N, * split the system RAM into N fake nodes. 
*/ - if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) { + if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) { u64 size; - size = memparse(cmdline, &cmdline); - num_nodes = split_nodes_size_interleave(addr, max_addr, size); + size = memparse(emu_cmdline, &emu_cmdline); + num_nodes = split_nodes_size_interleave(0, max_addr, size); } else { unsigned long n; - n = simple_strtoul(cmdline, NULL, 0); - num_nodes = split_nodes_interleave(addr, max_addr, n); + n = simple_strtoul(emu_cmdline, NULL, 0); + num_nodes = split_nodes_interleave(0, max_addr, n); } if (num_nodes < 0) @@ -916,7 +918,7 @@ static int __init numa_emulation(unsigned long start_pfn, init_memory_mapping_high(); for_each_node_mask(i, node_possible_map) setup_node_bootmem(i, nodes[i].start, nodes[i].end); - setup_physnodes(addr, max_addr); + setup_physnodes(0, max_addr); fake_physnodes(acpi, amd, num_nodes); numa_init_array(); numa_emu_dist = true; @@ -972,7 +974,7 @@ void __init initmem_init(void) continue; #ifdef CONFIG_NUMA_EMU setup_physnodes(0, max_pfn << PAGE_SHIFT); - if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1)) + if (emu_cmdline && !numa_emulation(i == 0, i == 1)) return; setup_physnodes(0, max_pfn << PAGE_SHIFT); nodes_clear(node_possible_map); -- cgit v0.10.2 From 9d073caeb372940af02a768d2b7e845ac732bda0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:10 +0100 Subject: x86-64, NUMA: Build and use direct emulated nid -> phys nid mapping NUMA emulation copied physical NUMA configuration into physnodes[] and used it to reverse-map emulated nodes to physical nodes, which is unnecessarily convoluted. Build emu_nid_to_phys[] array to map emulated nids directly to the matching physical nids and use it in numa_add_cpu(). physnodes[] will be removed with further patches. - v2: Build failure when CONFIG_DEBUG_PER_CPU_MAPS due to missing local variable definition fixed. Reported by Ingo. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 20e2cfe..e9919c4 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -542,7 +542,9 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) #ifdef CONFIG_NUMA_EMU /* Numa emulation */ static struct bootnode nodes[MAX_NUMNODES] __initdata; -static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata; +static struct bootnode physnodes[MAX_NUMNODES] __initdata; + +static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; static char *emu_cmdline __initdata; void __init numa_emu_cmdline(char *str) @@ -649,7 +651,8 @@ static void __init fake_physnodes(int acpi, int amd, int nr_nodes) * allocation past addr and -1 otherwise. addr is adjusted to be at * the end of the node. 
*/ -static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr) +static int __init setup_node_range(int nid, int physnid, + u64 *addr, u64 size, u64 max_addr) { int ret = 0; nodes[nid].start = *addr; @@ -660,6 +663,10 @@ static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr) } nodes[nid].end = *addr; node_set(nid, node_possible_map); + + if (emu_nid_to_phys[nid] == NUMA_NO_NODE) + emu_nid_to_phys[nid] = physnid; + printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, nodes[nid].start, nodes[nid].end, (nodes[nid].end - nodes[nid].start) >> 20); @@ -756,7 +763,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes) if (nodes_weight(physnode_mask) + ret >= nr_nodes) end = physnodes[i].end; - if (setup_node_range(ret++, &physnodes[i].start, + if (setup_node_range(ret++, i, &physnodes[i].start, end - physnodes[i].start, physnodes[i].end) < 0) node_clear(i, physnode_mask); @@ -852,7 +859,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) * later. If setup_node_range() returns non-zero, there * is no more memory available on this physical node. */ - if (setup_node_range(ret++, &physnodes[i].start, + if (setup_node_range(ret++, i, &physnodes[i].start, end - physnodes[i].start, physnodes[i].end) < 0) node_clear(i, physnode_mask); @@ -872,6 +879,9 @@ static int __init numa_emulation(int acpi, int amd) int num_nodes; int i; + for (i = 0; i < MAX_NUMNODES; i++) + emu_nid_to_phys[i] = NUMA_NO_NODE; + /* * If the numa=fake command-line contains a 'M' or 'G', it represents * the fixed node size. Otherwise, if it is just a single number N, @@ -892,6 +902,11 @@ static int __init numa_emulation(int acpi, int amd) if (num_nodes < 0) return num_nodes; + /* make sure all emulated nodes are mapped to a physical node */ + for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) + if (emu_nid_to_phys[i] == NUMA_NO_NODE) + emu_nid_to_phys[i] = 0; + ei.nr_blks = num_nodes; for (i = 0; i < ei.nr_blks; i++) { ei.blk[i].start = nodes[i].start; @@ -918,7 +933,6 @@ static int __init numa_emulation(int acpi, int amd) init_memory_mapping_high(); for_each_node_mask(i, node_possible_map) setup_node_bootmem(i, nodes[i].start, nodes[i].end); - setup_physnodes(0, max_addr); fake_physnodes(acpi, amd, num_nodes); numa_init_array(); numa_emu_dist = true; @@ -976,7 +990,11 @@ void __init initmem_init(void) setup_physnodes(0, max_pfn << PAGE_SHIFT); if (emu_cmdline && !numa_emulation(i == 0, i == 1)) return; - setup_physnodes(0, max_pfn << PAGE_SHIFT); + + /* not emulating, build identity mapping for numa_add_cpu() */ + for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) + emu_nid_to_phys[j] = j; + nodes_clear(node_possible_map); nodes_clear(node_online_map); #endif @@ -1033,7 +1051,6 @@ int __cpuinit numa_cpu_node(int cpu) # ifndef CONFIG_DEBUG_PER_CPU_MAPS void __cpuinit numa_add_cpu(int cpu) { - unsigned long addr; int physnid, nid; nid = numa_cpu_node(cpu); @@ -1041,26 +1058,15 @@ void __cpuinit numa_add_cpu(int cpu) nid = early_cpu_to_node(cpu); BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); - /* - * Use the starting address of the emulated node to find which physical - * node it is allocated on. - */ - addr = node_start_pfn(nid) << PAGE_SHIFT; - for (physnid = 0; physnid < MAX_NUMNODES; physnid++) - if (addr >= physnodes[physnid].start && - addr < physnodes[physnid].end) - break; + physnid = emu_nid_to_phys[nid]; /* * Map the cpu to each emulated node that is allocated on the physical * node of the cpu's apic id. 
*/ - for_each_online_node(nid) { - addr = node_start_pfn(nid) << PAGE_SHIFT; - if (addr >= physnodes[physnid].start && - addr < physnodes[physnid].end) + for_each_online_node(nid) + if (emu_nid_to_phys[nid] == physnid) cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); - } } void __cpuinit numa_remove_cpu(int cpu) @@ -1073,21 +1079,21 @@ void __cpuinit numa_remove_cpu(int cpu) # else /* !CONFIG_DEBUG_PER_CPU_MAPS */ static void __cpuinit numa_set_cpumask(int cpu, int enable) { - int node = early_cpu_to_node(cpu); struct cpumask *mask; - int i; + int nid, physnid, i; - if (node == NUMA_NO_NODE) { + nid = early_cpu_to_node(cpu); + if (nid == NUMA_NO_NODE) { /* early_cpu_to_node() already emits a warning and trace */ return; } - for_each_online_node(i) { - unsigned long addr; - addr = node_start_pfn(i) << PAGE_SHIFT; - if (addr < physnodes[node].start || - addr >= physnodes[node].end) + physnid = emu_nid_to_phys[nid]; + + for_each_online_node(i) { + if (emu_nid_to_phys[nid] != physnid) continue; + mask = debug_cpumask_set_cpu(cpu, enable); if (!mask) return; -- cgit v0.10.2 From c88aea7a70b0f014f98c695069ba91abc9e9b9a4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:10 +0100 Subject: x86-64, NUMA: Make emulation code build numa_meminfo and share the registration path NUMA emulation code built nodes[] array and had its own registration path to set up the emulated nodes. Update it such that it generates emulated numa_meminfo and returns control to initmem_init() and shares the same registration path with non-emulated cases. Because {acpi|amd}_fake_nodes() expect nodes[] parameter, fake_physnodes() now generates nodes[] from numa_meminfo. This will go away with further updates. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index e9919c4..9736204 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -541,7 +541,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) #ifdef CONFIG_NUMA_EMU /* Numa emulation */ -static struct bootnode nodes[MAX_NUMNODES] __initdata; static struct bootnode physnodes[MAX_NUMNODES] __initdata; static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; @@ -626,9 +625,24 @@ static int __init setup_physnodes(unsigned long start, unsigned long end) return ret; } -static void __init fake_physnodes(int acpi, int amd, int nr_nodes) +static void __init fake_physnodes(int acpi, int amd, + const struct numa_meminfo *ei) { - int i; + static struct bootnode nodes[MAX_NUMNODES] __initdata; + int i, nr_nodes = 0; + + for (i = 0; i < ei->nr_blks; i++) { + int nid = ei->blk[i].nid; + + if (nodes[nid].start == nodes[nid].end) { + nodes[nid].start = ei->blk[i].start; + nodes[nid].end = ei->blk[i].end; + nr_nodes++; + } else { + nodes[nid].start = min(ei->blk[i].start, nodes[nid].start); + nodes[nid].end = max(ei->blk[i].end, nodes[nid].end); + } + } BUG_ON(acpi && amd); #ifdef CONFIG_ACPI_NUMA @@ -645,45 +659,44 @@ static void __init fake_physnodes(int acpi, int amd, int nr_nodes) } /* - * Setups up nid to range from addr to addr + size. If the end - * boundary is greater than max_addr, then max_addr is used instead. - * The return value is 0 if there is additional memory left for - * allocation past addr and -1 otherwise. addr is adjusted to be at - * the end of the node. + * Sets up nid to range from @start to @end. The return value is -errno if + * something went wrong, 0 otherwise. 
*/ -static int __init setup_node_range(int nid, int physnid, - u64 *addr, u64 size, u64 max_addr) +static int __init emu_setup_memblk(struct numa_meminfo *ei, + int nid, int physnid, u64 start, u64 end) { - int ret = 0; - nodes[nid].start = *addr; - *addr += size; - if (*addr >= max_addr) { - *addr = max_addr; - ret = -1; + struct numa_memblk *eb = &ei->blk[ei->nr_blks]; + + if (ei->nr_blks >= NR_NODE_MEMBLKS) { + pr_err("NUMA: Too many emulated memblks, failing emulation\n"); + return -EINVAL; } - nodes[nid].end = *addr; - node_set(nid, node_possible_map); + + ei->nr_blks++; + eb->start = start; + eb->end = end; + eb->nid = nid; if (emu_nid_to_phys[nid] == NUMA_NO_NODE) emu_nid_to_phys[nid] = physnid; printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, - nodes[nid].start, nodes[nid].end, - (nodes[nid].end - nodes[nid].start) >> 20); - return ret; + eb->start, eb->end, (eb->end - eb->start) >> 20); + return 0; } /* * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr * to max_addr. The return value is the number of nodes allocated. */ -static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes) +static int __init split_nodes_interleave(struct numa_meminfo *ei, + u64 addr, u64 max_addr, int nr_nodes) { nodemask_t physnode_mask = NODE_MASK_NONE; u64 size; int big; - int ret = 0; - int i; + int nid = 0; + int i, ret; if (nr_nodes <= 0) return -1; @@ -721,7 +734,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes) u64 end = physnodes[i].start + size; u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); - if (ret < big) + if (nid < big) end += FAKE_NODE_MIN_SIZE; /* @@ -760,16 +773,21 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes) * happen as a result of rounding down each node's size * to FAKE_NODE_MIN_SIZE. */ - if (nodes_weight(physnode_mask) + ret >= nr_nodes) + if (nodes_weight(physnode_mask) + nid >= nr_nodes) end = physnodes[i].end; - if (setup_node_range(ret++, i, &physnodes[i].start, - end - physnodes[i].start, - physnodes[i].end) < 0) + ret = emu_setup_memblk(ei, nid++, i, + physnodes[i].start, + min(end, physnodes[i].end)); + if (ret < 0) + return ret; + + physnodes[i].start = min(end, physnodes[i].end); + if (physnodes[i].start == physnodes[i].end) node_clear(i, physnode_mask); } } - return ret; + return 0; } /* @@ -794,12 +812,13 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) * Sets up fake nodes of `size' interleaved over physical nodes ranging from * `addr' to `max_addr'. The return value is the number of nodes allocated. */ -static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) +static int __init split_nodes_size_interleave(struct numa_meminfo *ei, + u64 addr, u64 max_addr, u64 size) { nodemask_t physnode_mask = NODE_MASK_NONE; u64 min_size; - int ret = 0; - int i; + int nid = 0; + int i, ret; if (!size) return -1; @@ -854,30 +873,31 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) memblock_x86_hole_size(end, physnodes[i].end) < size) end = physnodes[i].end; - /* - * Setup the fake node that will be allocated as bootmem - * later. If setup_node_range() returns non-zero, there - * is no more memory available on this physical node. 
- */ - if (setup_node_range(ret++, i, &physnodes[i].start, - end - physnodes[i].start, - physnodes[i].end) < 0) + ret = emu_setup_memblk(ei, nid++, i, + physnodes[i].start, + min(end, physnodes[i].end)); + if (ret < 0) + return ret; + + physnodes[i].start = min(end, physnodes[i].end); + if (physnodes[i].start == physnodes[i].end) node_clear(i, physnode_mask); } } - return ret; + return 0; } /* * Sets up the system RAM area from start_pfn to last_pfn according to the * numa=fake command-line option. */ -static int __init numa_emulation(int acpi, int amd) +static bool __init numa_emulation(int acpi, int amd) { static struct numa_meminfo ei __initdata; const u64 max_addr = max_pfn << PAGE_SHIFT; - int num_nodes; - int i; + int i, ret; + + memset(&ei, 0, sizeof(ei)); for (i = 0; i < MAX_NUMNODES; i++) emu_nid_to_phys[i] = NUMA_NO_NODE; @@ -891,52 +911,33 @@ static int __init numa_emulation(int acpi, int amd) u64 size; size = memparse(emu_cmdline, &emu_cmdline); - num_nodes = split_nodes_size_interleave(0, max_addr, size); + ret = split_nodes_size_interleave(&ei, 0, max_addr, size); } else { unsigned long n; n = simple_strtoul(emu_cmdline, NULL, 0); - num_nodes = split_nodes_interleave(0, max_addr, n); + ret = split_nodes_interleave(&ei, 0, max_addr, n); + } + + if (ret < 0) + return false; + + if (numa_cleanup_meminfo(&ei) < 0) { + pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n"); + return false; } - if (num_nodes < 0) - return num_nodes; + /* commit */ + numa_meminfo = ei; /* make sure all emulated nodes are mapped to a physical node */ for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) if (emu_nid_to_phys[i] == NUMA_NO_NODE) emu_nid_to_phys[i] = 0; - ei.nr_blks = num_nodes; - for (i = 0; i < ei.nr_blks; i++) { - ei.blk[i].start = nodes[i].start; - ei.blk[i].end = nodes[i].end; - ei.blk[i].nid = i; - } - - memnode_shift = compute_hash_shift(&ei); - if (memnode_shift < 0) { - memnode_shift = 0; - printk(KERN_ERR "No NUMA hash function found. NUMA emulation " - "disabled.\n"); - return -1; - } - - /* - * We need to vacate all active ranges that may have been registered for - * the e820 memory map. - */ - remove_all_active_ranges(); - for_each_node_mask(i, node_possible_map) - memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, - nodes[i].end >> PAGE_SHIFT); - init_memory_mapping_high(); - for_each_node_mask(i, node_possible_map) - setup_node_bootmem(i, nodes[i].start, nodes[i].end); - fake_physnodes(acpi, amd, num_nodes); - numa_init_array(); + fake_physnodes(acpi, amd, &ei); numa_emu_dist = true; - return 0; + return true; } #endif /* CONFIG_NUMA_EMU */ @@ -988,15 +989,13 @@ void __init initmem_init(void) continue; #ifdef CONFIG_NUMA_EMU setup_physnodes(0, max_pfn << PAGE_SHIFT); - if (emu_cmdline && !numa_emulation(i == 0, i == 1)) - return; - - /* not emulating, build identity mapping for numa_add_cpu() */ - for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) - emu_nid_to_phys[j] = j; - - nodes_clear(node_possible_map); - nodes_clear(node_online_map); + /* + * If requested, try emulation. 
If emulation is not used, + * build identity emu_nid_to_phys[] for numa_add_cpu() + */ + if (!emu_cmdline || !numa_emulation(i == 0, i == 1)) + for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) + emu_nid_to_phys[j] = j; #endif if (numa_register_memblks(&numa_meminfo) < 0) continue; -- cgit v0.10.2 From 775ee85d7bff8ce7c7eccde90eda400658b650a3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:10 +0100 Subject: x86-64, NUMA: Wrap node ID during emulation Both emulation layout functions - split_nodes[_size]_interleave() - didn't wrap emulated nid while laying out the fake nodes and tried to avoid interating over the specified number of nodes, which is fragile. Now that the emulation code generates numa_meminfo, the node memblks don't need to be consecutive and emulated node IDs can simply wrap. This makes the code more robust and is necessary for updates to better handle the cases where the physical nodes are interleaved. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 9736204..dc95165 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -768,15 +768,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, memblock_x86_hole_size(end, physnodes[i].end) < size) end = physnodes[i].end; - /* - * Avoid allocating more nodes than requested, which can - * happen as a result of rounding down each node's size - * to FAKE_NODE_MIN_SIZE. - */ - if (nodes_weight(physnode_mask) + nid >= nr_nodes) - end = physnodes[i].end; - - ret = emu_setup_memblk(ei, nid++, i, + ret = emu_setup_memblk(ei, nid++ % nr_nodes, i, physnodes[i].start, min(end, physnodes[i].end)); if (ret < 0) @@ -873,7 +865,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, memblock_x86_hole_size(end, physnodes[i].end) < size) end = physnodes[i].end; - ret = emu_setup_memblk(ei, nid++, i, + ret = emu_setup_memblk(ei, nid++ % MAX_NUMNODES, i, physnodes[i].start, min(end, physnodes[i].end)); if (ret < 0) -- cgit v0.10.2 From 1cca53407336fb6a86092e36dbc5c1e4d45d912b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:10 +0100 Subject: x86-64, NUMA: Emulate directly from numa_meminfo NUMA emulation built physnodes[] array which could only represent configurations from the physical meminfo and emulated nodes using the information. There's no reason to take this extra level of indirection. Update emulation functions so that they operate directly on numa_meminfo. This simplifies the code and makes emulation layout behave better with interleaved physical nodes. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
Peter Anvin diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index dc95165..bd086eb 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -541,8 +541,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) #ifdef CONFIG_NUMA_EMU /* Numa emulation */ -static struct bootnode physnodes[MAX_NUMNODES] __initdata; - static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; static char *emu_cmdline __initdata; @@ -551,6 +549,16 @@ void __init numa_emu_cmdline(char *str) emu_cmdline = str; } +static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) +{ + int i; + + for (i = 0; i < mi->nr_blks; i++) + if (mi->blk[i].nid == nid) + return i; + return -ENOENT; +} + int __init find_node_by_addr(unsigned long addr) { const struct numa_meminfo *mi = &numa_meminfo; @@ -568,63 +576,6 @@ int __init find_node_by_addr(unsigned long addr) return NUMA_NO_NODE; } -static int __init setup_physnodes(unsigned long start, unsigned long end) -{ - const struct numa_meminfo *mi = &numa_meminfo; - int ret = 0; - int i; - - memset(physnodes, 0, sizeof(physnodes)); - - for (i = 0; i < mi->nr_blks; i++) { - int nid = mi->blk[i].nid; - - if (physnodes[nid].start == physnodes[nid].end) { - physnodes[nid].start = mi->blk[i].start; - physnodes[nid].end = mi->blk[i].end; - } else { - physnodes[nid].start = min(physnodes[nid].start, - mi->blk[i].start); - physnodes[nid].end = max(physnodes[nid].end, - mi->blk[i].end); - } - } - - /* - * Basic sanity checking on the physical node map: there may be errors - * if the SRAT or AMD code incorrectly reported the topology or the mem= - * kernel parameter is used. - */ - for (i = 0; i < MAX_NUMNODES; i++) { - if (physnodes[i].start == physnodes[i].end) - continue; - if (physnodes[i].start > end) { - physnodes[i].end = physnodes[i].start; - continue; - } - if (physnodes[i].end < start) { - physnodes[i].start = physnodes[i].end; - continue; - } - if (physnodes[i].start < start) - physnodes[i].start = start; - if (physnodes[i].end > end) - physnodes[i].end = end; - ret++; - } - - /* - * If no physical topology was detected, a single node is faked to cover - * the entire address space. - */ - if (!ret) { - physnodes[ret].start = start; - physnodes[ret].end = end; - ret = 1; - } - return ret; -} - static void __init fake_physnodes(int acpi, int amd, const struct numa_meminfo *ei) { @@ -663,9 +614,11 @@ static void __init fake_physnodes(int acpi, int amd, * something went wrong, 0 otherwise. */ static int __init emu_setup_memblk(struct numa_meminfo *ei, - int nid, int physnid, u64 start, u64 end) + struct numa_meminfo *pi, + int nid, int phys_blk, u64 size) { struct numa_memblk *eb = &ei->blk[ei->nr_blks]; + struct numa_memblk *pb = &pi->blk[phys_blk]; if (ei->nr_blks >= NR_NODE_MEMBLKS) { pr_err("NUMA: Too many emulated memblks, failing emulation\n"); @@ -673,12 +626,18 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei, } ei->nr_blks++; - eb->start = start; - eb->end = end; + eb->start = pb->start; + eb->end = pb->start + size; eb->nid = nid; if (emu_nid_to_phys[nid] == NUMA_NO_NODE) - emu_nid_to_phys[nid] = physnid; + emu_nid_to_phys[nid] = pb->nid; + + pb->start += size; + if (pb->start >= pb->end) { + WARN_ON_ONCE(pb->start > pb->end); + numa_remove_memblk_from(phys_blk, pi); + } printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, eb->start, eb->end, (eb->end - eb->start) >> 20); @@ -690,6 +649,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei, * to max_addr. 
The return value is the number of nodes allocated. */ static int __init split_nodes_interleave(struct numa_meminfo *ei, + struct numa_meminfo *pi, u64 addr, u64 max_addr, int nr_nodes) { nodemask_t physnode_mask = NODE_MASK_NONE; @@ -721,9 +681,8 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, return -1; } - for (i = 0; i < MAX_NUMNODES; i++) - if (physnodes[i].start != physnodes[i].end) - node_set(i, physnode_mask); + for (i = 0; i < pi->nr_blks; i++) + node_set(pi->blk[i].nid, physnode_mask); /* * Continue to fill physical nodes with fake nodes until there is no @@ -731,8 +690,18 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, */ while (nodes_weight(physnode_mask)) { for_each_node_mask(i, physnode_mask) { - u64 end = physnodes[i].start + size; u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); + u64 start, limit, end; + int phys_blk; + + phys_blk = emu_find_memblk_by_nid(i, pi); + if (phys_blk < 0) { + node_clear(i, physnode_mask); + continue; + } + start = pi->blk[phys_blk].start; + limit = pi->blk[phys_blk].end; + end = start + size; if (nid < big) end += FAKE_NODE_MIN_SIZE; @@ -741,11 +710,11 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, * Continue to add memory to this fake node if its * non-reserved memory is less than the per-node size. */ - while (end - physnodes[i].start - - memblock_x86_hole_size(physnodes[i].start, end) < size) { + while (end - start - + memblock_x86_hole_size(start, end) < size) { end += FAKE_NODE_MIN_SIZE; - if (end > physnodes[i].end) { - end = physnodes[i].end; + if (end > limit) { + end = limit; break; } } @@ -764,19 +733,15 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, * next node, this one must extend to the end of the * physical node. */ - if (physnodes[i].end - end - - memblock_x86_hole_size(end, physnodes[i].end) < size) - end = physnodes[i].end; + if (limit - end - + memblock_x86_hole_size(end, limit) < size) + end = limit; - ret = emu_setup_memblk(ei, nid++ % nr_nodes, i, - physnodes[i].start, - min(end, physnodes[i].end)); + ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, + phys_blk, + min(end, limit) - start); if (ret < 0) return ret; - - physnodes[i].start = min(end, physnodes[i].end); - if (physnodes[i].start == physnodes[i].end) - node_clear(i, physnode_mask); } } return 0; @@ -805,6 +770,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) * `addr' to `max_addr'. The return value is the number of nodes allocated. */ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, + struct numa_meminfo *pi, u64 addr, u64 max_addr, u64 size) { nodemask_t physnode_mask = NODE_MASK_NONE; @@ -833,9 +799,9 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, } size &= FAKE_NODE_MIN_HASH_MASK; - for (i = 0; i < MAX_NUMNODES; i++) - if (physnodes[i].start != physnodes[i].end) - node_set(i, physnode_mask); + for (i = 0; i < pi->nr_blks; i++) + node_set(pi->blk[i].nid, physnode_mask); + /* * Fill physical nodes with fake nodes of size until there is no memory * left on any of them. 
@@ -843,10 +809,18 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, while (nodes_weight(physnode_mask)) { for_each_node_mask(i, physnode_mask) { u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT; - u64 end; + u64 start, limit, end; + int phys_blk; - end = find_end_of_node(physnodes[i].start, - physnodes[i].end, size); + phys_blk = emu_find_memblk_by_nid(i, pi); + if (phys_blk < 0) { + node_clear(i, physnode_mask); + continue; + } + start = pi->blk[phys_blk].start; + limit = pi->blk[phys_blk].end; + + end = find_end_of_node(start, limit, size); /* * If there won't be at least FAKE_NODE_MIN_SIZE of * non-reserved memory in ZONE_DMA32 for the next node, @@ -861,19 +835,15 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, * next node, this one must extend to the end of the * physical node. */ - if (physnodes[i].end - end - - memblock_x86_hole_size(end, physnodes[i].end) < size) - end = physnodes[i].end; + if (limit - end - + memblock_x86_hole_size(end, limit) < size) + end = limit; - ret = emu_setup_memblk(ei, nid++ % MAX_NUMNODES, i, - physnodes[i].start, - min(end, physnodes[i].end)); + ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, + phys_blk, + min(end, limit) - start); if (ret < 0) return ret; - - physnodes[i].start = min(end, physnodes[i].end); - if (physnodes[i].start == physnodes[i].end) - node_clear(i, physnode_mask); } } return 0; @@ -886,10 +856,12 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, static bool __init numa_emulation(int acpi, int amd) { static struct numa_meminfo ei __initdata; + static struct numa_meminfo pi __initdata; const u64 max_addr = max_pfn << PAGE_SHIFT; int i, ret; memset(&ei, 0, sizeof(ei)); + pi = numa_meminfo; for (i = 0; i < MAX_NUMNODES; i++) emu_nid_to_phys[i] = NUMA_NO_NODE; @@ -903,12 +875,12 @@ static bool __init numa_emulation(int acpi, int amd) u64 size; size = memparse(emu_cmdline, &emu_cmdline); - ret = split_nodes_size_interleave(&ei, 0, max_addr, size); + ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size); } else { unsigned long n; n = simple_strtoul(emu_cmdline, NULL, 0); - ret = split_nodes_interleave(&ei, 0, max_addr, n); + ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n); } if (ret < 0) @@ -980,7 +952,6 @@ void __init initmem_init(void) if (numa_cleanup_meminfo(&numa_meminfo) < 0) continue; #ifdef CONFIG_NUMA_EMU - setup_physnodes(0, max_pfn << PAGE_SHIFT); /* * If requested, try emulation. If emulation is not used, * build identity emu_nid_to_phys[] for numa_add_cpu() -- cgit v0.10.2 From 6b78cb549b4105cbf7c6f7461f27a21f00c44997 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:10 +0100 Subject: x86-64, NUMA: Unify emulated apicid -> node mapping transformation NUMA emulation changes node mappings and thus apicid -> node mapping needs to be updated accordingly. srat_64 and amdtopology_64 did this separately; however, all the necessary information is the mapping from emulated nodes to physical nodes which is available in emu_nid_to_phys[]. Implement common __apicid_to_node[] transformation in numa_emulation() and drop duplicate implementations. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. 
Peter Anvin diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index fd7b609..f37ea2f 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -196,10 +196,6 @@ int __init amd_numa_init(void) } #ifdef CONFIG_NUMA_EMU -static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { - [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE -}; - /* * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be * setup to represent the physical topology but reflect the emulated @@ -224,20 +220,15 @@ void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes) for (i = 0; i < nr_nodes; i++) { int index; int nid; - int j; nid = find_node_by_addr(nodes[i].start); if (nid == NUMA_NO_NODE) continue; index = nodeids[nid] << bits; - if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE) - for (j = apicid_base; j < cores + apicid_base; j++) - fake_apicid_to_node[index + j] = i; #ifdef CONFIG_ACPI_NUMA __acpi_map_pxm_to_node(nid, i); #endif } - memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node)); } #endif /* CONFIG_NUMA_EMU */ diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index bd086eb..722039e 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -858,7 +858,7 @@ static bool __init numa_emulation(int acpi, int amd) static struct numa_meminfo ei __initdata; static struct numa_meminfo pi __initdata; const u64 max_addr = max_pfn << PAGE_SHIFT; - int i, ret; + int i, j, ret; memset(&ei, 0, sizeof(ei)); pi = numa_meminfo; @@ -894,6 +894,20 @@ static bool __init numa_emulation(int acpi, int amd) /* commit */ numa_meminfo = ei; + /* + * Transform __apicid_to_node table to use emulated nids by + * reverse-mapping phys_nid. The maps should always exist but fall + * back to zero just in case. + */ + for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { + if (__apicid_to_node[i] == NUMA_NO_NODE) + continue; + for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) + if (__apicid_to_node[i] == emu_nid_to_phys[j]) + break; + __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0; + } + /* make sure all emulated nodes are mapped to a physical node */ for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) if (emu_nid_to_phys[i] == NUMA_NO_NODE) diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index d2f53f3..d4fbfea 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -265,9 +265,6 @@ int __init x86_acpi_numa_init(void) static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = { [0 ... MAX_NUMNODES-1] = PXM_INVAL }; -static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { - [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE -}; /* * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID @@ -279,7 +276,7 @@ static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { */ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) { - int i, j; + int i; for (i = 0; i < num_nodes; i++) { int nid, pxm; @@ -291,29 +288,10 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) if (pxm == PXM_INVAL) continue; fake_node_to_pxm_map[i] = pxm; - /* - * For each apicid_to_node mapping that exists for this real - * node, it must now point to the fake node ID. 
- */ - for (j = 0; j < MAX_LOCAL_APIC; j++) - if (__apicid_to_node[j] == nid && - fake_apicid_to_node[j] == NUMA_NO_NODE) - fake_apicid_to_node[j] = i; } - /* - * If there are apicid-to-node mappings for physical nodes that do not - * have a corresponding emulated node, it should default to a guaranteed - * value. - */ - for (i = 0; i < MAX_LOCAL_APIC; i++) - if (__apicid_to_node[i] != NUMA_NO_NODE && - fake_apicid_to_node[i] == NUMA_NO_NODE) - fake_apicid_to_node[i] = 0; - for (i = 0; i < num_nodes; i++) __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); - memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node)); for (i = 0; i < num_nodes; i++) if (fake_nodes[i].start != fake_nodes[i].end) -- cgit v0.10.2 From e23bba604433a202cd301a976454a90ea6b783ef Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 17:11:10 +0100 Subject: x86-64, NUMA: Unify emulated distance mapping NUMA emulation needs to update node distance information. It did it by remapping apicid to PXM mapping, even when amdtopology is being used. There is no reason to go through such convolution. The generic code has all the information necessary to transform the distance table to the emulated nid space. Implement generic distance table transformation in numa_emulation() and drop private implementations in srat_64 and amdtopology_64. This makes find_node_by_addr() and fake_physnodes() and related functions unnecessary, drop them. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Shaohui Zheng Cc: David Rientjes Cc: Ingo Molnar Cc: H. Peter Anvin diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 9c9fe1b..a37da6d 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -186,12 +186,6 @@ struct bootnode; #ifdef CONFIG_ACPI_NUMA extern int acpi_numa; extern int x86_acpi_numa_init(void); - -#ifdef CONFIG_NUMA_EMU -extern void acpi_fake_nodes(const struct bootnode *fake_nodes, - int num_nodes); -extern int acpi_emu_node_distance(int a, int b); -#endif #endif /* CONFIG_ACPI_NUMA */ #define acpi_unlazy_tlb(x) leave_mm(x) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 384d118..e264ae5 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -20,10 +20,6 @@ extern int amd_numa_init(void); extern int amd_get_subcaches(int); extern int amd_set_subcaches(int, int); -#ifdef CONFIG_NUMA_EMU -extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes); -#endif - struct amd_northbridge { struct pci_dev *misc; struct pci_dev *link; diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 5361c59..344eb17 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -34,7 +34,6 @@ extern void __init numa_set_distance(int from, int to, int distance); #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) void numa_emu_cmdline(char *); -int __init find_node_by_addr(unsigned long addr); #endif /* CONFIG_NUMA_EMU */ #else static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index f37ea2f..0919c26 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -194,41 +194,3 @@ int __init amd_numa_init(void) return 0; } - -#ifdef CONFIG_NUMA_EMU -/* - * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be - * setup to represent the physical topology but 
reflect the emulated - * environment. For each emulated node, the real node which it appears on is - * found and a fake pxm to nid mapping is created which mirrors the actual - * locality. node_distance() then represents the correct distances between - * emulated nodes by using the fake acpi mappings to pxms. - */ -void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes) -{ - unsigned int bits; - unsigned int cores; - unsigned int apicid_base = 0; - int i; - - bits = boot_cpu_data.x86_coreid_bits; - cores = 1 << bits; - early_get_boot_cpu_id(); - if (boot_cpu_physical_apicid > 0) - apicid_base = boot_cpu_physical_apicid; - - for (i = 0; i < nr_nodes; i++) { - int index; - int nid; - - nid = find_node_by_addr(nodes[i].start); - if (nid == NUMA_NO_NODE) - continue; - - index = nodeids[nid] << bits; -#ifdef CONFIG_ACPI_NUMA - __acpi_map_pxm_to_node(nid, i); -#endif - } -} -#endif /* CONFIG_NUMA_EMU */ diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 722039e..8ce6177 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -48,10 +48,6 @@ static struct numa_meminfo numa_meminfo __initdata; static int numa_distance_cnt; static u8 *numa_distance; -#ifdef CONFIG_NUMA_EMU -static bool numa_emu_dist; -#endif - /* * Given a shift value, try to populate memnodemap[] * Returns : @@ -443,10 +439,6 @@ void __init numa_set_distance(int from, int to, int distance) int __node_distance(int from, int to) { -#if defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA_EMU) - if (numa_emu_dist) - return acpi_emu_node_distance(from, to); -#endif if (from >= numa_distance_cnt || to >= numa_distance_cnt) return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; return numa_distance[from * numa_distance_cnt + to]; @@ -559,56 +551,6 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) return -ENOENT; } -int __init find_node_by_addr(unsigned long addr) -{ - const struct numa_meminfo *mi = &numa_meminfo; - int i; - - for (i = 0; i < mi->nr_blks; i++) { - /* - * Find the real node that this emulated node appears on. For - * the sake of simplicity, we only use a real node's starting - * address to determine which emulated node it appears on. - */ - if (addr >= mi->blk[i].start && addr < mi->blk[i].end) - return mi->blk[i].nid; - } - return NUMA_NO_NODE; -} - -static void __init fake_physnodes(int acpi, int amd, - const struct numa_meminfo *ei) -{ - static struct bootnode nodes[MAX_NUMNODES] __initdata; - int i, nr_nodes = 0; - - for (i = 0; i < ei->nr_blks; i++) { - int nid = ei->blk[i].nid; - - if (nodes[nid].start == nodes[nid].end) { - nodes[nid].start = ei->blk[i].start; - nodes[nid].end = ei->blk[i].end; - nr_nodes++; - } else { - nodes[nid].start = min(ei->blk[i].start, nodes[nid].start); - nodes[nid].end = max(ei->blk[i].end, nodes[nid].end); - } - } - - BUG_ON(acpi && amd); -#ifdef CONFIG_ACPI_NUMA - if (acpi) - acpi_fake_nodes(nodes, nr_nodes); -#endif -#ifdef CONFIG_AMD_NUMA - if (amd) - amd_fake_nodes(nodes, nr_nodes); -#endif - if (!acpi && !amd) - for (i = 0; i < nr_cpu_ids; i++) - numa_set_node(i, 0); -} - /* * Sets up nid to range from @start to @end. The return value is -errno if * something went wrong, 0 otherwise. @@ -853,11 +795,13 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, * Sets up the system RAM area from start_pfn to last_pfn according to the * numa=fake command-line option. 
*/ -static bool __init numa_emulation(int acpi, int amd) +static bool __init numa_emulation(void) { static struct numa_meminfo ei __initdata; static struct numa_meminfo pi __initdata; const u64 max_addr = max_pfn << PAGE_SHIFT; + int phys_dist_cnt = numa_distance_cnt; + u8 *phys_dist = NULL; int i, j, ret; memset(&ei, 0, sizeof(ei)); @@ -891,6 +835,25 @@ static bool __init numa_emulation(int acpi, int amd) return false; } + /* + * Copy the original distance table. It's temporary so no need to + * reserve it. + */ + if (phys_dist_cnt) { + size_t size = phys_dist_cnt * sizeof(numa_distance[0]); + u64 phys; + + phys = memblock_find_in_range(0, + (u64)max_pfn_mapped << PAGE_SHIFT, + size, PAGE_SIZE); + if (phys == MEMBLOCK_ERROR) { + pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); + return false; + } + phys_dist = __va(phys); + memcpy(phys_dist, numa_distance, size); + } + /* commit */ numa_meminfo = ei; @@ -913,8 +876,23 @@ static bool __init numa_emulation(int acpi, int amd) if (emu_nid_to_phys[i] == NUMA_NO_NODE) emu_nid_to_phys[i] = 0; - fake_physnodes(acpi, amd, &ei); - numa_emu_dist = true; + /* transform distance table */ + numa_reset_distance(); + for (i = 0; i < MAX_NUMNODES; i++) { + for (j = 0; j < MAX_NUMNODES; j++) { + int physi = emu_nid_to_phys[i]; + int physj = emu_nid_to_phys[j]; + int dist; + + if (physi >= phys_dist_cnt || physj >= phys_dist_cnt) + dist = physi == physj ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + else + dist = phys_dist[physi * phys_dist_cnt + physj]; + + numa_set_distance(i, j, dist); + } + } return true; } #endif /* CONFIG_NUMA_EMU */ @@ -970,7 +948,7 @@ void __init initmem_init(void) * If requested, try emulation. If emulation is not used, * build identity emu_nid_to_phys[] for numa_add_cpu() */ - if (!emu_cmdline || !numa_emulation(i == 0, i == 1)) + if (!emu_cmdline || !numa_emulation()) for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) emu_nid_to_phys[j] = j; #endif diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index d4fbfea..8e9d339 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -26,8 +26,6 @@ int acpi_numa __initdata; -static struct acpi_table_slit *acpi_slit; - static struct bootnode nodes_add[MAX_NUMNODES]; static __init int setup_node(int pxm) @@ -51,25 +49,11 @@ static __init inline int srat_disabled(void) void __init acpi_numa_slit_init(struct acpi_table_slit *slit) { int i, j; - unsigned length; - unsigned long phys; for (i = 0; i < slit->locality_count; i++) for (j = 0; j < slit->locality_count; j++) numa_set_distance(pxm_to_node(i), pxm_to_node(j), slit->entry[slit->locality_count * i + j]); - - /* acpi_slit is used only by emulation */ - length = slit->header.length; - phys = memblock_find_in_range(0, max_pfn_mapped< x2APIC mapping */ @@ -261,55 +245,6 @@ int __init x86_acpi_numa_init(void) return srat_disabled() ? -EINVAL : 0; } -#ifdef CONFIG_NUMA_EMU -static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = { - [0 ... MAX_NUMNODES-1] = PXM_INVAL -}; - -/* - * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID - * mappings that respect the real ACPI topology but reflect our emulated - * environment. For each emulated node, we find which real node it appears on - * and create PXM to NID mappings for those fake nodes which mirror that - * locality. SLIT will now represent the correct distances between emulated - * nodes as a result of the real topology. 
- */ -void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) -{ - int i; - - for (i = 0; i < num_nodes; i++) { - int nid, pxm; - - nid = find_node_by_addr(fake_nodes[i].start); - if (nid == NUMA_NO_NODE) - continue; - pxm = node_to_pxm(nid); - if (pxm == PXM_INVAL) - continue; - fake_node_to_pxm_map[i] = pxm; - } - - for (i = 0; i < num_nodes; i++) - __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); - - for (i = 0; i < num_nodes; i++) - if (fake_nodes[i].start != fake_nodes[i].end) - node_set(i, numa_nodes_parsed); -} - -int acpi_emu_node_distance(int a, int b) -{ - int index; - - if (!acpi_slit) - return node_to_pxm(a) == node_to_pxm(b) ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - index = acpi_slit->locality_count * node_to_pxm(a); - return acpi_slit->entry[index + node_to_pxm(b)]; -} -#endif /* CONFIG_NUMA_EMU */ - #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) int memory_add_physaddr_to_nid(u64 start) { -- cgit v0.10.2 From 2ca230baeb7c61864cab9b53e37a3da28a2ca7e5 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 17 Feb 2011 14:46:37 +0100 Subject: x86-64, NUMA: Don't call __pa() with invalid address in numa_reset_distance() Do not call __pa(numa_distance) if it was not allocated before. Calling with invalid address triggers VIRTUAL_BUG_ON() in __phys_addr() if CONFIG_DEBUG_VIRTUAL. Also reported by Ingo. http://thread.gmane.org/gmane.linux.kernel/1101306/focus=1101785 - v2: Change to check existing path as tj requested. - tj: Description update. Signed-off-by: Yinghai Lu Signed-off-by: Tejun Heo Reported-by: Ingo Molnar diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 8ce6177..1bd6de4 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -371,11 +371,13 @@ static void __init numa_reset_distance(void) { size_t size; - size = numa_distance_cnt * sizeof(numa_distance[0]); - memblock_x86_free_range(__pa(numa_distance), - __pa(numa_distance) + size); + if (numa_distance_cnt) { + size = numa_distance_cnt * sizeof(numa_distance[0]); + memblock_x86_free_range(__pa(numa_distance), + __pa(numa_distance) + size); + numa_distance_cnt = 0; + } numa_distance = NULL; - numa_distance_cnt = 0; } /* -- cgit v0.10.2 From 6d496f9f232790d44144f3784856290e0b27b8f3 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 17 Feb 2011 14:53:20 +0100 Subject: x86-64, NUMA: Put dummy_numa_init() in the init section dummy_numa_init() is used only during system boot. Put it in .init like other NUMA init functions. - tj: Description update. Signed-off-by: Yinghai Lu Signed-off-by: Tejun Heo diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 1bd6de4..f6d85e3 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -899,7 +899,7 @@ static bool __init numa_emulation(void) } #endif /* CONFIG_NUMA_EMU */ -static int dummy_numa_init(void) +static int __init dummy_numa_init(void) { printk(KERN_INFO "%s\n", numa_off ? "NUMA turned off" : "No NUMA configuration found"); -- cgit v0.10.2 From 69efcc6d90d234a3a076afb2c635c1609536faa4 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 21 Feb 2011 10:58:13 +0100 Subject: x86-64, NUMA: Do not scan two times for setup_node_bootmem() By the time setup_node_bootmem() is called, all the memblocks are already registered. As node_data is allocated from these memblocks, calling it more than once doesn't make any difference. Drop the loop. tj: Dropped comment referencing to the old behavior as suggested by David and rephrased the description. 
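After the change, node registration reduces to a single pass over node_possible_map; roughly (condensed from the hunk below, using the numa_meminfo pointer mi):

	for_each_node_mask(nid, node_possible_map) {
		u64 start = (u64)max_pfn << PAGE_SHIFT;
		u64 end = 0;

		/* grow [start, end) to cover every memblk of this node */
		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		/* register the node once; memblocks are already in place */
		if (start < end)
			setup_node_bootmem(nid, start, end);
	}
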
Signed-off-by: Yinghai Lu Acked-by: David Rientjes Signed-off-by: Tejun Heo diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index f6d85e3..6e4ee96 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -480,7 +480,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) static int __init numa_register_memblks(struct numa_meminfo *mi) { - int i, j, nid; + int i, nid; /* Account for nodes with cpus and no memory */ node_possible_map = numa_nodes_parsed; @@ -506,28 +506,20 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) init_memory_mapping_high(); - /* - * Finally register nodes. Do it twice in case setup_node_bootmem - * missed one due to missing bootmem. - */ - for (i = 0; i < 2; i++) { - for_each_node_mask(nid, node_possible_map) { - u64 start = (u64)max_pfn << PAGE_SHIFT; - u64 end = 0; + /* Finally register nodes. */ + for_each_node_mask(nid, node_possible_map) { + u64 start = (u64)max_pfn << PAGE_SHIFT; + u64 end = 0; - if (node_online(nid)) + for (i = 0; i < mi->nr_blks; i++) { + if (nid != mi->blk[i].nid) continue; - - for (j = 0; j < mi->nr_blks; j++) { - if (nid != mi->blk[j].nid) - continue; - start = min(mi->blk[j].start, start); - end = max(mi->blk[j].end, end); - } - - if (start < end) - setup_node_bootmem(nid, start, end); + start = min(mi->blk[i].start, start); + end = max(mi->blk[i].end, end); } + + if (start < end) + setup_node_bootmem(nid, start, end); } return 0; -- cgit v0.10.2 From fbe99959d1db85222829a64d869dcab704ac7ec8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 22 Feb 2011 11:10:08 +0100 Subject: x86-64, NUMA: Prepare numa_emulation() for moving NUMA emulation into a separate file Update numa_emulation() such that, it - takes @numa_meminfo and @numa_dist_cnt instead of directly referencing the global variables. - copies the distance table by iterating each distance with node_distance() instead of memcpy'ing the distance table. - tests emu_cmdline to determine whether emulation is requested and fills emu_nid_to_phys[] with identity mapping if emulation is not used. This allows the caller to call numa_emulation() unconditionally and makes return value unncessary. - defines dummy version if CONFIG_NUMA_EMU is disabled. This patch doesn't introduce any behavior change. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Ingo Molnar diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 6e4ee96..980d514 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -789,17 +789,20 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, * Sets up the system RAM area from start_pfn to last_pfn according to the * numa=fake command-line option. */ -static bool __init numa_emulation(void) +static void __init numa_emulation(struct numa_meminfo *numa_meminfo, + int numa_dist_cnt) { static struct numa_meminfo ei __initdata; static struct numa_meminfo pi __initdata; const u64 max_addr = max_pfn << PAGE_SHIFT; - int phys_dist_cnt = numa_distance_cnt; u8 *phys_dist = NULL; int i, j, ret; + if (!emu_cmdline) + goto no_emu; + memset(&ei, 0, sizeof(ei)); - pi = numa_meminfo; + pi = *numa_meminfo; for (i = 0; i < MAX_NUMNODES; i++) emu_nid_to_phys[i] = NUMA_NO_NODE; @@ -822,19 +825,19 @@ static bool __init numa_emulation(void) } if (ret < 0) - return false; + goto no_emu; if (numa_cleanup_meminfo(&ei) < 0) { pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n"); - return false; + goto no_emu; } /* * Copy the original distance table. 
It's temporary so no need to * reserve it. */ - if (phys_dist_cnt) { - size_t size = phys_dist_cnt * sizeof(numa_distance[0]); + if (numa_dist_cnt) { + size_t size = numa_dist_cnt * sizeof(phys_dist[0]); u64 phys; phys = memblock_find_in_range(0, @@ -842,14 +845,18 @@ static bool __init numa_emulation(void) size, PAGE_SIZE); if (phys == MEMBLOCK_ERROR) { pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); - return false; + goto no_emu; } phys_dist = __va(phys); - memcpy(phys_dist, numa_distance, size); + + for (i = 0; i < numa_dist_cnt; i++) + for (j = 0; j < numa_dist_cnt; j++) + phys_dist[i * numa_dist_cnt + j] = + node_distance(i, j); } /* commit */ - numa_meminfo = ei; + *numa_meminfo = ei; /* * Transform __apicid_to_node table to use emulated nids by @@ -878,18 +885,27 @@ static bool __init numa_emulation(void) int physj = emu_nid_to_phys[j]; int dist; - if (physi >= phys_dist_cnt || physj >= phys_dist_cnt) + if (physi >= numa_dist_cnt || physj >= numa_dist_cnt) dist = physi == physj ? LOCAL_DISTANCE : REMOTE_DISTANCE; else - dist = phys_dist[physi * phys_dist_cnt + physj]; + dist = phys_dist[physi * numa_dist_cnt + physj]; numa_set_distance(i, j, dist); } } - return true; + return; + +no_emu: + /* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */ + for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) + emu_nid_to_phys[i] = i; } -#endif /* CONFIG_NUMA_EMU */ +#else /* CONFIG_NUMA_EMU */ +static inline void numa_emulation(struct numa_meminfo *numa_meminfo, + int numa_dist_cnt) +{ } +#endif /* CONFIG_NUMA_EMU */ static int __init dummy_numa_init(void) { @@ -937,15 +953,9 @@ void __init initmem_init(void) if (numa_cleanup_meminfo(&numa_meminfo) < 0) continue; -#ifdef CONFIG_NUMA_EMU - /* - * If requested, try emulation. If emulation is not used, - * build identity emu_nid_to_phys[] for numa_add_cpu() - */ - if (!emu_cmdline || !numa_emulation()) - for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) - emu_nid_to_phys[j] = j; -#endif + + numa_emulation(&numa_meminfo, numa_distance_cnt); + if (numa_register_memblks(&numa_meminfo) < 0) continue; -- cgit v0.10.2 From b8ef9172b2aad7eeb1fcd37a9e632c7b24da1f64 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 22 Feb 2011 11:10:08 +0100 Subject: x86-64, NUMA: Move NUMA emulation into numa_emulation.c Create numa_emulation.c and move all NUMA emulation code there. The definitions of struct numa_memblk and numa_meminfo are moved to numa_64.h. Also, numa_remove_memblk_from(), numa_cleanup_meminfo(), numa_reset_distance() along with numa_emulation() are made global. - v2: Internal declarations moved to numa_internal.h as suggested by Yinghai. 
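With the internal declarations collected in numa_internal.h, the interface shared by numa_64.c and numa_emulation.c looks roughly like the sketch below (illustrative only; the exact header, include guard and #includes are not reproduced in this excerpt and may differ):

	/* numa_internal.h -- illustrative sketch, not the verbatim file */
	#ifndef __X86_MM_NUMA_INTERNAL_H
	#define __X86_MM_NUMA_INTERNAL_H

	#include <linux/types.h>
	#include <asm/numa.h>

	struct numa_memblk {
		u64 start;
		u64 end;
		int nid;
	};

	struct numa_meminfo {
		int nr_blks;
		struct numa_memblk blk[NR_NODE_MEMBLKS];
	};

	void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
	int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
	void __init numa_reset_distance(void);

	#ifdef CONFIG_NUMA_EMU
	void __init numa_emulation(struct numa_meminfo *numa_meminfo,
				   int numa_dist_cnt);
	#else
	static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
					  int numa_dist_cnt)
	{ }
	#endif

	#endif /* __X86_MM_NUMA_INTERNAL_H */
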
Signed-off-by: Tejun Heo Acked-by: Yinghai Lu Cc: Ingo Molnar diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 09df2f9..3e608ed 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o +obj-$(CONFIG_NUMA_EMU) += numa_emulation.o obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 980d514..45a361b 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -18,20 +18,10 @@ #include #include #include -#include #include #include -struct numa_memblk { - u64 start; - u64 end; - int nid; -}; - -struct numa_meminfo { - int nr_blks; - struct numa_memblk blk[NR_NODE_MEMBLKS]; -}; +#include "numa_internal.h" struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); @@ -215,7 +205,7 @@ static int __init numa_add_memblk_to(int nid, u64 start, u64 end, return 0; } -static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) +void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) { mi->nr_blks--; memmove(&mi->blk[idx], &mi->blk[idx + 1], @@ -273,7 +263,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) node_set_online(nodeid); } -static int __init numa_cleanup_meminfo(struct numa_meminfo *mi) +int __init numa_cleanup_meminfo(struct numa_meminfo *mi) { const u64 low = 0; const u64 high = (u64)max_pfn << PAGE_SHIFT; @@ -367,7 +357,7 @@ static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, * Reset distance table. The current table is freed. The next * numa_set_distance() call will create a new one. */ -static void __init numa_reset_distance(void) +void __init numa_reset_distance(void) { size_t size; @@ -525,388 +515,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) return 0; } -#ifdef CONFIG_NUMA_EMU -/* Numa emulation */ -static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; -static char *emu_cmdline __initdata; - -void __init numa_emu_cmdline(char *str) -{ - emu_cmdline = str; -} - -static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) -{ - int i; - - for (i = 0; i < mi->nr_blks; i++) - if (mi->blk[i].nid == nid) - return i; - return -ENOENT; -} - -/* - * Sets up nid to range from @start to @end. The return value is -errno if - * something went wrong, 0 otherwise. - */ -static int __init emu_setup_memblk(struct numa_meminfo *ei, - struct numa_meminfo *pi, - int nid, int phys_blk, u64 size) -{ - struct numa_memblk *eb = &ei->blk[ei->nr_blks]; - struct numa_memblk *pb = &pi->blk[phys_blk]; - - if (ei->nr_blks >= NR_NODE_MEMBLKS) { - pr_err("NUMA: Too many emulated memblks, failing emulation\n"); - return -EINVAL; - } - - ei->nr_blks++; - eb->start = pb->start; - eb->end = pb->start + size; - eb->nid = nid; - - if (emu_nid_to_phys[nid] == NUMA_NO_NODE) - emu_nid_to_phys[nid] = pb->nid; - - pb->start += size; - if (pb->start >= pb->end) { - WARN_ON_ONCE(pb->start > pb->end); - numa_remove_memblk_from(phys_blk, pi); - } - - printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, - eb->start, eb->end, (eb->end - eb->start) >> 20); - return 0; -} - -/* - * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr - * to max_addr. The return value is the number of nodes allocated. 
- */ -static int __init split_nodes_interleave(struct numa_meminfo *ei, - struct numa_meminfo *pi, - u64 addr, u64 max_addr, int nr_nodes) -{ - nodemask_t physnode_mask = NODE_MASK_NONE; - u64 size; - int big; - int nid = 0; - int i, ret; - - if (nr_nodes <= 0) - return -1; - if (nr_nodes > MAX_NUMNODES) { - pr_info("numa=fake=%d too large, reducing to %d\n", - nr_nodes, MAX_NUMNODES); - nr_nodes = MAX_NUMNODES; - } - - size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes; - /* - * Calculate the number of big nodes that can be allocated as a result - * of consolidating the remainder. - */ - big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) / - FAKE_NODE_MIN_SIZE; - - size &= FAKE_NODE_MIN_HASH_MASK; - if (!size) { - pr_err("Not enough memory for each node. " - "NUMA emulation disabled.\n"); - return -1; - } - - for (i = 0; i < pi->nr_blks; i++) - node_set(pi->blk[i].nid, physnode_mask); - - /* - * Continue to fill physical nodes with fake nodes until there is no - * memory left on any of them. - */ - while (nodes_weight(physnode_mask)) { - for_each_node_mask(i, physnode_mask) { - u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); - u64 start, limit, end; - int phys_blk; - - phys_blk = emu_find_memblk_by_nid(i, pi); - if (phys_blk < 0) { - node_clear(i, physnode_mask); - continue; - } - start = pi->blk[phys_blk].start; - limit = pi->blk[phys_blk].end; - end = start + size; - - if (nid < big) - end += FAKE_NODE_MIN_SIZE; - - /* - * Continue to add memory to this fake node if its - * non-reserved memory is less than the per-node size. - */ - while (end - start - - memblock_x86_hole_size(start, end) < size) { - end += FAKE_NODE_MIN_SIZE; - if (end > limit) { - end = limit; - break; - } - } - - /* - * If there won't be at least FAKE_NODE_MIN_SIZE of - * non-reserved memory in ZONE_DMA32 for the next node, - * this one must extend to the boundary. - */ - if (end < dma32_end && dma32_end - end - - memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) - end = dma32_end; - - /* - * If there won't be enough non-reserved memory for the - * next node, this one must extend to the end of the - * physical node. - */ - if (limit - end - - memblock_x86_hole_size(end, limit) < size) - end = limit; - - ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, - phys_blk, - min(end, limit) - start); - if (ret < 0) - return ret; - } - } - return 0; -} - -/* - * Returns the end address of a node so that there is at least `size' amount of - * non-reserved memory or `max_addr' is reached. - */ -static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) -{ - u64 end = start + size; - - while (end - start - memblock_x86_hole_size(start, end) < size) { - end += FAKE_NODE_MIN_SIZE; - if (end > max_addr) { - end = max_addr; - break; - } - } - return end; -} - -/* - * Sets up fake nodes of `size' interleaved over physical nodes ranging from - * `addr' to `max_addr'. The return value is the number of nodes allocated. - */ -static int __init split_nodes_size_interleave(struct numa_meminfo *ei, - struct numa_meminfo *pi, - u64 addr, u64 max_addr, u64 size) -{ - nodemask_t physnode_mask = NODE_MASK_NONE; - u64 min_size; - int nid = 0; - int i, ret; - - if (!size) - return -1; - /* - * The limit on emulated nodes is MAX_NUMNODES, so the size per node is - * increased accordingly if the requested size is too small. This - * creates a uniform distribution of node sizes across the entire - * machine (but not necessarily over physical nodes). 
- */ - min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / - MAX_NUMNODES; - min_size = max(min_size, FAKE_NODE_MIN_SIZE); - if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) - min_size = (min_size + FAKE_NODE_MIN_SIZE) & - FAKE_NODE_MIN_HASH_MASK; - if (size < min_size) { - pr_err("Fake node size %LuMB too small, increasing to %LuMB\n", - size >> 20, min_size >> 20); - size = min_size; - } - size &= FAKE_NODE_MIN_HASH_MASK; - - for (i = 0; i < pi->nr_blks; i++) - node_set(pi->blk[i].nid, physnode_mask); - - /* - * Fill physical nodes with fake nodes of size until there is no memory - * left on any of them. - */ - while (nodes_weight(physnode_mask)) { - for_each_node_mask(i, physnode_mask) { - u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT; - u64 start, limit, end; - int phys_blk; - - phys_blk = emu_find_memblk_by_nid(i, pi); - if (phys_blk < 0) { - node_clear(i, physnode_mask); - continue; - } - start = pi->blk[phys_blk].start; - limit = pi->blk[phys_blk].end; - - end = find_end_of_node(start, limit, size); - /* - * If there won't be at least FAKE_NODE_MIN_SIZE of - * non-reserved memory in ZONE_DMA32 for the next node, - * this one must extend to the boundary. - */ - if (end < dma32_end && dma32_end - end - - memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) - end = dma32_end; - - /* - * If there won't be enough non-reserved memory for the - * next node, this one must extend to the end of the - * physical node. - */ - if (limit - end - - memblock_x86_hole_size(end, limit) < size) - end = limit; - - ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, - phys_blk, - min(end, limit) - start); - if (ret < 0) - return ret; - } - } - return 0; -} - -/* - * Sets up the system RAM area from start_pfn to last_pfn according to the - * numa=fake command-line option. - */ -static void __init numa_emulation(struct numa_meminfo *numa_meminfo, - int numa_dist_cnt) -{ - static struct numa_meminfo ei __initdata; - static struct numa_meminfo pi __initdata; - const u64 max_addr = max_pfn << PAGE_SHIFT; - u8 *phys_dist = NULL; - int i, j, ret; - - if (!emu_cmdline) - goto no_emu; - - memset(&ei, 0, sizeof(ei)); - pi = *numa_meminfo; - - for (i = 0; i < MAX_NUMNODES; i++) - emu_nid_to_phys[i] = NUMA_NO_NODE; - - /* - * If the numa=fake command-line contains a 'M' or 'G', it represents - * the fixed node size. Otherwise, if it is just a single number N, - * split the system RAM into N fake nodes. - */ - if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) { - u64 size; - - size = memparse(emu_cmdline, &emu_cmdline); - ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size); - } else { - unsigned long n; - - n = simple_strtoul(emu_cmdline, NULL, 0); - ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n); - } - - if (ret < 0) - goto no_emu; - - if (numa_cleanup_meminfo(&ei) < 0) { - pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n"); - goto no_emu; - } - - /* - * Copy the original distance table. It's temporary so no need to - * reserve it. 
- */ - if (numa_dist_cnt) { - size_t size = numa_dist_cnt * sizeof(phys_dist[0]); - u64 phys; - - phys = memblock_find_in_range(0, - (u64)max_pfn_mapped << PAGE_SHIFT, - size, PAGE_SIZE); - if (phys == MEMBLOCK_ERROR) { - pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); - goto no_emu; - } - phys_dist = __va(phys); - - for (i = 0; i < numa_dist_cnt; i++) - for (j = 0; j < numa_dist_cnt; j++) - phys_dist[i * numa_dist_cnt + j] = - node_distance(i, j); - } - - /* commit */ - *numa_meminfo = ei; - - /* - * Transform __apicid_to_node table to use emulated nids by - * reverse-mapping phys_nid. The maps should always exist but fall - * back to zero just in case. - */ - for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { - if (__apicid_to_node[i] == NUMA_NO_NODE) - continue; - for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) - if (__apicid_to_node[i] == emu_nid_to_phys[j]) - break; - __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0; - } - - /* make sure all emulated nodes are mapped to a physical node */ - for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) - if (emu_nid_to_phys[i] == NUMA_NO_NODE) - emu_nid_to_phys[i] = 0; - - /* transform distance table */ - numa_reset_distance(); - for (i = 0; i < MAX_NUMNODES; i++) { - for (j = 0; j < MAX_NUMNODES; j++) { - int physi = emu_nid_to_phys[i]; - int physj = emu_nid_to_phys[j]; - int dist; - - if (physi >= numa_dist_cnt || physj >= numa_dist_cnt) - dist = physi == physj ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - else - dist = phys_dist[physi * numa_dist_cnt + physj]; - - numa_set_distance(i, j, dist); - } - } - return; - -no_emu: - /* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */ - for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) - emu_nid_to_phys[i] = i; -} -#else /* CONFIG_NUMA_EMU */ -static inline void numa_emulation(struct numa_meminfo *numa_meminfo, - int numa_dist_cnt) -{ } -#endif /* CONFIG_NUMA_EMU */ - static int __init dummy_numa_init(void) { printk(KERN_INFO "%s\n", @@ -994,83 +602,3 @@ int __cpuinit numa_cpu_node(int cpu) return __apicid_to_node[apicid]; return NUMA_NO_NODE; } - -/* - * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use - * of 64bit specific data structures. The distinction is artificial and - * should be removed. numa_{add|remove}_cpu() are implemented in numa.c - * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when - * enabled. - * - * NUMA emulation is planned to be made generic and the following and other - * related code should be moved to numa.c. - */ -#ifdef CONFIG_NUMA_EMU -# ifndef CONFIG_DEBUG_PER_CPU_MAPS -void __cpuinit numa_add_cpu(int cpu) -{ - int physnid, nid; - - nid = numa_cpu_node(cpu); - if (nid == NUMA_NO_NODE) - nid = early_cpu_to_node(cpu); - BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); - - physnid = emu_nid_to_phys[nid]; - - /* - * Map the cpu to each emulated node that is allocated on the physical - * node of the cpu's apic id. 
- */ - for_each_online_node(nid) - if (emu_nid_to_phys[nid] == physnid) - cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); -} - -void __cpuinit numa_remove_cpu(int cpu) -{ - int i; - - for_each_online_node(i) - cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); -} -# else /* !CONFIG_DEBUG_PER_CPU_MAPS */ -static void __cpuinit numa_set_cpumask(int cpu, int enable) -{ - struct cpumask *mask; - int nid, physnid, i; - - nid = early_cpu_to_node(cpu); - if (nid == NUMA_NO_NODE) { - /* early_cpu_to_node() already emits a warning and trace */ - return; - } - - physnid = emu_nid_to_phys[nid]; - - for_each_online_node(i) { - if (emu_nid_to_phys[nid] != physnid) - continue; - - mask = debug_cpumask_set_cpu(cpu, enable); - if (!mask) - return; - - if (enable) - cpumask_set_cpu(cpu, mask); - else - cpumask_clear_cpu(cpu, mask); - } -} - -void __cpuinit numa_add_cpu(int cpu) -{ - numa_set_cpumask(cpu, 1); -} - -void __cpuinit numa_remove_cpu(int cpu) -{ - numa_set_cpumask(cpu, 0); -} -# endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ -#endif /* CONFIG_NUMA_EMU */ diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c new file mode 100644 index 0000000..23fa2d0 --- /dev/null +++ b/arch/x86/mm/numa_emulation.c @@ -0,0 +1,452 @@ +/* + * NUMA emulation + */ +#include +#include +#include +#include +#include + +#include "numa_internal.h" + +static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; +static char *emu_cmdline __initdata; + +void __init numa_emu_cmdline(char *str) +{ + emu_cmdline = str; +} + +static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) +{ + int i; + + for (i = 0; i < mi->nr_blks; i++) + if (mi->blk[i].nid == nid) + return i; + return -ENOENT; +} + +/* + * Sets up nid to range from @start to @end. The return value is -errno if + * something went wrong, 0 otherwise. + */ +static int __init emu_setup_memblk(struct numa_meminfo *ei, + struct numa_meminfo *pi, + int nid, int phys_blk, u64 size) +{ + struct numa_memblk *eb = &ei->blk[ei->nr_blks]; + struct numa_memblk *pb = &pi->blk[phys_blk]; + + if (ei->nr_blks >= NR_NODE_MEMBLKS) { + pr_err("NUMA: Too many emulated memblks, failing emulation\n"); + return -EINVAL; + } + + ei->nr_blks++; + eb->start = pb->start; + eb->end = pb->start + size; + eb->nid = nid; + + if (emu_nid_to_phys[nid] == NUMA_NO_NODE) + emu_nid_to_phys[nid] = pb->nid; + + pb->start += size; + if (pb->start >= pb->end) { + WARN_ON_ONCE(pb->start > pb->end); + numa_remove_memblk_from(phys_blk, pi); + } + + printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, + eb->start, eb->end, (eb->end - eb->start) >> 20); + return 0; +} + +/* + * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr + * to max_addr. The return value is the number of nodes allocated. + */ +static int __init split_nodes_interleave(struct numa_meminfo *ei, + struct numa_meminfo *pi, + u64 addr, u64 max_addr, int nr_nodes) +{ + nodemask_t physnode_mask = NODE_MASK_NONE; + u64 size; + int big; + int nid = 0; + int i, ret; + + if (nr_nodes <= 0) + return -1; + if (nr_nodes > MAX_NUMNODES) { + pr_info("numa=fake=%d too large, reducing to %d\n", + nr_nodes, MAX_NUMNODES); + nr_nodes = MAX_NUMNODES; + } + + size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes; + /* + * Calculate the number of big nodes that can be allocated as a result + * of consolidating the remainder. 
+ */ + big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) / + FAKE_NODE_MIN_SIZE; + + size &= FAKE_NODE_MIN_HASH_MASK; + if (!size) { + pr_err("Not enough memory for each node. " + "NUMA emulation disabled.\n"); + return -1; + } + + for (i = 0; i < pi->nr_blks; i++) + node_set(pi->blk[i].nid, physnode_mask); + + /* + * Continue to fill physical nodes with fake nodes until there is no + * memory left on any of them. + */ + while (nodes_weight(physnode_mask)) { + for_each_node_mask(i, physnode_mask) { + u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); + u64 start, limit, end; + int phys_blk; + + phys_blk = emu_find_memblk_by_nid(i, pi); + if (phys_blk < 0) { + node_clear(i, physnode_mask); + continue; + } + start = pi->blk[phys_blk].start; + limit = pi->blk[phys_blk].end; + end = start + size; + + if (nid < big) + end += FAKE_NODE_MIN_SIZE; + + /* + * Continue to add memory to this fake node if its + * non-reserved memory is less than the per-node size. + */ + while (end - start - + memblock_x86_hole_size(start, end) < size) { + end += FAKE_NODE_MIN_SIZE; + if (end > limit) { + end = limit; + break; + } + } + + /* + * If there won't be at least FAKE_NODE_MIN_SIZE of + * non-reserved memory in ZONE_DMA32 for the next node, + * this one must extend to the boundary. + */ + if (end < dma32_end && dma32_end - end - + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + end = dma32_end; + + /* + * If there won't be enough non-reserved memory for the + * next node, this one must extend to the end of the + * physical node. + */ + if (limit - end - + memblock_x86_hole_size(end, limit) < size) + end = limit; + + ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, + phys_blk, + min(end, limit) - start); + if (ret < 0) + return ret; + } + } + return 0; +} + +/* + * Returns the end address of a node so that there is at least `size' amount of + * non-reserved memory or `max_addr' is reached. + */ +static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) +{ + u64 end = start + size; + + while (end - start - memblock_x86_hole_size(start, end) < size) { + end += FAKE_NODE_MIN_SIZE; + if (end > max_addr) { + end = max_addr; + break; + } + } + return end; +} + +/* + * Sets up fake nodes of `size' interleaved over physical nodes ranging from + * `addr' to `max_addr'. The return value is the number of nodes allocated. + */ +static int __init split_nodes_size_interleave(struct numa_meminfo *ei, + struct numa_meminfo *pi, + u64 addr, u64 max_addr, u64 size) +{ + nodemask_t physnode_mask = NODE_MASK_NONE; + u64 min_size; + int nid = 0; + int i, ret; + + if (!size) + return -1; + /* + * The limit on emulated nodes is MAX_NUMNODES, so the size per node is + * increased accordingly if the requested size is too small. This + * creates a uniform distribution of node sizes across the entire + * machine (but not necessarily over physical nodes). + */ + min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / + MAX_NUMNODES; + min_size = max(min_size, FAKE_NODE_MIN_SIZE); + if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) + min_size = (min_size + FAKE_NODE_MIN_SIZE) & + FAKE_NODE_MIN_HASH_MASK; + if (size < min_size) { + pr_err("Fake node size %LuMB too small, increasing to %LuMB\n", + size >> 20, min_size >> 20); + size = min_size; + } + size &= FAKE_NODE_MIN_HASH_MASK; + + for (i = 0; i < pi->nr_blks; i++) + node_set(pi->blk[i].nid, physnode_mask); + + /* + * Fill physical nodes with fake nodes of size until there is no memory + * left on any of them. 
+ */ + while (nodes_weight(physnode_mask)) { + for_each_node_mask(i, physnode_mask) { + u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT; + u64 start, limit, end; + int phys_blk; + + phys_blk = emu_find_memblk_by_nid(i, pi); + if (phys_blk < 0) { + node_clear(i, physnode_mask); + continue; + } + start = pi->blk[phys_blk].start; + limit = pi->blk[phys_blk].end; + + end = find_end_of_node(start, limit, size); + /* + * If there won't be at least FAKE_NODE_MIN_SIZE of + * non-reserved memory in ZONE_DMA32 for the next node, + * this one must extend to the boundary. + */ + if (end < dma32_end && dma32_end - end - + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + end = dma32_end; + + /* + * If there won't be enough non-reserved memory for the + * next node, this one must extend to the end of the + * physical node. + */ + if (limit - end - + memblock_x86_hole_size(end, limit) < size) + end = limit; + + ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, + phys_blk, + min(end, limit) - start); + if (ret < 0) + return ret; + } + } + return 0; +} + +/* + * Sets up the system RAM area from start_pfn to last_pfn according to the + * numa=fake command-line option. + */ +void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) +{ + static struct numa_meminfo ei __initdata; + static struct numa_meminfo pi __initdata; + const u64 max_addr = max_pfn << PAGE_SHIFT; + u8 *phys_dist = NULL; + int i, j, ret; + + if (!emu_cmdline) + goto no_emu; + + memset(&ei, 0, sizeof(ei)); + pi = *numa_meminfo; + + for (i = 0; i < MAX_NUMNODES; i++) + emu_nid_to_phys[i] = NUMA_NO_NODE; + + /* + * If the numa=fake command-line contains a 'M' or 'G', it represents + * the fixed node size. Otherwise, if it is just a single number N, + * split the system RAM into N fake nodes. + */ + if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) { + u64 size; + + size = memparse(emu_cmdline, &emu_cmdline); + ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size); + } else { + unsigned long n; + + n = simple_strtoul(emu_cmdline, NULL, 0); + ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n); + } + + if (ret < 0) + goto no_emu; + + if (numa_cleanup_meminfo(&ei) < 0) { + pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n"); + goto no_emu; + } + + /* + * Copy the original distance table. It's temporary so no need to + * reserve it. + */ + if (numa_dist_cnt) { + size_t size = numa_dist_cnt * sizeof(phys_dist[0]); + u64 phys; + + phys = memblock_find_in_range(0, + (u64)max_pfn_mapped << PAGE_SHIFT, + size, PAGE_SIZE); + if (phys == MEMBLOCK_ERROR) { + pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); + goto no_emu; + } + phys_dist = __va(phys); + + for (i = 0; i < numa_dist_cnt; i++) + for (j = 0; j < numa_dist_cnt; j++) + phys_dist[i * numa_dist_cnt + j] = + node_distance(i, j); + } + + /* commit */ + *numa_meminfo = ei; + + /* + * Transform __apicid_to_node table to use emulated nids by + * reverse-mapping phys_nid. The maps should always exist but fall + * back to zero just in case. + */ + for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { + if (__apicid_to_node[i] == NUMA_NO_NODE) + continue; + for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) + if (__apicid_to_node[i] == emu_nid_to_phys[j]) + break; + __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? 
j : 0; + } + + /* make sure all emulated nodes are mapped to a physical node */ + for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) + if (emu_nid_to_phys[i] == NUMA_NO_NODE) + emu_nid_to_phys[i] = 0; + + /* transform distance table */ + numa_reset_distance(); + for (i = 0; i < MAX_NUMNODES; i++) { + for (j = 0; j < MAX_NUMNODES; j++) { + int physi = emu_nid_to_phys[i]; + int physj = emu_nid_to_phys[j]; + int dist; + + if (physi >= numa_dist_cnt || physj >= numa_dist_cnt) + dist = physi == physj ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + else + dist = phys_dist[physi * numa_dist_cnt + physj]; + + numa_set_distance(i, j, dist); + } + } + return; + +no_emu: + /* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */ + for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) + emu_nid_to_phys[i] = i; +} + +#ifndef CONFIG_DEBUG_PER_CPU_MAPS +void __cpuinit numa_add_cpu(int cpu) +{ + int physnid, nid; + + nid = numa_cpu_node(cpu); + if (nid == NUMA_NO_NODE) + nid = early_cpu_to_node(cpu); + BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); + + physnid = emu_nid_to_phys[nid]; + + /* + * Map the cpu to each emulated node that is allocated on the physical + * node of the cpu's apic id. + */ + for_each_online_node(nid) + if (emu_nid_to_phys[nid] == physnid) + cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + int i; + + for_each_online_node(i) + cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); +} +#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ +static void __cpuinit numa_set_cpumask(int cpu, int enable) +{ + struct cpumask *mask; + int nid, physnid, i; + + nid = early_cpu_to_node(cpu); + if (nid == NUMA_NO_NODE) { + /* early_cpu_to_node() already emits a warning and trace */ + return; + } + + physnid = emu_nid_to_phys[nid]; + + for_each_online_node(i) { + if (emu_nid_to_phys[nid] != physnid) + continue; + + mask = debug_cpumask_set_cpu(cpu, enable); + if (!mask) + return; + + if (enable) + cpumask_set_cpu(cpu, mask); + else + cpumask_clear_cpu(cpu, mask); + } +} + +void __cpuinit numa_add_cpu(int cpu) +{ + numa_set_cpumask(cpu, 1); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + numa_set_cpumask(cpu, 0); +} +#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h new file mode 100644 index 0000000..ef2d973 --- /dev/null +++ b/arch/x86/mm/numa_internal.h @@ -0,0 +1,31 @@ +#ifndef __X86_MM_NUMA_INTERNAL_H +#define __X86_MM_NUMA_INTERNAL_H + +#include +#include + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; + +void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi); +int __init numa_cleanup_meminfo(struct numa_meminfo *mi); +void __init numa_reset_distance(void); + +#ifdef CONFIG_NUMA_EMU +void __init numa_emulation(struct numa_meminfo *numa_meminfo, + int numa_dist_cnt); +#else +static inline void numa_emulation(struct numa_meminfo *numa_meminfo, + int numa_dist_cnt) +{ } +#endif + +#endif /* __X86_MM_NUMA_INTERNAL_H */ -- cgit v0.10.2 From 90e6b677b47ff8c5ba1637941af6b9f92723b003 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 22 Feb 2011 11:10:08 +0100 Subject: x86-64, NUMA: Add proper function comments to global functions Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Ingo Molnar diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 45a361b..848381b 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -205,6 +205,14 @@ static int __init numa_add_memblk_to(int 
nid, u64 start, u64 end, return 0; } +/** + * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo + * @idx: Index of memblk to remove + * @mi: numa_meminfo to remove memblk from + * + * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and + * decrementing @mi->nr_blks. + */ void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) { mi->nr_blks--; @@ -212,6 +220,17 @@ void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) (mi->nr_blks - idx) * sizeof(mi->blk[0])); } +/** + * numa_add_memblk - Add one numa_memblk to numa_meminfo + * @nid: NUMA node ID of the new memblk + * @start: Start address of the new memblk + * @end: End address of the new memblk + * + * Add a new memblk to the default numa_meminfo. + * + * RETURNS: + * 0 on success, -errno on failure. + */ int __init numa_add_memblk(int nid, u64 start, u64 end) { return numa_add_memblk_to(nid, start, end, &numa_meminfo); @@ -263,6 +282,16 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) node_set_online(nodeid); } +/** + * numa_cleanup_meminfo - Cleanup a numa_meminfo + * @mi: numa_meminfo to clean up + * + * Sanitize @mi by merging and removing unncessary memblks. Also check for + * conflicts and clear unused memblks. + * + * RETURNS: + * 0 on success, -errno on failure. + */ int __init numa_cleanup_meminfo(struct numa_meminfo *mi) { const u64 low = 0; @@ -353,9 +382,11 @@ static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, node_set(mi->blk[i].nid, *nodemask); } -/* - * Reset distance table. The current table is freed. The next - * numa_set_distance() call will create a new one. +/** + * numa_reset_distance - Reset NUMA distance table + * + * The current table is freed. The next numa_set_distance() call will + * create a new one. */ void __init numa_reset_distance(void) { @@ -370,10 +401,15 @@ void __init numa_reset_distance(void) numa_distance = NULL; } -/* - * Set the distance between node @from to @to to @distance. If distance - * table doesn't exist, one which is large enough to accomodate all the - * currently known nodes will be created. +/** + * numa_set_distance - Set NUMA distance from one NUMA to another + * @from: the 'from' node to set distance + * @to: the 'to' node to set distance + * @distance: NUMA distance + * + * Set the distance from node @from to @to to @distance. If distance table + * doesn't exist, one which is large enough to accomodate all the currently + * known nodes will be created. */ void __init numa_set_distance(int from, int to, int distance) { diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index 23fa2d0..607a2e8 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -267,9 +267,32 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, return 0; } -/* - * Sets up the system RAM area from start_pfn to last_pfn according to the - * numa=fake command-line option. +/** + * numa_emulation - Emulate NUMA nodes + * @numa_meminfo: NUMA configuration to massage + * @numa_dist_cnt: The size of the physical NUMA distance table + * + * Emulate NUMA nodes according to the numa=fake kernel parameter. + * @numa_meminfo contains the physical memory configuration and is modified + * to reflect the emulated configuration on success. @numa_dist_cnt is + * used to determine the size of the physical distance table. + * + * On success, the following modifications are made. + * + * - @numa_meminfo is updated to reflect the emulated nodes. 
+ * + * - __apicid_to_node[] is updated such that APIC IDs are mapped to the + * emulated nodes. + * + * - NUMA distance table is rebuilt to represent distances between emulated + * nodes. The distances are determined considering how emulated nodes + * are mapped to physical nodes and match the actual distances. + * + * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical + * nodes. This is used by numa_add_cpu() and numa_remove_cpu(). + * + * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with + * identity mapping and no other modification is made. */ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) { -- cgit v0.10.2 From 2bf50555b0920be7e29d3823f6bbd20ee5920489 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 22 Feb 2011 11:18:49 +0100 Subject: x86-64, NUMA: Seperate out numa_alloc_distance() from numa_set_distance() Alloc code is much bigger the distance setting. Separate it out into numa_alloc_distance() for readability. -v2: Let alloc_numa_distance to return -ENOMEM on failing path, requested by tj. -tj: Description update. Minor tweaks including function name, location and return value check. Signed-off-by: Yinghai Lu Acked-by: David Rientjes Signed-off-by: Tejun Heo diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 848381b..cccc01d 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -401,6 +401,44 @@ void __init numa_reset_distance(void) numa_distance = NULL; } +static int __init numa_alloc_distance(void) +{ + nodemask_t nodes_parsed; + size_t size; + int i, j, cnt = 0; + u64 phys; + + /* size the new table and allocate it */ + nodes_parsed = numa_nodes_parsed; + numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); + + for_each_node_mask(i, nodes_parsed) + cnt = i; + size = ++cnt * sizeof(numa_distance[0]); + + phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, + size, PAGE_SIZE); + if (phys == MEMBLOCK_ERROR) { + pr_warning("NUMA: Warning: can't allocate distance table!\n"); + /* don't retry until explicitly reset */ + numa_distance = (void *)1LU; + return -ENOMEM; + } + memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); + + numa_distance = __va(phys); + numa_distance_cnt = cnt; + + /* fill with the default distances */ + for (i = 0; i < cnt; i++) + for (j = 0; j < cnt; j++) + numa_distance[i * cnt + j] = i == j ? 
+ LOCAL_DISTANCE : REMOTE_DISTANCE; + printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); + + return 0; +} + /** * numa_set_distance - Set NUMA distance from one NUMA to another * @from: the 'from' node to set distance @@ -413,41 +451,8 @@ void __init numa_reset_distance(void) */ void __init numa_set_distance(int from, int to, int distance) { - if (!numa_distance) { - nodemask_t nodes_parsed; - size_t size; - int i, j, cnt = 0; - u64 phys; - - /* size the new table and allocate it */ - nodes_parsed = numa_nodes_parsed; - numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); - - for_each_node_mask(i, nodes_parsed) - cnt = i; - size = ++cnt * sizeof(numa_distance[0]); - - phys = memblock_find_in_range(0, - (u64)max_pfn_mapped << PAGE_SHIFT, - size, PAGE_SIZE); - if (phys == MEMBLOCK_ERROR) { - pr_warning("NUMA: Warning: can't allocate distance table!\n"); - /* don't retry until explicitly reset */ - numa_distance = (void *)1LU; - return; - } - memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); - - numa_distance = __va(phys); - numa_distance_cnt = cnt; - - /* fill with the default distances */ - for (i = 0; i < cnt; i++) - for (j = 0; j < cnt; j++) - numa_distance[i * cnt + j] = i == j ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); - } + if (!numa_distance && numa_alloc_distance() < 0) + return; if (from >= numa_distance_cnt || to >= numa_distance_cnt) { printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", -- cgit v0.10.2 From 0932587328d9bd5b500a640fbaff3290c8d4cabf Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 24 Feb 2011 14:43:05 +0100 Subject: bootmem: Separate out CONFIG_NO_BOOTMEM code into nobootmem.c mm/bootmem.c contained code paths for both bootmem and no bootmem configurations. They implement about the same set of APIs in different ways and as a result bootmem.c contains massive amount of #ifdef CONFIG_NO_BOOTMEM. Separate out CONFIG_NO_BOOTMEM code into mm/nobootmem.c. As the common part is relatively small, duplicate them in nobootmem.c instead of creating a common file or ifdef'ing in bootmem.c. The followings are duplicated. * {min|max}_low_pfn, max_pfn, saved_max_pfn * free_bootmem_late() * ___alloc_bootmem() * __alloc_bootmem_low() The followings are applicable only to nobootmem and moved verbatim. * __free_pages_memory() * free_all_memory_core_early() The followings are not applicable to nobootmem and omitted in nobootmem.c. * reserve_bootmem_node() * reserve_bootmem() The rest split function bodies according to CONFIG_NO_BOOTMEM. Makefile is updated so that only either bootmem.c or nobootmem.c is built according to CONFIG_NO_BOOTMEM. This patch doesn't introduce any behavior change. -tj: Rewrote commit description. 
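
For a sense of what the split removes, here is a condensed sketch of one of the APIs that previously carried both implementations behind #ifdef (illustrative only, abridged from the hunks below):

	void __init free_bootmem(unsigned long addr, unsigned long size)
	{
		kmemleak_free_part(__va(addr), size);
	#ifdef CONFIG_NO_BOOTMEM
		/* nobootmem path: hand the range straight back to memblock */
		memblock_x86_free_range(addr, addr + size);
	#else
		/* bootmem path: clear the reservation bits in the bitmap */
		mark_bootmem(PFN_UP(addr), PFN_DOWN(addr + size), 0, 0);
	#endif
	}

After the split, bootmem.c keeps only the mark_bootmem() body and nobootmem.c only the memblock one.
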
Suggested-by: Ingo Molnar Signed-off-by: Yinghai Lu Acked-by: Andrew Morton Signed-off-by: Tejun Heo diff --git a/mm/Makefile b/mm/Makefile index 2b1b575..42a8326 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -7,7 +7,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \ mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ vmalloc.o pagewalk.o pgtable-generic.o -obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ +obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ maccess.o page_alloc.o page-writeback.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ @@ -15,6 +15,12 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ $(mmu-y) obj-y += init-mm.o +ifdef CONFIG_NO_BOOTMEM + obj-y += nobootmem.o +else + obj-y += bootmem.o +endif + obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o obj-$(CONFIG_BOUNCE) += bounce.o diff --git a/mm/bootmem.c b/mm/bootmem.c index 13b0caa..4403e2f 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -35,7 +35,6 @@ unsigned long max_pfn; unsigned long saved_max_pfn; #endif -#ifndef CONFIG_NO_BOOTMEM bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata; static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list); @@ -146,7 +145,7 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages) min_low_pfn = start; return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); } -#endif + /* * free_bootmem_late - free bootmem pages directly to page allocator * @addr: starting address of the range @@ -171,53 +170,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size) } } -#ifdef CONFIG_NO_BOOTMEM -static void __init __free_pages_memory(unsigned long start, unsigned long end) -{ - int i; - unsigned long start_aligned, end_aligned; - int order = ilog2(BITS_PER_LONG); - - start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1); - end_aligned = end & ~(BITS_PER_LONG - 1); - - if (end_aligned <= start_aligned) { - for (i = start; i < end; i++) - __free_pages_bootmem(pfn_to_page(i), 0); - - return; - } - - for (i = start; i < start_aligned; i++) - __free_pages_bootmem(pfn_to_page(i), 0); - - for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG) - __free_pages_bootmem(pfn_to_page(i), order); - - for (i = end_aligned; i < end; i++) - __free_pages_bootmem(pfn_to_page(i), 0); -} - -unsigned long __init free_all_memory_core_early(int nodeid) -{ - int i; - u64 start, end; - unsigned long count = 0; - struct range *range = NULL; - int nr_range; - - nr_range = get_free_all_memory_range(&range, nodeid); - - for (i = 0; i < nr_range; i++) { - start = range[i].start; - end = range[i].end; - count += end - start; - __free_pages_memory(start, end); - } - - return count; -} -#else static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) { int aligned; @@ -278,7 +230,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) return count; } -#endif /** * free_all_bootmem_node - release a node's free pages to the buddy allocator @@ -289,12 +240,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) { register_page_bootmem_info_node(pgdat); -#ifdef CONFIG_NO_BOOTMEM - /* free_all_memory_core_early(MAX_NUMNODES) will be called later */ - return 0; -#else return free_all_bootmem_core(pgdat->bdata); -#endif } /** @@ -304,16 +250,6 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) */ unsigned long __init 
free_all_bootmem(void) { -#ifdef CONFIG_NO_BOOTMEM - /* - * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id - * because in some case like Node0 doesnt have RAM installed - * low ram will be on Node1 - * Use MAX_NUMNODES will make sure all ranges in early_node_map[] - * will be used instead of only Node0 related - */ - return free_all_memory_core_early(MAX_NUMNODES); -#else unsigned long total_pages = 0; bootmem_data_t *bdata; @@ -321,10 +257,8 @@ unsigned long __init free_all_bootmem(void) total_pages += free_all_bootmem_core(bdata); return total_pages; -#endif } -#ifndef CONFIG_NO_BOOTMEM static void __init __free(bootmem_data_t *bdata, unsigned long sidx, unsigned long eidx) { @@ -419,7 +353,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end, } BUG(); } -#endif /** * free_bootmem_node - mark a page range as usable @@ -434,10 +367,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end, void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size) { -#ifdef CONFIG_NO_BOOTMEM - kmemleak_free_part(__va(physaddr), size); - memblock_x86_free_range(physaddr, physaddr + size); -#else unsigned long start, end; kmemleak_free_part(__va(physaddr), size); @@ -446,7 +375,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, end = PFN_DOWN(physaddr + size); mark_bootmem_node(pgdat->bdata, start, end, 0, 0); -#endif } /** @@ -460,10 +388,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, */ void __init free_bootmem(unsigned long addr, unsigned long size) { -#ifdef CONFIG_NO_BOOTMEM - kmemleak_free_part(__va(addr), size); - memblock_x86_free_range(addr, addr + size); -#else unsigned long start, end; kmemleak_free_part(__va(addr), size); @@ -472,7 +396,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size) end = PFN_DOWN(addr + size); mark_bootmem(start, end, 0, 0); -#endif } /** @@ -489,17 +412,12 @@ void __init free_bootmem(unsigned long addr, unsigned long size) int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size, int flags) { -#ifdef CONFIG_NO_BOOTMEM - panic("no bootmem"); - return 0; -#else unsigned long start, end; start = PFN_DOWN(physaddr); end = PFN_UP(physaddr + size); return mark_bootmem_node(pgdat->bdata, start, end, 1, flags); -#endif } /** @@ -515,20 +433,14 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, int __init reserve_bootmem(unsigned long addr, unsigned long size, int flags) { -#ifdef CONFIG_NO_BOOTMEM - panic("no bootmem"); - return 0; -#else unsigned long start, end; start = PFN_DOWN(addr); end = PFN_UP(addr + size); return mark_bootmem(start, end, 1, flags); -#endif } -#ifndef CONFIG_NO_BOOTMEM int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len, int flags) { @@ -685,33 +597,12 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata, #endif return NULL; } -#endif static void * __init ___alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) { -#ifdef CONFIG_NO_BOOTMEM - void *ptr; - - if (WARN_ON_ONCE(slab_is_available())) - return kzalloc(size, GFP_NOWAIT); - -restart: - - ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit); - - if (ptr) - return ptr; - - if (goal != 0) { - goal = 0; - goto restart; - } - - return NULL; -#else bootmem_data_t *bdata; void *region; @@ -737,7 +628,6 @@ restart: } return NULL; -#endif } /** @@ -758,10 +648,6 @@ void * 
__init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, { unsigned long limit = 0; -#ifdef CONFIG_NO_BOOTMEM - limit = -1UL; -#endif - return ___alloc_bootmem_nopanic(size, align, goal, limit); } @@ -798,14 +684,9 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align, { unsigned long limit = 0; -#ifdef CONFIG_NO_BOOTMEM - limit = -1UL; -#endif - return ___alloc_bootmem(size, align, goal, limit); } -#ifndef CONFIG_NO_BOOTMEM static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) @@ -822,7 +703,6 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata, return ___alloc_bootmem(size, align, goal, limit); } -#endif /** * __alloc_bootmem_node - allocate boot memory from a specific node @@ -842,24 +722,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata, void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) { - void *ptr; - if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); -#ifdef CONFIG_NO_BOOTMEM - ptr = __alloc_memory_core_early(pgdat->node_id, size, align, - goal, -1ULL); - if (ptr) - return ptr; - - ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, - goal, -1ULL); -#else - ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0); -#endif - - return ptr; + return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0); } void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, @@ -880,13 +746,8 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, unsigned long new_goal; new_goal = MAX_DMA32_PFN << PAGE_SHIFT; -#ifdef CONFIG_NO_BOOTMEM - ptr = __alloc_memory_core_early(pgdat->node_id, size, align, - new_goal, -1ULL); -#else ptr = alloc_bootmem_core(pgdat->bdata, size, align, new_goal, 0); -#endif if (ptr) return ptr; } @@ -907,16 +768,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, void * __init alloc_bootmem_section(unsigned long size, unsigned long section_nr) { -#ifdef CONFIG_NO_BOOTMEM - unsigned long pfn, goal, limit; - - pfn = section_nr_to_pfn(section_nr); - goal = pfn << PAGE_SHIFT; - limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT; - - return __alloc_memory_core_early(early_pfn_to_nid(pfn), size, - SMP_CACHE_BYTES, goal, limit); -#else bootmem_data_t *bdata; unsigned long pfn, goal, limit; @@ -926,7 +777,6 @@ void * __init alloc_bootmem_section(unsigned long size, bdata = &bootmem_node_data[early_pfn_to_nid(pfn)]; return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit); -#endif } #endif @@ -938,16 +788,11 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); -#ifdef CONFIG_NO_BOOTMEM - ptr = __alloc_memory_core_early(pgdat->node_id, size, align, - goal, -1ULL); -#else ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0); if (ptr) return ptr; ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0); -#endif if (ptr) return ptr; @@ -995,21 +840,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) { - void *ptr; - if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); -#ifdef 
CONFIG_NO_BOOTMEM - ptr = __alloc_memory_core_early(pgdat->node_id, size, align, + return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, ARCH_LOW_ADDRESS_LIMIT); - if (ptr) - return ptr; - ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, - goal, ARCH_LOW_ADDRESS_LIMIT); -#else - ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, - goal, ARCH_LOW_ADDRESS_LIMIT); -#endif - return ptr; } diff --git a/mm/nobootmem.c b/mm/nobootmem.c new file mode 100644 index 0000000..f220b8d --- /dev/null +++ b/mm/nobootmem.c @@ -0,0 +1,405 @@ +/* + * bootmem - A boot-time physical memory allocator and configurator + * + * Copyright (C) 1999 Ingo Molnar + * 1999 Kanoj Sarcar, SGI + * 2008 Johannes Weiner + * + * Access to this subsystem has to be serialized externally (which is true + * for the boot process anyway). + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "internal.h" + +unsigned long max_low_pfn; +unsigned long min_low_pfn; +unsigned long max_pfn; + +#ifdef CONFIG_CRASH_DUMP +/* + * If we have booted due to a crash, max_pfn will be a very low value. We need + * to know the amount of memory that the previous kernel used. + */ +unsigned long saved_max_pfn; +#endif + +/* + * free_bootmem_late - free bootmem pages directly to page allocator + * @addr: starting address of the range + * @size: size of the range in bytes + * + * This is only useful when the bootmem allocator has already been torn + * down, but we are still initializing the system. Pages are given directly + * to the page allocator, no bootmem metadata is updated because it is gone. + */ +void __init free_bootmem_late(unsigned long addr, unsigned long size) +{ + unsigned long cursor, end; + + kmemleak_free_part(__va(addr), size); + + cursor = PFN_UP(addr); + end = PFN_DOWN(addr + size); + + for (; cursor < end; cursor++) { + __free_pages_bootmem(pfn_to_page(cursor), 0); + totalram_pages++; + } +} + +static void __init __free_pages_memory(unsigned long start, unsigned long end) +{ + int i; + unsigned long start_aligned, end_aligned; + int order = ilog2(BITS_PER_LONG); + + start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1); + end_aligned = end & ~(BITS_PER_LONG - 1); + + if (end_aligned <= start_aligned) { + for (i = start; i < end; i++) + __free_pages_bootmem(pfn_to_page(i), 0); + + return; + } + + for (i = start; i < start_aligned; i++) + __free_pages_bootmem(pfn_to_page(i), 0); + + for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG) + __free_pages_bootmem(pfn_to_page(i), order); + + for (i = end_aligned; i < end; i++) + __free_pages_bootmem(pfn_to_page(i), 0); +} + +unsigned long __init free_all_memory_core_early(int nodeid) +{ + int i; + u64 start, end; + unsigned long count = 0; + struct range *range = NULL; + int nr_range; + + nr_range = get_free_all_memory_range(&range, nodeid); + + for (i = 0; i < nr_range; i++) { + start = range[i].start; + end = range[i].end; + count += end - start; + __free_pages_memory(start, end); + } + + return count; +} + +/** + * free_all_bootmem_node - release a node's free pages to the buddy allocator + * @pgdat: node to be released + * + * Returns the number of pages actually released. 
+ */ +unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) +{ + register_page_bootmem_info_node(pgdat); + + /* free_all_memory_core_early(MAX_NUMNODES) will be called later */ + return 0; +} + +/** + * free_all_bootmem - release free pages to the buddy allocator + * + * Returns the number of pages actually released. + */ +unsigned long __init free_all_bootmem(void) +{ + /* + * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id + * because in some case like Node0 doesnt have RAM installed + * low ram will be on Node1 + * Use MAX_NUMNODES will make sure all ranges in early_node_map[] + * will be used instead of only Node0 related + */ + return free_all_memory_core_early(MAX_NUMNODES); +} + +/** + * free_bootmem_node - mark a page range as usable + * @pgdat: node the range resides on + * @physaddr: starting address of the range + * @size: size of the range in bytes + * + * Partial pages will be considered reserved and left as they are. + * + * The range must reside completely on the specified node. + */ +void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, + unsigned long size) +{ + kmemleak_free_part(__va(physaddr), size); + memblock_x86_free_range(physaddr, physaddr + size); +} + +/** + * free_bootmem - mark a page range as usable + * @addr: starting address of the range + * @size: size of the range in bytes + * + * Partial pages will be considered reserved and left as they are. + * + * The range must be contiguous but may span node boundaries. + */ +void __init free_bootmem(unsigned long addr, unsigned long size) +{ + kmemleak_free_part(__va(addr), size); + memblock_x86_free_range(addr, addr + size); +} + +static void * __init ___alloc_bootmem_nopanic(unsigned long size, + unsigned long align, + unsigned long goal, + unsigned long limit) +{ + void *ptr; + + if (WARN_ON_ONCE(slab_is_available())) + return kzalloc(size, GFP_NOWAIT); + +restart: + + ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit); + + if (ptr) + return ptr; + + if (goal != 0) { + goal = 0; + goto restart; + } + + return NULL; +} + +/** + * __alloc_bootmem_nopanic - allocate boot memory without panicking + * @size: size of the request in bytes + * @align: alignment of the region + * @goal: preferred starting address of the region + * + * The goal is dropped if it can not be satisfied and the allocation will + * fall back to memory below @goal. + * + * Allocation may happen on any node in the system. + * + * Returns NULL on failure. + */ +void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, + unsigned long goal) +{ + unsigned long limit = -1UL; + + return ___alloc_bootmem_nopanic(size, align, goal, limit); +} + +static void * __init ___alloc_bootmem(unsigned long size, unsigned long align, + unsigned long goal, unsigned long limit) +{ + void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit); + + if (mem) + return mem; + /* + * Whoops, we cannot satisfy the allocation request. + */ + printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); + panic("Out of memory"); + return NULL; +} + +/** + * __alloc_bootmem - allocate boot memory + * @size: size of the request in bytes + * @align: alignment of the region + * @goal: preferred starting address of the region + * + * The goal is dropped if it can not be satisfied and the allocation will + * fall back to memory below @goal. + * + * Allocation may happen on any node in the system. + * + * The function panics if the request can not be satisfied. 
+ */ +void * __init __alloc_bootmem(unsigned long size, unsigned long align, + unsigned long goal) +{ + unsigned long limit = -1UL; + + return ___alloc_bootmem(size, align, goal, limit); +} + +/** + * __alloc_bootmem_node - allocate boot memory from a specific node + * @pgdat: node to allocate from + * @size: size of the request in bytes + * @align: alignment of the region + * @goal: preferred starting address of the region + * + * The goal is dropped if it can not be satisfied and the allocation will + * fall back to memory below @goal. + * + * Allocation may fall back to any node in the system if the specified node + * can not hold the requested memory. + * + * The function panics if the request can not be satisfied. + */ +void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, + unsigned long align, unsigned long goal) +{ + void *ptr; + + if (WARN_ON_ONCE(slab_is_available())) + return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); + + ptr = __alloc_memory_core_early(pgdat->node_id, size, align, + goal, -1ULL); + if (ptr) + return ptr; + + return __alloc_memory_core_early(MAX_NUMNODES, size, align, + goal, -1ULL); +} + +void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, + unsigned long align, unsigned long goal) +{ +#ifdef MAX_DMA32_PFN + unsigned long end_pfn; + + if (WARN_ON_ONCE(slab_is_available())) + return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); + + /* update goal according ...MAX_DMA32_PFN */ + end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages; + + if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) && + (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) { + void *ptr; + unsigned long new_goal; + + new_goal = MAX_DMA32_PFN << PAGE_SHIFT; + ptr = __alloc_memory_core_early(pgdat->node_id, size, align, + new_goal, -1ULL); + if (ptr) + return ptr; + } +#endif + + return __alloc_bootmem_node(pgdat, size, align, goal); + +} + +#ifdef CONFIG_SPARSEMEM +/** + * alloc_bootmem_section - allocate boot memory from a specific section + * @size: size of the request in bytes + * @section_nr: sparse map section to allocate from + * + * Return NULL on failure. + */ +void * __init alloc_bootmem_section(unsigned long size, + unsigned long section_nr) +{ + unsigned long pfn, goal, limit; + + pfn = section_nr_to_pfn(section_nr); + goal = pfn << PAGE_SHIFT; + limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT; + + return __alloc_memory_core_early(early_pfn_to_nid(pfn), size, + SMP_CACHE_BYTES, goal, limit); +} +#endif + +void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, + unsigned long align, unsigned long goal) +{ + void *ptr; + + if (WARN_ON_ONCE(slab_is_available())) + return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); + + ptr = __alloc_memory_core_early(pgdat->node_id, size, align, + goal, -1ULL); + if (ptr) + return ptr; + + return __alloc_bootmem_nopanic(size, align, goal); +} + +#ifndef ARCH_LOW_ADDRESS_LIMIT +#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL +#endif + +/** + * __alloc_bootmem_low - allocate low boot memory + * @size: size of the request in bytes + * @align: alignment of the region + * @goal: preferred starting address of the region + * + * The goal is dropped if it can not be satisfied and the allocation will + * fall back to memory below @goal. + * + * Allocation may happen on any node in the system. + * + * The function panics if the request can not be satisfied. 
+ */ +void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, + unsigned long goal) +{ + return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT); +} + +/** + * __alloc_bootmem_low_node - allocate low boot memory from a specific node + * @pgdat: node to allocate from + * @size: size of the request in bytes + * @align: alignment of the region + * @goal: preferred starting address of the region + * + * The goal is dropped if it can not be satisfied and the allocation will + * fall back to memory below @goal. + * + * Allocation may fall back to any node in the system if the specified node + * can not hold the requested memory. + * + * The function panics if the request can not be satisfied. + */ +void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, + unsigned long align, unsigned long goal) +{ + void *ptr; + + if (WARN_ON_ONCE(slab_is_available())) + return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); + + ptr = __alloc_memory_core_early(pgdat->node_id, size, align, + goal, ARCH_LOW_ADDRESS_LIMIT); + if (ptr) + return ptr; + + return __alloc_memory_core_early(MAX_NUMNODES, size, align, + goal, ARCH_LOW_ADDRESS_LIMIT); +} -- cgit v0.10.2 From e782ab421bbba1912c87934bd0e8998630736418 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 24 Feb 2011 14:43:06 +0100 Subject: bootmem: Move contig_page_data definition to bootmem.c/nobootmem.c Now that bootmem.c and nobootmem.c are separate, it's cleaner to define contig_page_data in each file than in page_alloc.c with #ifdef. Move it. This patch doesn't introduce any behavior change. -v2: According to Andrew, fixed the struct layout. -tj: Updated commit description. Signed-off-by: Yinghai Lu Acked-by: Andrew Morton Signed-off-by: Tejun Heo diff --git a/mm/bootmem.c b/mm/bootmem.c index 4403e2f..07aeb89 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -23,6 +23,13 @@ #include "internal.h" +#ifndef CONFIG_NEED_MULTIPLE_NODES +struct pglist_data __refdata contig_page_data = { + .bdata = &bootmem_node_data[0] +}; +EXPORT_SYMBOL(contig_page_data); +#endif + unsigned long max_low_pfn; unsigned long min_low_pfn; unsigned long max_pfn; diff --git a/mm/nobootmem.c b/mm/nobootmem.c index f220b8d..6a018e4 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -23,6 +23,11 @@ #include "internal.h" +#ifndef CONFIG_NEED_MULTIPLE_NODES +struct pglist_data __refdata contig_page_data; +EXPORT_SYMBOL(contig_page_data); +#endif + unsigned long max_low_pfn; unsigned long min_low_pfn; unsigned long max_pfn; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 887ce3b..a243a7f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4841,15 +4841,6 @@ void __init set_dma_reserve(unsigned long new_dma_reserve) dma_reserve = new_dma_reserve; } -#ifndef CONFIG_NEED_MULTIPLE_NODES -struct pglist_data __refdata contig_page_data = { -#ifndef CONFIG_NO_BOOTMEM - .bdata = &bootmem_node_data[0] -#endif - }; -EXPORT_SYMBOL(contig_page_data); -#endif - void __init free_area_init(unsigned long *zones_size) { free_area_init_node(0, zones_size, -- cgit v0.10.2 From 8bc1f91e1f0e977fb95b11d8fa686f5091888110 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 24 Feb 2011 14:43:06 +0100 Subject: bootmem: Move __alloc_memory_core_early() to nobootmem.c Now that bootmem.c and nobootmem.c are separate, there's no reason to define __alloc_memory_core_early(), which is used only by nobootmem, inside #ifdef in page_alloc.c. Move it to nobootmem.c and make it static. This patch doesn't introduce any behavior change. 
-tj: Updated commit description. Signed-off-by: Yinghai Lu Acked-by: Andrew Morton Signed-off-by: Tejun Heo diff --git a/include/linux/mm.h b/include/linux/mm.h index f6385fc..679300c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1309,8 +1309,6 @@ int add_from_early_node_map(struct range *range, int az, int nr_range, int nid); u64 __init find_memory_core_early(int nid, u64 size, u64 align, u64 goal, u64 limit); -void *__alloc_memory_core_early(int nodeid, u64 size, u64 align, - u64 goal, u64 limit); typedef int (*work_fn_t)(unsigned long, unsigned long, void *); extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); extern void sparse_memory_present_with_active_regions(int nid); diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 6a018e4..e2bdb07 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -40,6 +40,31 @@ unsigned long max_pfn; unsigned long saved_max_pfn; #endif +static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, + u64 goal, u64 limit) +{ + void *ptr; + u64 addr; + + if (limit > memblock.current_limit) + limit = memblock.current_limit; + + addr = find_memory_core_early(nid, size, align, goal, limit); + + if (addr == MEMBLOCK_ERROR) + return NULL; + + ptr = phys_to_virt(addr); + memset(ptr, 0, size); + memblock_x86_reserve_range(addr, addr + size, "BOOTMEM"); + /* + * The min_count is set to 0 so that bootmem allocated blocks + * are never reported as leaks. + */ + kmemleak_alloc(ptr, size, 0, 0); + return ptr; +} + /* * free_bootmem_late - free bootmem pages directly to page allocator * @addr: starting address of the range diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a243a7f..6035136 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3780,34 +3780,6 @@ int __init add_from_early_node_map(struct range *range, int az, return nr_range; } -#ifdef CONFIG_NO_BOOTMEM -void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, - u64 goal, u64 limit) -{ - void *ptr; - u64 addr; - - if (limit > memblock.current_limit) - limit = memblock.current_limit; - - addr = find_memory_core_early(nid, size, align, goal, limit); - - if (addr == MEMBLOCK_ERROR) - return NULL; - - ptr = phys_to_virt(addr); - memset(ptr, 0, size); - memblock_x86_reserve_range(addr, addr + size, "BOOTMEM"); - /* - * The min_count is set to 0 so that bootmem allocated blocks - * are never reported as leaks. - */ - kmemleak_alloc(ptr, size, 0, 0); - return ptr; -} -#endif - - void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data) { int i; -- cgit v0.10.2 From d1b19426b04787e48f2689923e28d37b488969b0 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 24 Feb 2011 14:46:24 +0100 Subject: x86: Rename e820_table_* to pgt_buf_* e820_table_{start|end|top}, which are used to buffer page table allocation during early boot, are now derived from memblock and don't have much to do with e820. Change the names so that they reflect what they're used for. This patch doesn't introduce any behavior change. -v2: Ingo found that earlier patch "x86: Use early pre-allocated page table buffer top-down" caused crash on 32bit and needed to be dropped. This patch was updated to reflect the change. -tj: Updated commit description. 
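
For reference, what the three renamed variables track, condensed from the 32-bit alloc_low_page() touched below (page clearing omitted): pgt_buf_start is the first page frame of the early page-table buffer, pgt_buf_end the next unused frame, and pgt_buf_top the exclusive upper bound.

	static __init void *alloc_low_page(void)
	{
		unsigned long pfn = pgt_buf_end++;	/* hand out the next unused buffer page */

		if (pfn >= pgt_buf_top)			/* buffer exhausted */
			panic("alloc_low_page: ran out of memory");
		return __va(pfn * PAGE_SIZE);		/* frames in [pgt_buf_start, pgt_buf_end) hold page tables */
	}
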
Signed-off-by: Yinghai Lu Signed-off-by: Tejun Heo diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h index 36fb1a6a..8dbe353 100644 --- a/arch/x86/include/asm/init.h +++ b/arch/x86/include/asm/init.h @@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start, unsigned long page_size_mask); -extern unsigned long __initdata e820_table_start; -extern unsigned long __meminitdata e820_table_end; -extern unsigned long __meminitdata e820_table_top; +extern unsigned long __initdata pgt_buf_start; +extern unsigned long __meminitdata pgt_buf_end; +extern unsigned long __meminitdata pgt_buf_top; #endif /* _ASM_X86_INIT_32_H */ diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index b8054e0..286d289 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -18,9 +18,9 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); -unsigned long __initdata e820_table_start; -unsigned long __meminitdata e820_table_end; -unsigned long __meminitdata e820_table_top; +unsigned long __initdata pgt_buf_start; +unsigned long __meminitdata pgt_buf_end; +unsigned long __meminitdata pgt_buf_top; int after_bootmem; @@ -73,12 +73,12 @@ static void __init find_early_table_space(unsigned long end, int use_pse, if (base == MEMBLOCK_ERROR) panic("Cannot find space for the kernel page tables"); - e820_table_start = base >> PAGE_SHIFT; - e820_table_end = e820_table_start; - e820_table_top = e820_table_start + (tables >> PAGE_SHIFT); + pgt_buf_start = base >> PAGE_SHIFT; + pgt_buf_end = pgt_buf_start; + pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", - end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT); + end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); } struct map_range { @@ -272,9 +272,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, __flush_tlb_all(); - if (!after_bootmem && e820_table_end > e820_table_start) - memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, - e820_table_end << PAGE_SHIFT, "PGTABLE"); + if (!after_bootmem && pgt_buf_end > pgt_buf_start) + memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, + pgt_buf_end << PAGE_SHIFT, "PGTABLE"); if (!after_bootmem) early_memtest(start, end); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 5d43fa5..73ad7eb 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -62,10 +62,10 @@ bool __read_mostly __vmalloc_start_set = false; static __init void *alloc_low_page(void) { - unsigned long pfn = e820_table_end++; + unsigned long pfn = pgt_buf_end++; void *adr; - if (pfn >= e820_table_top) + if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = __va(pfn * PAGE_SIZE); @@ -163,8 +163,8 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, if (pmd_idx_kmap_begin != pmd_idx_kmap_end && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end - && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start - || (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) { + && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start + || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) { pte_t *newpte; int i; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 4f1f461..470cc47 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -314,7 +314,7 @@ void __init cleanup_highmap(void) static __ref void *alloc_low_page(unsigned long *phys) { - unsigned long pfn = e820_table_end++; + unsigned long pfn = pgt_buf_end++; void *adr; if (after_bootmem) { @@ 
-324,7 +324,7 @@ static __ref void *alloc_low_page(unsigned long *phys) return adr; } - if (pfn >= e820_table_top) + if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5e92b61..13783a1 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1443,7 +1443,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) * early_ioremap fixmap slot, make sure it is RO. */ if (!is_early_ioremap_ptep(ptep) && - pfn >= e820_table_start && pfn < e820_table_end) + pfn >= pgt_buf_start && pfn < pgt_buf_end) pte = pte_wrprotect(pte); return pte; -- cgit v0.10.2 From 1f565a896ee139a70e1a16f74a4ec29707691b0b Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Fri, 25 Feb 2011 10:06:39 +0100 Subject: x86-64, NUMA: Fix size of numa_distance array numa_distance should be sized like the SLIT, an NxN matrix where N is the highest node id + 1. This patch fixes the calculation to avoid overflowing the array on the subsequent iteration. -tj: The original patch used last index to calculate size. Yinghai pointed out it should be incremented so it is the number of elements instead of the last index to calculate the size of the table. Updated accordingly. Signed-off-by: David Rientjes Cc: Yinghai Lu Signed-off-by: Tejun Heo diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index cccc01d..7757d22 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -414,7 +414,8 @@ static int __init numa_alloc_distance(void) for_each_node_mask(i, nodes_parsed) cnt = i; - size = ++cnt * sizeof(numa_distance[0]); + cnt++; + size = cnt * cnt * sizeof(numa_distance[0]); phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, size, PAGE_SIZE); -- cgit v0.10.2 From cc28989437de5617875a2943697fe6ba51a0da8f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 26 Feb 2011 13:05:43 +0100 Subject: mm: Move early_node_map[] reverse scan helpers under HAVE_MEMBLOCK Heiko found recent memblock change triggers these warnings on s390: mm/page_alloc.c:3623:22: warning: 'last_active_region_index_in_nid' defined but not used mm/page_alloc.c:3638:22: warning: 'previous_active_region_index_in_nid' defined but not used Need to move those two function under HAVE_MEMBLOCK with its only user, find_memory_core_early(). -tj: Minor updates to description. Reported-by: Heiko Carstens Signed-off-by: Yinghai Lu Signed-off-by: Tejun Heo diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6035136..da0fe32 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3616,34 +3616,6 @@ static int __meminit next_active_region_index_in_nid(int index, int nid) return -1; } -/* - * Basic iterator support. Return the last range of PFNs for a node - * Note: nid == MAX_NUMNODES returns last region regardless of node - */ -static int __meminit last_active_region_index_in_nid(int nid) -{ - int i; - - for (i = nr_nodemap_entries - 1; i >= 0; i--) - if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) - return i; - - return -1; -} - -/* - * Basic iterator support. Return the previous active range of PFNs for a node - * Note: nid == MAX_NUMNODES returns next region regardless of node - */ -static int __meminit previous_active_region_index_in_nid(int index, int nid) -{ - for (index = index - 1; index >= 0; index--) - if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) - return index; - - return -1; -} - #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID /* * Required by SPARSEMEM. 
Given a PFN, return what node the PFN is on. @@ -3695,10 +3667,6 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node) for (i = first_active_region_index_in_nid(nid); i != -1; \ i = next_active_region_index_in_nid(i, nid)) -#define for_each_active_range_index_in_nid_reverse(i, nid) \ - for (i = last_active_region_index_in_nid(nid); i != -1; \ - i = previous_active_region_index_in_nid(i, nid)) - /** * free_bootmem_with_active_regions - Call free_bootmem_node for each active range * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. @@ -3731,6 +3699,38 @@ void __init free_bootmem_with_active_regions(int nid, } #ifdef CONFIG_HAVE_MEMBLOCK +/* + * Basic iterator support. Return the last range of PFNs for a node + * Note: nid == MAX_NUMNODES returns last region regardless of node + */ +static int __meminit last_active_region_index_in_nid(int nid) +{ + int i; + + for (i = nr_nodemap_entries - 1; i >= 0; i--) + if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) + return i; + + return -1; +} + +/* + * Basic iterator support. Return the previous active range of PFNs for a node + * Note: nid == MAX_NUMNODES returns next region regardless of node + */ +static int __meminit previous_active_region_index_in_nid(int index, int nid) +{ + for (index = index - 1; index >= 0; index--) + if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) + return index; + + return -1; +} + +#define for_each_active_range_index_in_nid_reverse(i, nid) \ + for (i = last_active_region_index_in_nid(nid); i != -1; \ + i = previous_active_region_index_in_nid(i, nid)) + u64 __init find_memory_core_early(int nid, u64 size, u64 align, u64 goal, u64 limit) { -- cgit v0.10.2 From ce0033307f1b45e23e0c149f56ea4855eb4687ce Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 2 Mar 2011 11:22:14 +0100 Subject: x86-64, NUMA: Fix distance table handling NUMA distance table handling has the following problems. * numa_reset_distance() uses numa_distance_cnt * sizeof(numa_distance[0]) as the table size when it should be using the square of numa_distance_cnt. * The same size miscalculation occurs when allocating space for phys_dist in numa_emulation(). * In numa_emulation(), phys_dist must be reserved; otherwise, the new emulated distance table may overlap it. Fix them and, while at it, take numa_distance_cnt resetting in numa_reset_distance() out of the if block to simplify the code a bit. David Rientjes reported incorrect handling of the distance table during emulation. -tj: Edited out numa_alloc_distance()-related changes which weren't necessary and rewrote the patch description. -v2: Ingo was unhappy with the 80-column-limit-induced line breaks. Let lines run over 80 columns.
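The sizing rule and the reservation requirement behind the fixes above boil down to a short pattern. A minimal sketch (the function name is illustrative; the memblock calls mirror the ones used throughout these patches, and the actual fixes are in the diff that follows): an N x N distance table needs cnt * cnt elements, and memblock_find_in_range() only locates a free range -- the caller still has to reserve it before anything else can be allocated from the same area.

static u8 * __init alloc_distance_table_sketch(int cnt)
{
        size_t size = cnt * cnt * sizeof(u8);   /* N x N elements, not N */
        u64 phys;

        phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT,
                                      size, PAGE_SIZE);
        if (phys == MEMBLOCK_ERROR)
                return NULL;

        /* find does not reserve; without this the range still looks free */
        memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");

        return __va(phys);      /* cnt rows of cnt single-byte distances */
}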
Signed-off-by: Yinghai Lu Reported-by: David Rientjes Signed-off-by: Tejun Heo Cc: Ingo Molnar Acked-by: David Rientjes diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 7757d22..541746f 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -390,14 +390,12 @@ static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, */ void __init numa_reset_distance(void) { - size_t size; + size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); - if (numa_distance_cnt) { - size = numa_distance_cnt * sizeof(numa_distance[0]); + if (numa_distance_cnt) memblock_x86_free_range(__pa(numa_distance), __pa(numa_distance) + size); - numa_distance_cnt = 0; - } + numa_distance_cnt = 0; numa_distance = NULL; } diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index 607a2e8..0afa25d 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -300,6 +300,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) static struct numa_meminfo pi __initdata; const u64 max_addr = max_pfn << PAGE_SHIFT; u8 *phys_dist = NULL; + size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); int i, j, ret; if (!emu_cmdline) @@ -336,21 +337,18 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) goto no_emu; } - /* - * Copy the original distance table. It's temporary so no need to - * reserve it. - */ + /* copy the physical distance table */ if (numa_dist_cnt) { - size_t size = numa_dist_cnt * sizeof(phys_dist[0]); u64 phys; phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, - size, PAGE_SIZE); + phys_size, PAGE_SIZE); if (phys == MEMBLOCK_ERROR) { pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); goto no_emu; } + memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST"); phys_dist = __va(phys); for (i = 0; i < numa_dist_cnt; i++) @@ -398,6 +396,10 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) numa_set_distance(i, j, dist); } } + + /* free the copied physical distance table */ + if (phys_dist) + memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size); return; no_emu: -- cgit v0.10.2 From eb8c1e2c830fc25c93bc94e215ed387fe142a98d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 2 Mar 2011 11:32:47 +0100 Subject: x86-64, NUMA: Better explain numa_distance handling Handling of out-of-bounds distances and allocation failure can use better documentation. Add it. Signed-off-by: Tejun Heo Cc: Yinghai Lu Acked-by: David Rientjes diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 541746f..74064e8 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -392,11 +392,12 @@ void __init numa_reset_distance(void) { size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); + /* numa_distance could be 1LU marking allocation failure, test cnt */ if (numa_distance_cnt) memblock_x86_free_range(__pa(numa_distance), __pa(numa_distance) + size); numa_distance_cnt = 0; - numa_distance = NULL; + numa_distance = NULL; /* enable table creation */ } static int __init numa_alloc_distance(void) @@ -447,6 +448,14 @@ static int __init numa_alloc_distance(void) * Set the distance from node @from to @to to @distance. If distance table * doesn't exist, one which is large enough to accomodate all the currently * known nodes will be created. 
+ * + * If such table cannot be allocated, a warning is printed and further + * calls are ignored until the distance table is reset with + * numa_reset_distance(). + * + * If @from or @to is higher than the highest known node at the time of + * table creation or @distance doesn't make sense, the call is ignored. + * This is to allow simplification of specific NUMA config implementations. */ void __init numa_set_distance(int from, int to, int distance) { diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index 0afa25d..aeecea9 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -379,7 +379,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) if (emu_nid_to_phys[i] == NUMA_NO_NODE) emu_nid_to_phys[i] = 0; - /* transform distance table */ + /* + * Transform distance table. numa_set_distance() ignores all + * out-of-bound distances. Just call it for every possible node + * combination. + */ numa_reset_distance(); for (i = 0; i < MAX_NUMNODES; i++) { for (j = 0; j < MAX_NUMNODES; j++) { -- cgit v0.10.2 From d04c579f971bf7d995db1ef7a7161c0143068859 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Thu, 3 Mar 2011 10:55:29 +0000 Subject: x86: Work around old gas bug Add extra parentheses around a couple of definitions introduced by "x86: Cleanup vector usage" and used in assembly macro arguments, and remove spaces. Without that, old (2.16.1) gas would see more macro arguments than were actually specified. Reported-and-tested-by: Andrew Morton Signed-off-by: Jan Beulich Cc: Shaohua Li LKML-Reference: <4D6F81B10200007800034B0B@vpn.id2.novell.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 4980f48..6e976ee 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -126,14 +126,14 @@ /* up to 32 vectors used for spreading out TLB flushes: */ #if NR_CPUS <= 32 -# define NUM_INVALIDATE_TLB_VECTORS NR_CPUS +# define NUM_INVALIDATE_TLB_VECTORS (NR_CPUS) #else -# define NUM_INVALIDATE_TLB_VECTORS 32 +# define NUM_INVALIDATE_TLB_VECTORS (32) #endif -#define INVALIDATE_TLB_VECTOR_END 0xee +#define INVALIDATE_TLB_VECTOR_END (0xee) #define INVALIDATE_TLB_VECTOR_START \ - (INVALIDATE_TLB_VECTOR_END - NUM_INVALIDATE_TLB_VECTORS + 1) + (INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1) #define NR_VECTORS 256 -- cgit v0.10.2 From f89112502805c1f6a6955f90ad158e538edb319d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 4 Mar 2011 10:26:36 +0100 Subject: x86-64, NUMA: Revert NUMA affine page table allocation This patch reverts NUMA affine page table allocation added by commit 1411e0ec31 (x86-64, numa: Put pgtable to local node memory). The commit made an undocumented change where the kernel linear mapping strictly follows the intersection of the e820 memory map and the NUMA configuration. If the physical memory configuration has holes or NUMA nodes are not properly aligned, this leads to using an unnecessarily small mapping size, which increases TLB pressure. For details, http://thread.gmane.org/gmane.linux.kernel/1104672 Patches to fix the problem have been proposed, but the underlying code needs more cleanup, the approach itself seems a bit heavy-handed, and it has been decided to revert the feature for now and come back to it in the next development cycle. http://thread.gmane.org/gmane.linux.kernel/1105959 As init_memory_mapping_high() callsites have been consolidated since the commit, reverting is done manually.
Also, the RED-PEN comment in arch/x86/mm/init.c is not restored as the problem no longer exists with memblock based top-down early memory allocation. Signed-off-by: Tejun Heo Cc: Yinghai Lu Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Thomas Gleixner diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 97e6007..bce688d 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -54,8 +54,6 @@ static inline phys_addr_t get_max_mapped(void) extern unsigned long init_memory_mapping(unsigned long start, unsigned long end); -void init_memory_mapping_high(void); - extern void initmem_init(void); extern void free_initmem(void); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 46e684f..c3a606c 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -963,6 +963,14 @@ void __init setup_arch(char **cmdline_p) max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn< max_low_pfn) { + max_pfn_mapped = init_memory_mapping(1UL<<32, + max_pfn<start); - final_end = min_t(unsigned long, end_pfn<end); - - if (final_end <= final_start) - return 0; - - pfn_mapped = init_memory_mapping(final_start, final_end); - - if (pfn_mapped > data->pfn_mapped) - data->pfn_mapped = pfn_mapped; - - return 0; -} - -static unsigned long __init_refok -init_memory_mapping_active_regions(unsigned long start, unsigned long end) -{ - struct mapping_work_data data; - - data.start = start; - data.end = end; - data.pfn_mapped = 0; - - work_with_active_regions(MAX_NUMNODES, mapping_work_fn, &data); - - return data.pfn_mapped; -} - -void __init_refok init_memory_mapping_high(void) -{ - if (max_pfn > max_low_pfn) { - max_pfn_mapped = init_memory_mapping_active_regions(1UL<<32, - max_pfn< Date: Fri, 4 Mar 2011 14:49:28 +0100 Subject: x86-64, NUMA: Fix numa_emulation code with node0 without RAM On one system that does not have RAM on node0. When numa_emulation is compiled in, and 1. boot system without numa=fake... 2. or boot system with numa=fake=128 to make emulation fail will get: [ 0.092026] ------------[ cut here ]------------ [ 0.096005] kernel BUG at arch/x86/mm/numa_emulation.c:439! [ 0.096005] invalid opcode: 0000 [#1] SMP [ 0.096005] last sysfs file: [ 0.096005] CPU 0 [ 0.096005] Modules linked in: [ 0.096005] [ 0.096005] Pid: 0, comm: swapper Not tainted 2.6.38-rc6-tip-yh-03869-gcb0491d-dirty #684 Sun Microsystems Sun Fire X4240/Sun Fire X4240 [ 0.096005] RIP: 0010:[] [] numa_add_cpu+0x56/0xcf [ 0.096005] RSP: 0000:ffffffff82437ed8 EFLAGS: 00010246 ... [ 0.096005] Call Trace: [ 0.096005] [] identify_cpu+0x2d7/0x2df [ 0.096005] [] identify_boot_cpu+0x10/0x30 [ 0.096005] [] check_bugs+0x9/0x2d [ 0.096005] [] start_kernel+0x3d7/0x3f1 [ 0.096005] [] x86_64_start_reservations+0x9c/0xa0 [ 0.096005] [] x86_64_start_kernel+0x1dd/0x1e8 [ 0.096005] Code: 74 06 48 8d 04 90 eb 0f 48 c7 c0 30 d9 00 00 48 03 04 d5 90 0f 60 82 8b 00 83 f8 ff 74 0d 0f a3 05 8b 7e 92 00 19 d2 85 d2 75 02 <0f> 0b 48 98 be 00 01 00 00 48 c7 c7 e0 44 60 82 44 8b 2c 85 e0 [ 0.096005] RIP [] numa_add_cpu+0x56/0xcf [ 0.096005] RSP [ 0.096026] ---[ end trace a7919e7f17c0a725 ]--- We need to use early_cpu_to_node() directly, because numa_cpu_node() will return node0 that is not onlined. 
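Put concretely (a sketch of the failure mode, not the patch itself -- the real change is in the diff below, and the node numbers are only an assumed example): on a machine whose node 0 has no RAM, node 0 is never onlined, so a lookup that falls back to node 0 trips the BUG_ON() that demands an online node, while the early per-CPU mapping points at the node that actually came online.

        /* assumed layout: node 0 has no RAM, CPUs and memory are on node 1 */
        int nid;

        nid = numa_cpu_node(cpu);       /* falls back to node 0 here */
        BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));       /* fires */

        nid = early_cpu_to_node(cpu);   /* per-CPU map set up earlier: node 1 */
        BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));       /* passes */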
Signed-off-by: Yinghai Lu Signed-off-by: Tejun Heo diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index aeecea9..75b31dc 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -417,9 +417,7 @@ void __cpuinit numa_add_cpu(int cpu) { int physnid, nid; - nid = numa_cpu_node(cpu); - if (nid == NUMA_NO_NODE) - nid = early_cpu_to_node(cpu); + nid = early_cpu_to_node(cpu); BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); physnid = emu_nid_to_phys[nid]; -- cgit v0.10.2 From c09cedf4f75f1e47ea17f55e18e9cfb81bec8575 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Fri, 4 Mar 2011 15:17:21 +0100 Subject: x86-64, NUMA: Clean up initmem_init() This patch cleans initmem_init() so that it is more readable and doesn't use an unnecessary array of function pointers to convolute the flow of the code. It also makes it obvious that dummy_numa_init() will always succeed (and documents that requirement) so that the existing BUG() is never actually reached. No functional change. -tj: Updated comment for dummy_numa_init() slightly. Signed-off-by: David Rientjes Signed-off-by: Tejun Heo diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 86491ba..9ec0f20 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -562,6 +562,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) return 0; } +/** + * dummy_numma_init - Fallback dummy NUMA init + * + * Used if there's no underlying NUMA architecture, NUMA initialization + * fails, or NUMA is disabled on the command line. + * + * Must online at least one node and add memory blocks that cover all + * allowed memory. This function must not fail. + */ static int __init dummy_numa_init(void) { printk(KERN_INFO "%s\n", @@ -575,57 +584,64 @@ static int __init dummy_numa_init(void) return 0; } -void __init initmem_init(void) +static int __init numa_init(int (*init_func)(void)) { - int (*numa_init[])(void) = { [2] = dummy_numa_init }; - int i, j; - - if (!numa_off) { -#ifdef CONFIG_ACPI_NUMA - numa_init[0] = x86_acpi_numa_init; -#endif -#ifdef CONFIG_AMD_NUMA - numa_init[1] = amd_numa_init; -#endif - } + int i; + int ret; - for (i = 0; i < ARRAY_SIZE(numa_init); i++) { - if (!numa_init[i]) - continue; + for (i = 0; i < MAX_LOCAL_APIC; i++) + set_apicid_to_node(i, NUMA_NO_NODE); - for (j = 0; j < MAX_LOCAL_APIC; j++) - set_apicid_to_node(j, NUMA_NO_NODE); + nodes_clear(numa_nodes_parsed); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + memset(&numa_meminfo, 0, sizeof(numa_meminfo)); + remove_all_active_ranges(); + numa_reset_distance(); - nodes_clear(numa_nodes_parsed); - nodes_clear(node_possible_map); - nodes_clear(node_online_map); - memset(&numa_meminfo, 0, sizeof(numa_meminfo)); - remove_all_active_ranges(); - numa_reset_distance(); + ret = init_func(); + if (ret < 0) + return ret; + ret = numa_cleanup_meminfo(&numa_meminfo); + if (ret < 0) + return ret; - if (numa_init[i]() < 0) - continue; + numa_emulation(&numa_meminfo, numa_distance_cnt); - if (numa_cleanup_meminfo(&numa_meminfo) < 0) - continue; + ret = numa_register_memblks(&numa_meminfo); + if (ret < 0) + return ret; - numa_emulation(&numa_meminfo, numa_distance_cnt); + for (i = 0; i < nr_cpu_ids; i++) { + int nid = early_cpu_to_node(i); - if (numa_register_memblks(&numa_meminfo) < 0) + if (nid == NUMA_NO_NODE) continue; + if (!node_online(nid)) + numa_clear_node(i); + } + numa_init_array(); + return 0; +} - for (j = 0; j < nr_cpu_ids; j++) { - int nid = early_cpu_to_node(j); +void __init initmem_init(void) 
+{ + int ret; - if (nid == NUMA_NO_NODE) - continue; - if (!node_online(nid)) - numa_clear_node(j); - } - numa_init_array(); - return; + if (!numa_off) { +#ifdef CONFIG_ACPI_NUMA + ret = numa_init(x86_acpi_numa_init); + if (!ret) + return; +#endif +#ifdef CONFIG_AMD_NUMA + ret = numa_init(amd_numa_init); + if (!ret) + return; +#endif } - BUG(); + + numa_init(dummy_numa_init); } unsigned long __init numa_free_all_bootmem(void) -- cgit v0.10.2 From 078a198906c796981f93ff100c210506e91aade5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 4 Mar 2011 16:32:02 +0100 Subject: x86-64, NUMA: Don't assume phys node 0 is always online in numa_emulation() Undetermined entries in emu_nid_to_phys[] are filled with zero assuming that physical node 0 is always online; however, this might not be true depending on hardware configuration. Find a physical node which is actually online and use it instead. Signed-off-by: Tejun Heo Reported-by: David Rientjes LKML-Reference: diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index 75b31dc..3696be0 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -301,6 +301,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) const u64 max_addr = max_pfn << PAGE_SHIFT; u8 *phys_dist = NULL; size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); + int dfl_phys_nid; int i, j, ret; if (!emu_cmdline) @@ -357,6 +358,19 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) node_distance(i, j); } + /* determine the default phys nid to use for unmapped nodes */ + dfl_phys_nid = NUMA_NO_NODE; + for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) { + if (emu_nid_to_phys[i] != NUMA_NO_NODE) { + dfl_phys_nid = emu_nid_to_phys[i]; + break; + } + } + if (dfl_phys_nid == NUMA_NO_NODE) { + pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n"); + goto no_emu; + } + /* commit */ *numa_meminfo = ei; @@ -377,7 +391,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) /* make sure all emulated nodes are mapped to a physical node */ for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) if (emu_nid_to_phys[i] == NUMA_NO_NODE) - emu_nid_to_phys[i] = 0; + emu_nid_to_phys[i] = dfl_phys_nid; /* * Transform distance table. numa_set_distance() ignores all -- cgit v0.10.2 From 56396e6823fe9b42fe9cf9403d6ed67756255f70 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 11 Mar 2011 10:33:31 +0100 Subject: x86-64, NUMA: Don't call numa_set_distanc() for all possible node combinations during emulation The distance transforming in numa_emulation() used to call numa_set_distance() for all MAX_NUMNODES * MAX_NUMNODES node combinations regardless of which are enabled. As numa_set_distance() ignores all out-of-bound distance settings, this doesn't cause any problem other than looping unnecessarily many times during boot. However, as MAX_NUMNODES * MAX_NUMNODES can be pretty high, update the code such that it iterates through only the enabled combinations. Yinghai Lu identified the issue and provided an initial patch to address the issue; however, the patch was incorrect in that it didn't build emulated distance table when there's no physical distance table and unnecessarily complex. 
http://thread.gmane.org/gmane.linux.kernel/1107986/focus=1107988 Signed-off-by: Tejun Heo Reported-by: Yinghai Lu Acked-by: Yinghai Lu diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index 3696be0..ad091e4 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -301,7 +301,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) const u64 max_addr = max_pfn << PAGE_SHIFT; u8 *phys_dist = NULL; size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); - int dfl_phys_nid; + int max_emu_nid, dfl_phys_nid; int i, j, ret; if (!emu_cmdline) @@ -358,12 +358,17 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) node_distance(i, j); } - /* determine the default phys nid to use for unmapped nodes */ + /* + * Determine the max emulated nid and the default phys nid to use + * for unmapped nodes. + */ + max_emu_nid = 0; dfl_phys_nid = NUMA_NO_NODE; for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) { if (emu_nid_to_phys[i] != NUMA_NO_NODE) { - dfl_phys_nid = emu_nid_to_phys[i]; - break; + max_emu_nid = i; + if (dfl_phys_nid == NUMA_NO_NODE) + dfl_phys_nid = emu_nid_to_phys[i]; } } if (dfl_phys_nid == NUMA_NO_NODE) { @@ -393,14 +398,10 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) if (emu_nid_to_phys[i] == NUMA_NO_NODE) emu_nid_to_phys[i] = dfl_phys_nid; - /* - * Transform distance table. numa_set_distance() ignores all - * out-of-bound distances. Just call it for every possible node - * combination. - */ + /* transform distance table */ numa_reset_distance(); - for (i = 0; i < MAX_NUMNODES; i++) { - for (j = 0; j < MAX_NUMNODES; j++) { + for (i = 0; i < max_emu_nid + 1; i++) { + for (j = 0; j < max_emu_nid + 1; j++) { int physi = emu_nid_to_phys[i]; int physj = emu_nid_to_phys[j]; int dist; -- cgit v0.10.2 From 25542c646afbf14c43fa7d2b443055cadb73b07a Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 15 Mar 2011 09:57:37 +0800 Subject: x86, tlb, UV: Do small micro-optimization for native_flush_tlb_others() native_flush_tlb_others() is called from: flush_tlb_current_task() flush_tlb_mm() flush_tlb_page() All these functions disable preemption explicitly, so we can use smp_processor_id() instead of get_cpu() and put_cpu(). Signed-off-by: Xiao Guangrong Cc: Cliff Wickman LKML-Reference: <4D7EC791.4040003@cn.fujitsu.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 55272d7..d6c0418 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -208,11 +208,10 @@ void native_flush_tlb_others(const struct cpumask *cpumask, if (is_uv_system()) { unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); if (cpumask) flush_tlb_others_ipi(cpumask, mm, va); - put_cpu(); return; } flush_tlb_others_ipi(cpumask, mm, va); -- cgit v0.10.2
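The reasoning above in a nutshell (a sketch with made-up helper names, not kernel code): get_cpu() is smp_processor_id() plus preempt_disable(), and put_cpu() re-enables preemption. When every caller already runs with preemption disabled, that extra disable/enable pair is pure overhead and plain smp_processor_id() is just as safe, because the CPU id cannot change under the caller.

static void flush_on_this_cpu(void)            /* hypothetical helper */
{
        /* caller guarantees preemption is off, so the cpu id is stable */
        unsigned int cpu = smp_processor_id();

        do_flush_for(cpu);                     /* hypothetical per-cpu work */
}

static void flush_tlb_caller_sketch(void)
{
        preempt_disable();              /* what flush_tlb_mm() and friends do */
        flush_on_this_cpu();            /* no get_cpu()/put_cpu() needed inside */
        preempt_enable();
}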