author | Mel Gorman <mgorman@suse.de> | 2014-01-21 23:49:13 (GMT)
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-22 00:19:44 (GMT)
commit | aec6a8889a98a0cd58357cd0937a25189908f191 (patch)
tree | 4c0bd0fa7a3d9e6edc0f863782e1d5807ebefb12 /arch/parisc/mm
parent | ece86e222db48d04bda218a2be70e384518bb08c (diff)
download | linux-aec6a8889a98a0cd58357cd0937a25189908f191.tar.xz
mm, show_mem: remove SHOW_MEM_FILTER_PAGE_COUNT
Commit 4b59e6c47309 ("mm, show_mem: suppress page counts in
non-blockable contexts") introduced SHOW_MEM_FILTER_PAGE_COUNT to
suppress PFN walks on large memory machines. Commit c78e93630d15 ("mm:
do not walk all of system memory during show_mem") avoided the PFN walk
in the generic show_mem helper, which removed the need for
SHOW_MEM_FILTER_PAGE_COUNT in that case.
This patch removes the PFN walkers from the arch-specific implementations
that report at a per-node or per-zone granularity. ARM and unicore32
still do a PFN walk, as they report memory usage per bank, a much finer
granularity at which the debugging information may still be of use. As
the remaining arches doing PFN walks have relatively small amounts of
memory, this patch simply removes SHOW_MEM_FILTER_PAGE_COUNT.
[akpm@linux-foundation.org: fix parisc]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: James Bottomley <jejb@parisc-linux.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
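
As background for the commit message above: the flag being removed worked as a
simple early return at the top of each architecture's show_mem(), skipping the
expensive accounting when the caller could not afford it. A minimal sketch of
that pattern, mirroring the parisc lines deleted in the diff below (the per-PFN
loop itself is elided here):

void show_mem(unsigned int filter)
{
        printk(KERN_INFO "Mem-info:\n");
        show_free_areas(filter);
        /* Callers in non-blockable contexts passed SHOW_MEM_FILTER_PAGE_COUNT,
         * so the expensive per-PFN accounting below was skipped entirely. */
        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
                return;
        /* ... PFN walk counting reserved, shared and swap-cached pages ... */
}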
Diffstat (limited to 'arch/parisc/mm')
-rw-r--r-- | arch/parisc/mm/init.c | 59
1 file changed, 17 insertions, 42 deletions
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 96f8168..ae085ad 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -645,55 +645,30 @@ EXPORT_SYMBOL(empty_zero_page);
 
 void show_mem(unsigned int filter)
 {
-        int i,free = 0,total = 0,reserved = 0;
-        int shared = 0, cached = 0;
+        int total = 0,reserved = 0;
+        pg_data_t *pgdat;
 
         printk(KERN_INFO "Mem-info:\n");
         show_free_areas(filter);
-        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
-                return;
-#ifndef CONFIG_DISCONTIGMEM
-        i = max_mapnr;
-        while (i-- > 0) {
-                total++;
-                if (PageReserved(mem_map+i))
-                        reserved++;
-                else if (PageSwapCache(mem_map+i))
-                        cached++;
-                else if (!page_count(&mem_map[i]))
-                        free++;
-                else
-                        shared += page_count(&mem_map[i]) - 1;
-        }
-#else
-        for (i = 0; i < npmem_ranges; i++) {
-                int j;
 
-                for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
-                        struct page *p;
-                        unsigned long flags;
-
-                        pgdat_resize_lock(NODE_DATA(i), &flags);
-                        p = nid_page_nr(i, j) - node_start_pfn(i);
-
-                        total++;
-                        if (PageReserved(p))
-                                reserved++;
-                        else if (PageSwapCache(p))
-                                cached++;
-                        else if (!page_count(p))
-                                free++;
-                        else
-                                shared += page_count(p) - 1;
-                        pgdat_resize_unlock(NODE_DATA(i), &flags);
-                }
+        for_each_online_pgdat(pgdat) {
+                unsigned long flags;
+                int zoneid;
+
+                pgdat_resize_lock(pgdat, &flags);
+                for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+                        struct zone *zone = &pgdat->node_zones[zoneid];
+                        if (!populated_zone(zone))
+                                continue;
+
+                        total += zone->present_pages;
+                        reserved = zone->present_pages - zone->managed_pages;
+                }
+                pgdat_resize_unlock(pgdat, &flags);
         }
-#endif
+
         printk(KERN_INFO "%d pages of RAM\n", total);
         printk(KERN_INFO "%d reserved pages\n", reserved);
-        printk(KERN_INFO "%d pages shared\n", shared);
-        printk(KERN_INFO "%d pages swap cached\n", cached);
-
 #ifdef CONFIG_DISCONTIGMEM
         {
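
For comparison, the replacement accounting visits each online node and its
populated zones instead of individual pages, and derives the report from zone
counters; present_pages minus managed_pages approximates the pages reserved by
the kernel. A hedged sketch of that pattern, written as a standalone helper
rather than the patch itself (the name report_mem_by_zone is illustrative only;
the pgdat/zone helpers are the kernel's own):

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>
#include <linux/printk.h>

static void report_mem_by_zone(void)
{
        pg_data_t *pgdat;
        int total = 0, reserved = 0;

        for_each_online_pgdat(pgdat) {
                unsigned long flags;
                int zoneid;

                /* Hold the resize lock so zone sizes stay stable while read */
                pgdat_resize_lock(pgdat, &flags);
                for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                        struct zone *zone = &pgdat->node_zones[zoneid];

                        if (!populated_zone(zone))
                                continue;

                        /* Zone counters, not a PFN walk: cost is per zone, not per page */
                        total += zone->present_pages;
                        reserved += zone->present_pages - zone->managed_pages;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }

        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d reserved pages\n", reserved);
}

Because the loop only touches per-zone counters, its cost no longer scales with
the amount of memory in the machine, which is the point of the change.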