Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h        |  1
-rw-r--r--  mm/memblock.c        |  3
-rw-r--r--  mm/mempolicy.c       | 19
-rw-r--r--  mm/page-writeback.c  | 57
-rw-r--r--  mm/readahead.c       | 15
-rw-r--r--  mm/slab_common.c     | 19
-rw-r--r--  mm/slub.c            | 12
-rw-r--r--  mm/vmscan.c          | 23
8 files changed, 64 insertions, 85 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 612c14f..29e1e76 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -83,7 +83,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern bool zone_reclaimable(struct zone *zone);
 
 /*
diff --git a/mm/memblock.c b/mm/memblock.c
index 87d21a6..39a31e7 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1077,6 +1077,9 @@ static void * __init memblock_virt_alloc_internal(
 	if (!align)
 		align = SMP_CACHE_BYTES;
 
+	if (max_addr > memblock.current_limit)
+		max_addr = memblock.current_limit;
+
 again:
 	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
 					    nid);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 36cb46c..873de7e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2654,7 +2654,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-static bool __initdata numabalancing_override;
+static int __initdata numabalancing_override;
 
 static void __init check_numabalancing_enable(void)
 {
@@ -2663,9 +2663,15 @@ static void __init check_numabalancing_enable(void)
 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
 		numabalancing_default = true;
 
+	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
+	if (numabalancing_override)
+		set_numabalancing_state(numabalancing_override == 1);
+
 	if (nr_node_ids > 1 && !numabalancing_override) {
-		printk(KERN_INFO "Enabling automatic NUMA balancing. "
-			"Configure with numa_balancing= or the kernel.numa_balancing sysctl");
+		pr_info("%s automatic NUMA balancing. "
+			"Configure with numa_balancing= or the "
+			"kernel.numa_balancing sysctl",
+			numabalancing_default ? "Enabling" : "Disabling");
 		set_numabalancing_state(numabalancing_default);
 	}
 }
@@ -2675,18 +2681,17 @@ static int __init setup_numabalancing(char *str)
 	int ret = 0;
 	if (!str)
 		goto out;
-	numabalancing_override = true;
 
 	if (!strcmp(str, "enable")) {
-		set_numabalancing_state(true);
+		numabalancing_override = 1;
 		ret = 1;
 	} else if (!strcmp(str, "disable")) {
-		set_numabalancing_state(false);
+		numabalancing_override = -1;
 		ret = 1;
 	}
 out:
 	if (!ret)
-		printk(KERN_WARNING "Unable to parse numa_balancing=\n");
+		pr_warn("Unable to parse numa_balancing=\n");
 
 	return ret;
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6380758..2d30e2c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -191,6 +191,26 @@ static unsigned long writeout_period_time = 0;
  * global dirtyable memory first.
  */
 
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache.  This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+	unsigned long nr_pages;
+
+	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
+	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+
+	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
+
+	return nr_pages;
+}
+
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
@@ -198,11 +218,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 	unsigned long x = 0;
 
 	for_each_node_state(node, N_HIGH_MEMORY) {
-		struct zone *z =
-			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+		struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+		x += zone_dirtyable_memory(z);
 	}
 	/*
 	 * Unreclaimable memory (kernel memory or anonymous memory
@@ -238,9 +256,12 @@ static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+	x = global_page_state(NR_FREE_PAGES);
 	x -= min(x, dirty_balance_reserve);
 
+	x += global_page_state(NR_INACTIVE_FILE);
+	x += global_page_state(NR_ACTIVE_FILE);
+
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
 
@@ -289,32 +310,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 }
 
 /**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
- *
- * Returns the zone's number of pages potentially available for dirty
- * page cache.  This is the base value for the per-zone dirty limits.
- */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
-{
-	/*
-	 * The effective global number of dirtyable pages may exclude
-	 * highmem as a big-picture measure to keep the ratio between
-	 * dirty memory and lowmem reasonable.
-	 *
-	 * But this function is purely about the individual zone and a
-	 * highmem zone can hold its share of dirty pages, so we don't
-	 * care about vm_highmem_is_dirtyable here.
-	 */
-	unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
-		zone_reclaimable_pages(zone);
-
-	/* don't allow this to underflow */
-	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
-	return nr_pages;
-}
-
-/**
  * zone_dirty_limit - maximum number of dirty pages allowed in a zone
  * @zone: the zone
  *
diff --git a/mm/readahead.c b/mm/readahead.c
index 7cdbb44..0de2360 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -211,8 +211,6 @@ out:
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		pgoff_t offset, unsigned long nr_to_read)
 {
-	int ret = 0;
-
 	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
 		return -EINVAL;
 
@@ -226,15 +224,13 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			this_chunk = nr_to_read;
 		err = __do_page_cache_readahead(mapping, filp,
 						offset, this_chunk, 0);
-		if (err < 0) {
-			ret = err;
-			break;
-		}
-		ret += err;
+		if (err < 0)
+			return err;
+
 		offset += this_chunk;
 		nr_to_read -= this_chunk;
 	}
-	return ret;
+	return 0;
 }
 
 /*
@@ -576,8 +572,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
 	if (!mapping || !mapping->a_ops)
 		return -EINVAL;
 
-	force_page_cache_readahead(mapping, filp, index, nr);
-	return 0;
+	return force_page_cache_readahead(mapping, filp, index, nr);
 }
 
 SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8e40321..1ec3c61 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -233,14 +233,17 @@ out_unlock:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
-	/*
-	 * There is no point in flooding logs with warnings or especially
-	 * crashing the system if we fail to create a cache for a memcg. In
-	 * this case we will be accounting the memcg allocation to the root
-	 * cgroup until we succeed to create its own cache, but it isn't that
-	 * critical.
-	 */
-	if (err && !memcg) {
+	if (err) {
+		/*
+		 * There is no point in flooding logs with warnings or
+		 * especially crashing the system if we fail to create a cache
+		 * for a memcg. In this case we will be accounting the memcg
+		 * allocation to the root cgroup until we succeed to create its
+		 * own cache, but it isn't that critical.
+		 */
+		if (!memcg)
+			return NULL;
+
 		if (flags & SLAB_PANIC)
 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
 				name, err);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1559,7 +1559,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 		new.freelist = freelist;
 	}
 
-	VM_BUG_ON_PAGE(new.frozen, &new);
+	VM_BUG_ON(new.frozen);
 	new.frozen = 1;
 
 	if (!__cmpxchg_double_slab(s, page,
@@ -1812,7 +1812,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 			set_freepointer(s, freelist, prior);
 			new.counters = counters;
 			new.inuse--;
-			VM_BUG_ON_PAGE(!new.frozen, &new);
+			VM_BUG_ON(!new.frozen);
 
 		} while (!__cmpxchg_double_slab(s, page,
 			prior, counters,
@@ -1840,7 +1840,7 @@ redo:
 
 	old.freelist = page->freelist;
 	old.counters = page->counters;
-	VM_BUG_ON_PAGE(!old.frozen, &old);
+	VM_BUG_ON(!old.frozen);
 
 	/* Determine target state of the slab */
 	new.counters = old.counters;
@@ -1952,7 +1952,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 
 			old.freelist = page->freelist;
 			old.counters = page->counters;
-			VM_BUG_ON_PAGE(!old.frozen, &old);
+			VM_BUG_ON(!old.frozen);
 
 			new.counters = old.counters;
 			new.freelist = old.freelist;
@@ -2225,7 +2225,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 		counters = page->counters;
 
 		new.counters = counters;
-		VM_BUG_ON_PAGE(!new.frozen, &new);
+		VM_BUG_ON(!new.frozen);
 
 		new.inuse = page->objects;
 		new.frozen = freelist != NULL;
@@ -2319,7 +2319,7 @@ load_freelist:
 	 * page is pointing to the page from which the objects are obtained.
 	 * That page must be frozen for per cpu allocations to work.
 	 */
-	VM_BUG_ON_PAGE(!c->page->frozen, c->page);
+	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 90c4075..a9c74b4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -147,7 +147,7 @@ static bool global_reclaim(struct scan_control *sc)
 }
 #endif
 
-unsigned long zone_reclaimable_pages(struct zone *zone)
+static unsigned long zone_reclaimable_pages(struct zone *zone)
 {
 	int nr;
 
@@ -3315,27 +3315,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
-	int nr;
-
-	nr = global_page_state(NR_ACTIVE_FILE) +
-	     global_page_state(NR_INACTIVE_FILE);
-
-	if (get_nr_swap_pages() > 0)
-		nr += global_page_state(NR_ACTIVE_ANON) +
-		      global_page_state(NR_INACTIVE_ANON);
-
-	return nr;
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
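
Editor's note on the mm/mempolicy.c hunks: numabalancing_override becomes a signed tri-state (0 = not specified, 1 = enable, -1 = disable) so that an explicit numa_balancing=disable on the command line stays distinguishable from the option being absent, and set_numabalancing_state() is now called once from check_numabalancing_enable() rather than at parse time. The standalone C sketch below models that record-then-apply pattern; the names (parse_option, check_enable, apply_state) and the node count are illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* 0 = not specified on the command line, 1 = enable, -1 = disable */
static int override;

static void apply_state(bool enable)
{
	printf("NUMA balancing %s\n", enable ? "enabled" : "disabled");
}

/* models setup_numabalancing(): only record the user's intent */
static int parse_option(const char *str)
{
	if (!strcmp(str, "enable")) {
		override = 1;
		return 1;
	}
	if (!strcmp(str, "disable")) {
		override = -1;
		return 1;
	}
	return 0;	/* unparseable; leave the default in force */
}

/* models check_numabalancing_enable(): apply intent, else the default */
static void check_enable(bool default_on, int nr_nodes)
{
	if (override)
		apply_state(override == 1);
	else if (nr_nodes > 1)
		apply_state(default_on);
}

int main(void)
{
	parse_option("disable");	/* as if booted with numa_balancing=disable */
	check_enable(true, 4);		/* the default alone would have enabled it */
	return 0;
}

Splitting recording from applying also lets the decision run later in boot, after whatever machinery the setter depends on has been initialized, instead of inside early parameter parsing.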
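On the mm/page-writeback.c and mm/vmscan.c hunks: global_reclaimable_pages() is removed, zone_reclaimable_pages() becomes static, and dirtyable memory is now computed as free pages minus the dirty balance reserve plus the file LRU pages only, so anonymous memory no longer inflates the dirty limits. Below is a minimal userspace model of the new arithmetic; dirtyable_pages() is a hypothetical helper with made-up page counts, not a kernel function.

#include <stdio.h>

/*
 * Userspace model of the new global/zone dirtyable-memory arithmetic.
 * All values are page counts; the numbers in main() are invented.
 */
static unsigned long dirtyable_pages(unsigned long free,
				     unsigned long reserve,
				     unsigned long inactive_file,
				     unsigned long active_file)
{
	unsigned long nr = free;

	/* nr -= min(nr, reserve): never let the reserve underflow the count */
	nr -= (reserve < nr) ? reserve : nr;

	/* only file-backed LRU pages count as dirtyable; anon is excluded */
	nr += inactive_file + active_file;

	return nr;
}

int main(void)
{
	unsigned long free = 50000, reserve = 2000;
	unsigned long inactive_file = 30000, active_file = 20000;

	printf("dirtyable pages: %lu\n",
	       dirtyable_pages(free, reserve, inactive_file, active_file));
	return 0;
}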
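On the mm/readahead.c hunks: force_page_cache_readahead()'s contract changes from "sum of submitted pages, or the last error" to "0 on success, or the first error", which do_readahead() then returns straight to the readahead(2) caller. A sketch of that chunked submit-and-bail error style under the same loop shape; do_chunk() and its failure threshold are invented for the example.

#include <errno.h>
#include <stdio.h>

/* stand-in for __do_page_cache_readahead(); fails past an arbitrary limit */
static int do_chunk(unsigned long offset, unsigned long nr)
{
	if (offset + nr > 1000000)
		return -EINVAL;	/* pretend the backing store rejected it */
	return 0;
}

/*
 * Chunked submission in the new style: bail out with the first error,
 * return 0 once everything was submitted.
 */
static int submit_all(unsigned long offset, unsigned long nr_to_read,
		      unsigned long max_chunk)
{
	while (nr_to_read) {
		unsigned long this_chunk = max_chunk;
		int err;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;

		err = do_chunk(offset, this_chunk);
		if (err < 0)
			return err;	/* first failure wins */

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return 0;
}

int main(void)
{
	printf("ok:  %d\n", submit_all(0, 4096, 512));
	printf("err: %d\n", submit_all(999000, 4096, 512));
	return 0;
}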