author		Vladimir Davydov <vdavydov@virtuozzo.com>	2016-03-17 21:18:42 (GMT)
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-17 22:09:34 (GMT)
commit		0a6b76dd23fa08c5fd7b68acdb55018a37afd4aa (patch)
tree		c2e94f95e0217dedaca710315500a9495fb021a6 /mm
parent		cdcbb72ebfec52373e57092eccaadd5a6e261c3e (diff)
download	linux-0a6b76dd23fa08c5fd7b68acdb55018a37afd4aa.tar.xz
mm: workingset: make shadow node shrinker memcg aware
Workingset code was recently made memcg aware, but shadow node shrinker is
still global.  As a result, one small cgroup can consume all memory available
for shadow nodes, possibly hurting other cgroups by reclaiming their shadow
nodes, even though reclaim distances stored in its shadow nodes have no
effect.  To avoid this, we need to make shadow node shrinker memcg aware.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
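As a rough sketch (not the patch itself) of the pattern applied below: a memcg-aware shrinker sets SHRINKER_MEMCG_AWARE and, in its ->count_objects callback, scales against the file LRU of the cgroup handed in via the shrink_control, falling back to node-wide counters when kernel memory accounting is disabled. The example_* identifiers are hypothetical; the helpers used are the ones the diff touches.

#include <linux/shrinker.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/vmstat.h>

/* Hypothetical shrinker illustrating the memcg-aware counting pattern. */
static unsigned long example_count_objects(struct shrinker *shrinker,
					   struct shrink_control *sc)
{
	unsigned long pages;

	if (memcg_kmem_enabled())
		/* file pages charged to the cgroup under reclaim, on this node */
		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
						     LRU_ALL_FILE);
	else
		/* no per-cgroup accounting: use node-wide file LRU counters */
		pages = node_page_state(sc->nid, NR_ACTIVE_FILE) +
			node_page_state(sc->nid, NR_INACTIVE_FILE);

	/*
	 * Returned only for illustration; a real shrinker (like
	 * count_shadow_nodes below) derives a freeable-object count
	 * from this page count.
	 */
	return pages;
}

static unsigned long example_scan_objects(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	return SHRINK_STOP;	/* reclaim itself is omitted from the sketch */
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count_objects,
	.scan_objects	= example_scan_objects,
	.seeks		= DEFAULT_SEEKS,
	/* invoked per node and per cgroup during reclaim */
	.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

Registering such a shrinker with register_shrinker(&example_shrinker) would have the count/scan callbacks invoked once per (node, memcg) pair during reclaim, which is what the flags change below enables for workingset_shadow_shrinker.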
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	 5
-rw-r--r--	mm/workingset.c	10
2 files changed, 9 insertions, 6 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 341bf86..ae8b81c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -638,9 +638,8 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 }
 
-static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
-						  int nid,
-						  unsigned int lru_mask)
+unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+					   int nid, unsigned int lru_mask)
 {
 	unsigned long nr = 0;
 	int zid;
diff --git a/mm/workingset.c b/mm/workingset.c
index 68e8cd9..8a75f8d 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -349,8 +349,12 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 	shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
 	local_irq_enable();
 
-	pages = node_page_state(sc->nid, NR_ACTIVE_FILE) +
-		node_page_state(sc->nid, NR_INACTIVE_FILE);
+	if (memcg_kmem_enabled())
+		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
+						     LRU_ALL_FILE);
+	else
+		pages = node_page_state(sc->nid, NR_ACTIVE_FILE) +
+			node_page_state(sc->nid, NR_INACTIVE_FILE);
 
 	/*
 	 * Active cache pages are limited to 50% of memory, and shadow
@@ -460,7 +464,7 @@ static struct shrinker workingset_shadow_shrinker = {
 	.count_objects = count_shadow_nodes,
 	.scan_objects = scan_shadow_nodes,
 	.seeks = DEFAULT_SEEKS,
-	.flags = SHRINKER_NUMA_AWARE,
+	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
 };
 
 /*