author | Peter Zijlstra <peterz@infradead.org> | 2013-10-07 10:29:24 (GMT)
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2013-10-09 12:47:53 (GMT)
commit | 6688cc05473b36a0a3d3971e1adf1712919b32eb | (patch)
tree | 2305dca14e2df669b48138088bd93cf51db37721 | /mm/huge_memory.c
parent | 7851a45cd3f6198bf542c30e27b330e8eeb3736c | (diff)
download | linux-fsl-qoriq-6688cc05473b36a0a3d3971e1adf1712919b32eb.tar.xz |
mm: numa: Do not group on RO pages
And here's a little something to make sure the whole world doesn't end up
in a single group.

While we don't migrate shared executable pages, we do scan/fault on them.
And since everybody links to libc, everybody ends up in the same group.
Suggested-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-47-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r-- | mm/huge_memory.c | 15
1 file changed, 13 insertions(+), 2 deletions(-)
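The TNF_* flags used in the hunks below are introduced elsewhere in this same commit, in include/linux/sched.h, which falls outside this mm/huge_memory.c-limited view. For reference, the bits and the changed prototype as added by the commit:

```c
/* From include/linux/sched.h, added by this commit (not in the diffstat above) */
#define TNF_MIGRATED	0x01	/* the faulting page was migrated to the target node */
#define TNF_NO_GROUP	0x02	/* do not consider this fault for NUMA task grouping */

/* task_numa_fault() now takes a flags word instead of a bool 'migrated' */
extern void task_numa_fault(int last_cpupid, int node, int pages, int flags);
```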
```diff
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index becf92c..7ab4e32 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1285,6 +1285,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int target_nid, last_cpupid = -1;
 	bool page_locked;
 	bool migrated = false;
+	int flags = 0;
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(pmd, *pmdp)))
@@ -1299,6 +1300,14 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
 	/*
+	 * Avoid grouping on DSO/COW pages in specific and RO pages
+	 * in general, RO pages shouldn't hurt as much anyway since
+	 * they can be in shared cache state.
+	 */
+	if (!pmd_write(pmd))
+		flags |= TNF_NO_GROUP;
+
+	/*
 	 * Acquire the page lock to serialise THP migrations but avoid dropping
 	 * page_table_lock if at all possible
 	 */
@@ -1343,8 +1352,10 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_unlock(&mm->page_table_lock);
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
 				pmdp, pmd, addr, page, target_nid);
-	if (migrated)
+	if (migrated) {
+		flags |= TNF_MIGRATED;
 		page_nid = target_nid;
+	}
 
 	goto out;
 clear_pmdnuma:
@@ -1362,7 +1373,7 @@ out:
 		page_unlock_anon_vma_read(anon_vma);
 
 	if (page_nid != -1)
-		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, migrated);
+		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
 
 	return 0;
 }
```
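The consumer of TNF_NO_GROUP also lives outside this view, in task_numa_fault() in kernel/sched/fair.c. A minimal sketch of the gating this commit adds there; the surrounding logic is paraphrased for illustration, and only the TNF_* handling reflects the actual change:

```c
/*
 * Sketch of the scheduler-side consumer (task_numa_fault() in
 * kernel/sched/fair.c). Paraphrased -- the real function also updates
 * per-task fault statistics; the TNF_* handling is what this commit adds.
 */
void task_numa_fault(int last_cpupid, int node, int pages, int flags)
{
	struct task_struct *p = current;
	bool migrated = flags & TNF_MIGRATED;	/* was a separate bool argument */
	int priv;

	/* A first access has no prior cpupid and is treated as private */
	if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
		priv = 1;
	} else {
		priv = cpupid_match_pid(p, last_cpupid);
		/* RO faults carry TNF_NO_GROUP and never create/join a group */
		if (!priv && !(flags & TNF_NO_GROUP))
			task_numa_group(p, last_cpupid);
	}

	/* ... fault statistics accounting continues to use 'migrated' ... */
}
```

The net effect: read-only faults (libc text, other DSOs, COW pages) still feed each task's NUMA placement statistics, but they no longer fuse every task that maps libc into a single numa_group.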