Diffstat (limited to 'mm/migrate.c')
 -rw-r--r--  mm/migrate.c  92
 1 file changed, 19 insertions, 73 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index bb94004..c046927 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -130,7 +130,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 		ptep = huge_pte_offset(mm, addr);
 		if (!ptep)
 			goto out;
-		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
+		ptl = &mm->page_table_lock;
 	} else {
 		pmd = mm_find_pmd(mm, addr);
 		if (!pmd)
@@ -249,10 +249,9 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	__migration_entry_wait(mm, ptep, ptl);
 }
 
-void migration_entry_wait_huge(struct vm_area_struct *vma,
-		struct mm_struct *mm, pte_t *pte)
+void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
 {
-	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
+	spinlock_t *ptl = &(mm)->page_table_lock;
 	__migration_entry_wait(mm, pte, ptl);
 }
@@ -442,60 +441,10 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 }
 
 /*
- * Gigantic pages are so large that we do not guarantee that page++ pointer
- * arithmetic will work across the entire page. We need something more
- * specialized.
- */
-static void __copy_gigantic_page(struct page *dst, struct page *src,
-				int nr_pages)
-{
-	int i;
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < nr_pages; ) {
-		cond_resched();
-		copy_highpage(dst, src);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-static void copy_huge_page(struct page *dst, struct page *src)
-{
-	int i;
-	int nr_pages;
-
-	if (PageHuge(src)) {
-		/* hugetlbfs page */
-		struct hstate *h = page_hstate(src);
-		nr_pages = pages_per_huge_page(h);
-
-		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
-			__copy_gigantic_page(dst, src, nr_pages);
-			return;
-		}
-	} else {
-		/* thp page */
-		BUG_ON(!PageTransHuge(src));
-		nr_pages = hpage_nr_pages(src);
-	}
-
-	for (i = 0; i < nr_pages; i++) {
-		cond_resched();
-		copy_highpage(dst + i, src + i);
-	}
-}
-
-/*
  * Copy the page to its new location
  */
 void migrate_page_copy(struct page *newpage, struct page *page)
 {
-	int cpupid;
-
 	if (PageHuge(page) || PageTransHuge(page))
 		copy_huge_page(newpage, page);
 	else
@@ -532,13 +481,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 			__set_page_dirty_nobuffers(newpage);
 	}
 
-	/*
-	 * Copy NUMA information to the new page, to prevent over-eager
-	 * future migrations of this same page.
-	 */
-	cpupid = page_cpupid_xchg_last(page, -1);
-	page_cpupid_xchg_last(newpage, cpupid);
-
 	mlock_migrate_page(newpage, page);
 	ksm_migrate_page(newpage, page);
 	/*
@@ -1558,7 +1500,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 					  __GFP_NOWARN) &
 					 ~GFP_IOFS, 0);
 	if (newpage)
-		page_cpupid_xchg_last(newpage, page_cpupid_last(page));
+		page_nid_xchg_last(newpage, page_nid_last(page));
 
 	return newpage;
 }
@@ -1659,8 +1601,7 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
  * node. Caller is expected to have an elevated reference count on
  * the page that will be dropped by this function before returning.
  */
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
-			   int node)
+int migrate_misplaced_page(struct page *page, int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated;
@@ -1668,11 +1609,10 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	LIST_HEAD(migratepages);
 
 	/*
-	 * Don't migrate file pages that are mapped in multiple processes
-	 * with execute permissions as they are probably shared libraries.
+	 * Don't migrate pages that are mapped in multiple processes.
+	 * TODO: Handle false sharing detection instead of this hammer
 	 */
-	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
-	    (vma->vm_flags & VM_EXEC))
+	if (page_mapcount(page) != 1)
 		goto out;
 
 	/*
@@ -1715,7 +1655,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 				unsigned long address,
 				struct page *page, int node)
 {
-	spinlock_t *ptl;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
@@ -1724,6 +1663,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	int page_lru = page_is_file_cache(page);
 
 	/*
+	 * Don't migrate pages that are mapped in multiple processes.
+	 * TODO: Handle false sharing detection instead of this hammer
+	 */
+	if (page_mapcount(page) != 1)
+		goto out_dropref;
+
+	/*
 	 * Rate-limit the amount of data that is being migrated to a node.
 	 * Optimal placement is no good if the memory bus is saturated and
 	 * all the time is being spent migrating!
@@ -1736,7 +1682,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	if (!new_page)
 		goto out_fail;
 
-	page_cpupid_xchg_last(new_page, page_cpupid_last(page));
+	page_nid_xchg_last(new_page, page_nid_last(page));
 
 	isolated = numamigrate_isolate_page(pgdat, page);
 	if (!isolated) {
@@ -1755,9 +1701,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	WARN_ON(PageLRU(new_page));
 
 	/* Recheck the target PMD */
-	ptl = pmd_lock(mm, pmd);
+	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(*pmd, entry))) {
-		spin_unlock(ptl);
+		spin_unlock(&mm->page_table_lock);
 
 		/* Reverse changes made by migrate_page_copy() */
 		if (TestClearPageActive(new_page))
@@ -1802,7 +1748,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	 * before it's fully transferred to the new page.
 	 */
 	mem_cgroup_end_migration(memcg, page, new_page, true);
-	spin_unlock(ptl);
+	spin_unlock(&mm->page_table_lock);
 
 	unlock_page(new_page);
 	unlock_page(page);