Diffstat (limited to 'mm/khugepaged.c'):
 mm/khugepaged.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 573e436..93d5f87 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -819,6 +819,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 	    (vma->vm_flags & VM_NOHUGEPAGE))
 		return false;
 	if (shmem_file(vma->vm_file)) {
+		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+			return false;
 		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 				HPAGE_PMD_NR);
 	}
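
The new IS_ENABLED() check above is a compile-time gate: Kconfig either defines CONFIG_TRANSPARENT_HUGE_PAGECACHE as 1 or leaves it undefined, so the condition folds to a constant and the compiler discards the dead branch while still type-checking it. Below is a minimal, self-contained sketch of that mechanism, assuming plain user-space C; the real macro lives in include/linux/kconfig.h and additionally handles =m options via the _MODULE suffix.

#include <stdio.h>

/* Simplified version of the kernel's IS_ENABLED() machinery. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)	/* extra layer so x expands before pasting */
#define IS_ENABLED(option) __is_defined(option)

/* Pretend Kconfig emitted this; delete the line and IS_ENABLED() yields 0. */
#define CONFIG_TRANSPARENT_HUGE_PAGECACHE 1

int main(void)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
		puts("shmem VMAs are never huge-page candidates");
	else
		puts("shmem VMAs may be collapsed to huge pages");
	return 0;
}

Unlike a bare #ifdef around the check, this keeps the guarded code visible to the compiler in every configuration, so it cannot silently bit-rot when the option is off.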
@@ -1222,7 +1224,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 	}
 }
 
-#ifdef CONFIG_SHMEM
+#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 {
 	struct vm_area_struct *vma;
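
The widened preprocessor condition compiles the shmem page-table-retraction code out entirely unless huge pages can exist in the page cache. Here is a minimal sketch of that #if defined(...) && defined(...) pattern; the option names are the real ones, but the function bodies are hypothetical stand-ins, not kernel behaviour.

#include <stdio.h>

/* Pretend Kconfig output; comment either line out to build the stub path. */
#define CONFIG_SHMEM 1
#define CONFIG_TRANSPARENT_HUGE_PAGECACHE 1

#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
static void retract_page_tables(void)
{
	/* Stand-in for the real implementation. */
	puts("built: huge pagecache support is enabled");
}
#else
static void retract_page_tables(void)
{
	/* With either option off, the real code never reaches the
	 * object file; only this stub exists. */
	puts("stub: huge pagecache support is compiled out");
}
#endif

int main(void)
{
	retract_page_tables();
	return 0;
}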
@@ -1681,8 +1683,6 @@ skip:
 		if (khugepaged_scan.address < hstart)
 			khugepaged_scan.address = hstart;
 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
-		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
-			goto skip;
 
 		while (khugepaged_scan.address < hend) {
 			int ret;
@@ -1694,9 +1694,12 @@ skip:
 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
 				  hend);
 			if (shmem_file(vma->vm_file)) {
-				struct file *file = get_file(vma->vm_file);
+				struct file *file;
 				pgoff_t pgoff = linear_page_index(vma,
 						khugepaged_scan.address);
+				if (!shmem_huge_enabled(vma))
+					goto skip;
+				file = get_file(vma->vm_file);
 				up_read(&mm->mmap_sem);
 				ret = 1;
 				khugepaged_scan_shmem(mm, file->f_mapping,
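
Note the ordering the last hunk establishes: the shmem_huge_enabled() check (moved here from the earlier, pre-loop location removed in the previous hunk) now runs before get_file(), so the goto skip path never owns a file reference it would have to drop. A toy user-space sketch of that pattern follows; struct file, get_file() and fput() below reuse the kernel's names but are illustrative stand-ins, not the kernel implementations.

#include <stdio.h>

/* Toy refcounted stand-in for the kernel's struct file. */
struct file { int refcount; };

static struct file *get_file(struct file *f)
{
	f->refcount++;		/* take a reference */
	return f;
}

static void fput(struct file *f)
{
	f->refcount--;		/* drop the reference */
}

static void scan_vma(struct file *vm_file, int huge_enabled)
{
	struct file *file;

	if (!huge_enabled)
		return;			/* skip path: no reference taken, nothing to drop */
	file = get_file(vm_file);	/* pin the file before releasing the lock */
	/* ... here the kernel drops mmap_sem and scans file->f_mapping ... */
	fput(file);			/* balance the reference when done */
}

int main(void)
{
	struct file f = { .refcount = 1 };

	scan_vma(&f, 0);	/* skipped: refcount untouched */
	scan_vma(&f, 1);	/* scanned: get_file()/fput() pair */
	printf("refcount = %d\n", f.refcount);	/* prints 1 */
	return 0;
}

Taking the reference just before up_read(&mm->mmap_sem) matters because once the semaphore is dropped the VMA, and with it vma->vm_file, may go away; the pinned struct file keeps the mapping valid for khugepaged_scan_shmem().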