Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index e6a5a1f..1aa63e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1124,6 +1124,7 @@ again:
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
+ flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;
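
The flush_tlb_batched_pending(mm) call added above makes the zap loop drain any TLB invalidation that a parallel batched-reclaim path has deferred for this mm before PTEs are read and cleared, so the loop never works against stale TLB entries; when nothing is pending it stays a cheap no-op. A rough userspace sketch of that "flush only if a flush was deferred" pattern follows; struct toy_mm and the toy_* functions are made-up names for illustration, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-in for struct mm_struct: only tracks whether a batched
 * unmap path deferred a TLB flush for this address space. */
struct toy_mm {
	bool tlb_flush_pending;
};

static void toy_flush_tlb(struct toy_mm *mm)
{
	printf("flushing TLB for mm %p\n", (void *)mm);
	mm->tlb_flush_pending = false;
}

/* Rough analogue of flush_tlb_batched_pending(): flush only if a flush
 * was deferred, otherwise do nothing. */
static void toy_flush_tlb_batched_pending(struct toy_mm *mm)
{
	if (mm->tlb_flush_pending)
		toy_flush_tlb(mm);
}

int main(void)
{
	struct toy_mm mm = { .tlb_flush_pending = true };

	toy_flush_tlb_batched_pending(&mm);	/* drain the deferred flush... */
	/* ...before walking and clearing page-table entries, as in the hunk */
	return 0;
}
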
@@ -3595,6 +3596,11 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
/* do counter updates before entering really critical section. */
check_sync_rss_stat(current);
+ if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
+ flags & FAULT_FLAG_INSTRUCTION,
+ flags & FAULT_FLAG_REMOTE))
+ return VM_FAULT_SIGSEGV;
+
/*
* Enable the memcg OOM handling for faults triggered in user
* space. Kernel faults are handled more gracefully.
@@ -3602,11 +3608,6 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
if (flags & FAULT_FLAG_USER)
mem_cgroup_oom_enable();
- if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
- flags & FAULT_FLAG_INSTRUCTION,
- flags & FAULT_FLAG_REMOTE))
- return VM_FAULT_SIGSEGV;
-
if (unlikely(is_vm_hugetlb_page(vma)))
ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
else
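
The two hunks above move the arch_vma_access_permitted() check from below mem_cgroup_oom_enable() to above it. With the old placement, the VM_FAULT_SIGSEGV return could fire after memcg OOM handling had been enabled for the task but before the matching mem_cgroup_oom_disable() later in handle_mm_fault(), leaving that state behind; reordering lets the permission failure bail out before any state needing paired teardown is set up. A small userspace sketch of that "validate first, then enable paired state" shape, with invented names (check_access, oom_enable/oom_disable) rather than the kernel functions:

#include <stdbool.h>
#include <stdio.h>

static bool oom_enabled;	/* stand-in for the per-task memcg OOM flag */

static void oom_enable(void)  { oom_enabled = true; }
static void oom_disable(void) { oom_enabled = false; }

/* Invented permission check standing in for arch_vma_access_permitted(). */
static bool check_access(bool allowed)
{
	return allowed;
}

/* Fail fast before enabling state that needs paired teardown, mirroring
 * the reordered hunks above: a denied access returns before oom_enable(),
 * so oom_disable() can never be skipped. */
static int handle_fault(bool allowed)
{
	if (!check_access(allowed))
		return -1;		/* nothing to undo on this path */

	oom_enable();
	/* ... actual fault handling would run here ... */
	oom_disable();			/* always paired with oom_enable() */
	return 0;
}

int main(void)
{
	int ret = handle_fault(false);
	printf("denied: ret=%d oom_enabled=%d\n", ret, (int)oom_enabled);
	ret = handle_fault(true);
	printf("ok:     ret=%d oom_enabled=%d\n", ret, (int)oom_enabled);
	return 0;
}
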
@@ -3634,8 +3635,18 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
* further.
*/
if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
- && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
+ && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) {
+
+ /*
+ * We are going to enforce SIGBUS but the PF path might have
+ * dropped the mmap_sem already so take it again so that
+ * we do not break expectations of all arch specific PF paths
+ * and g-u-p
+ */
+ if (ret & VM_FAULT_RETRY)
+ down_read(&vma->vm_mm->mmap_sem);
ret = VM_FAULT_SIGBUS;
+ }
return ret;
}
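
The final hunk turns the MMF_UNSTABLE check into a block: a kernel-thread fault on an mm marked unstable is forced to VM_FAULT_SIGBUS, but if the fault path had already returned VM_FAULT_RETRY it dropped mmap_sem on its way out, while every caller that sees a non-RETRY result expects to still hold the lock and release it itself. Re-taking mmap_sem for read before overwriting ret keeps that contract for the arch fault handlers and get_user_pages. Below is a userspace sketch of the same lock-ownership convention, using a pthread rwlock and invented names (do_fault, fault_locked); it is a compressed model, not the kernel's fault API.

#include <pthread.h>
#include <stdio.h>

enum fault_ret { FAULT_OK, FAULT_RETRY, FAULT_SIGBUS };

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for the inner fault path: on RETRY it drops the lock itself,
 * on any other result the lock stays held for the caller to release. */
static enum fault_ret do_fault(int want_retry)
{
	if (want_retry) {
		pthread_rwlock_unlock(&mmap_lock);
		return FAULT_RETRY;
	}
	return FAULT_OK;
}

static enum fault_ret fault_locked(int want_retry, int force_sigbus)
{
	enum fault_ret ret;

	pthread_rwlock_rdlock(&mmap_lock);
	ret = do_fault(want_retry);

	if (force_sigbus) {
		/* RETRY means do_fault() already dropped the lock; take it
		 * again so the non-RETRY result reported below still comes
		 * with the lock held, as the hunk above does. */
		if (ret == FAULT_RETRY)
			pthread_rwlock_rdlock(&mmap_lock);
		ret = FAULT_SIGBUS;
	}

	/* Caller-side convention: only non-RETRY results are unlocked here. */
	if (ret != FAULT_RETRY)
		pthread_rwlock_unlock(&mmap_lock);
	return ret;
}

int main(void)
{
	printf("retry forced to SIGBUS: %d\n", fault_locked(1, 1));
	printf("plain success:          %d\n", fault_locked(0, 0));
	return 0;
}
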