author     Alex Shi <alex.shi@linaro.org>  2017-09-01 04:04:22 (GMT)
committer  Alex Shi <alex.shi@linaro.org>  2017-09-01 04:04:22 (GMT)
commit     99a15512fa583684b8cc4ad4805e1e690609cdc7 (patch)
tree       6c6b4c091b4b89e9da69030b26d5a68e833d98a4 /mm
parent     7c6819fafcd4d26f655ad9b0441f79773c69cf99 (diff)
parent     0eed54bdbd1b922004fe05dc8bf3815f2e5723d7 (diff)
download   linux-99a15512fa583684b8cc4ad4805e1e690609cdc7.tar.xz
Merge tag 'v4.9.46' into linux-linaro-lsk-v4.9
This is the 4.9.46 stable release
Diffstat (limited to 'mm')
-rw-r--r--  mm/madvise.c     |  2
-rw-r--r--  mm/memblock.c    | 38
-rw-r--r--  mm/memory.c      | 12
-rw-r--r--  mm/mempolicy.c   |  5
-rw-r--r--  mm/migrate.c     | 11
-rw-r--r--  mm/nobootmem.c   | 16
-rw-r--r--  mm/page_alloc.c  |  4
-rw-r--r--  mm/shmem.c       |  4
8 files changed, 38 insertions, 54 deletions
diff --git a/mm/madvise.c b/mm/madvise.c
index 253b153..63a1216 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -331,8 +331,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
pte_offset_map_lock(mm, pmd, addr, &ptl);
goto out;
}
- put_page(page);
unlock_page(page);
+ put_page(page);
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte--;
addr -= PAGE_SIZE;
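
Note: the swap above is the whole fix. put_page() can drop the last reference and free the page, after which unlock_page() would operate on freed memory, so the page must be unlocked while a reference is still held. A minimal userspace sketch of the same release-ordering rule (the types and helpers below are illustrative stand-ins, not the kernel's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;   /* stand-in for the page lock */
	atomic_int refcount;    /* stand-in for the page refcount */
};

static void obj_put(struct obj *o)
{
	/* Dropping the last reference frees the object, so the caller
	 * must not touch it afterwards. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		pthread_mutex_destroy(&o->lock);
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	pthread_mutex_init(&o->lock, NULL);
	atomic_init(&o->refcount, 1);
	pthread_mutex_lock(&o->lock);

	pthread_mutex_unlock(&o->lock); /* unlock first... */
	obj_put(o);                     /* ...then drop the reference */
	/* The reverse order would unlock a mutex that obj_put() may
	 * already have destroyed and freed. */
	return 0;
}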
diff --git a/mm/memblock.c b/mm/memblock.c
index 9f6be74..3740af5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -297,31 +297,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
}
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
-
-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
- phys_addr_t *addr)
-{
- if (memblock.reserved.regions == memblock_reserved_init_regions)
- return 0;
-
- *addr = __pa(memblock.reserved.regions);
-
- return PAGE_ALIGN(sizeof(struct memblock_region) *
- memblock.reserved.max);
-}
-
-phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
- phys_addr_t *addr)
+/**
+ * Discard memory and reserved arrays if they were allocated
+ */
+void __init memblock_discard(void)
{
- if (memblock.memory.regions == memblock_memory_init_regions)
- return 0;
+ phys_addr_t addr, size;
- *addr = __pa(memblock.memory.regions);
+ if (memblock.reserved.regions != memblock_reserved_init_regions) {
+ addr = __pa(memblock.reserved.regions);
+ size = PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.reserved.max);
+ __memblock_free_late(addr, size);
+ }
- return PAGE_ALIGN(sizeof(struct memblock_region) *
- memblock.memory.max);
+ if (memblock.memory.regions != memblock_memory_init_regions) {
+ addr = __pa(memblock.memory.regions);
+ size = PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.memory.max);
+ __memblock_free_late(addr, size);
+ }
}
-
#endif
/**
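
Note: the new memblock_discard() replaces the two exported get_allocated_memblock_*_regions_info() helpers, whose only caller (mm/nobootmem.c) loses the matching code further down. Each region array is freed only if it no longer points at its static boot-time buffer, i.e. only if memblock_double_array() ever reallocated it. A userspace sketch of that static-versus-grown check, with illustrative names:

#include <stdlib.h>
#include <string.h>

#define INIT_REGIONS 4

struct region { long base, size; };

/* static boot-time array, analogous to memblock_reserved_init_regions */
static struct region init_regions[INIT_REGIONS];

struct region_array {
	struct region *regions;
	size_t max;
};

static void array_discard(struct region_array *a)
{
	/* Free only if the array was ever grown off the static buffer. */
	if (a->regions != init_regions)
		free(a->regions);
}

int main(void)
{
	struct region_array a = { init_regions, INIT_REGIONS };

	/* Grow: move off the static buffer, as memblock_double_array()
	 * does when the boot-time array fills up. */
	a.regions = malloc(2 * INIT_REGIONS * sizeof(*a.regions));
	memcpy(a.regions, init_regions, sizeof(init_regions));
	a.max = 2 * INIT_REGIONS;

	array_discard(&a); /* safe whether or not the grow happened */
	return 0;
}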
diff --git a/mm/memory.c b/mm/memory.c
index 9bf3da0..d064caf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3635,8 +3635,18 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
* further.
*/
if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
- && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
+ && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) {
+
+ /*
+ * We are going to enforce SIGBUS but the PF path might have
+ * dropped the mmap_sem already so take it again so that
+ * we do not break expectations of all arch specific PF paths
+ * and g-u-p
+ */
+ if (ret & VM_FAULT_RETRY)
+ down_read(&vma->vm_mm->mmap_sem);
ret = VM_FAULT_SIGBUS;
+ }
return ret;
}
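
Note: the re-taken mmap_sem preserves an invariant of handle_mm_fault(): unless VM_FAULT_RETRY is in the return value, callers (arch fault handlers and get_user_pages) assume mmap_sem is still held on return. Since the forced VM_FAULT_SIGBUS overwrites a possible VM_FAULT_RETRY, the lock the fault path already dropped must be taken again first. A userspace sketch of the pattern, with illustrative names and flag values:

#include <pthread.h>

#define FAULT_RETRY  0x1  /* "the lock was dropped inside the callee" */
#define FAULT_SIGBUS 0x2

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Contract: mmap_lock is read-held on return unless FAULT_RETRY is set. */
static int finish_fault(int ret, int force_sigbus)
{
	if (force_sigbus) {
		/* About to clear the RETRY bit, so restore the locking
		 * state the caller will assume. */
		if (ret & FAULT_RETRY)
			pthread_rwlock_rdlock(&mmap_lock);
		ret = FAULT_SIGBUS;
	}
	return ret;
}

int main(void)
{
	pthread_rwlock_rdlock(&mmap_lock);
	/* Simulate a fault path that dropped the lock and asked to
	 * retry, then got forced to SIGBUS. */
	pthread_rwlock_unlock(&mmap_lock);
	int ret = finish_fault(FAULT_RETRY, 1);
	pthread_rwlock_unlock(&mmap_lock); /* held again, as promised */
	return ret == FAULT_SIGBUS ? 0 : 1;
}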
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 23471526..a8ab5e7 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -926,11 +926,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
*policy |= (pol->flags & MPOL_MODE_FLAGS);
}
- if (vma) {
- up_read(&current->mm->mmap_sem);
- vma = NULL;
- }
-
err = 0;
if (nmask) {
if (mpol_store_user_nodemask(pol)) {
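
Note: the deleted block dropped mmap_sem before the nmask handling below it, but pol can still point at vma->vm_policy, which is only guaranteed to stay alive while the lock is held; dereferencing it after the unlock is a potential use-after-free. The lock is now kept until pol is no longer needed and released on the function's common exit path. A compact userspace sketch of the same borrowed-pointer rule, with illustrative names:

#include <pthread.h>

struct policy { int mode; };
struct vma    { struct policy *pol; };

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* v->pol is only stable while mmap_lock is held: another thread may
 * free it (e.g. by unmapping the vma) the moment the lock is dropped. */
static int get_policy_mode(struct vma *v)
{
	struct policy *pol;
	int mode;

	pthread_rwlock_rdlock(&mmap_lock);
	pol = v->pol;                      /* borrowed pointer */
	mode = pol->mode;                  /* last use of pol... */
	pthread_rwlock_unlock(&mmap_lock); /* ...then unlock */
	return mode;
}

int main(void)
{
	struct policy p = { 1 };
	struct vma v = { &p };
	return get_policy_mode(&v) == 1 ? 0 : 1;
}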
diff --git a/mm/migrate.c b/mm/migrate.c
index 6850f62..821623f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -40,6 +40,7 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
+#include <linux/ptrace.h>
#include <asm/tlbflush.h>
@@ -1663,7 +1664,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
const int __user *, nodes,
int __user *, status, int, flags)
{
- const struct cred *cred = current_cred(), *tcred;
struct task_struct *task;
struct mm_struct *mm;
int err;
@@ -1687,14 +1687,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
/*
* Check if this process has the right to modify the specified
- * process. The right exists if the process has administrative
- * capabilities, superuser privileges or the same
- * userid as the target process.
+ * process. Use the regular "ptrace_may_access()" checks.
*/
- tcred = __task_cred(task);
- if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
- !capable(CAP_SYS_NICE)) {
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
rcu_read_unlock();
err = -EPERM;
goto out;
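
Note: the open-coded uid/suid comparison removed here let any process sharing a uid with the target drive its page placement (and probe its address space); ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS) applies the same policy as a ptrace read attach, including capability and LSM checks. For reference, the shape of the removed check in plain C, with the capability test elided and illustrative types:

#include <stdbool.h>
#include <sys/types.h>

struct creds { uid_t uid, euid, suid; };

/* Old rule: allowed when any of the caller's real or effective uid
 * matched the target's real or saved uid; CAP_SYS_NICE also sufficed. */
static bool old_move_pages_allowed(const struct creds *c,
				   const struct creds *t)
{
	return c->euid == t->suid || c->euid == t->uid ||
	       c->uid  == t->suid || c->uid  == t->uid;
}

int main(void)
{
	struct creds caller = { 1000, 1000, 1000 };
	struct creds target = { 1000, 1000, 1000 };
	/* Same uid: the old check passed regardless of ptrace policy. */
	return old_move_pages_allowed(&caller, &target) ? 0 : 1;
}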
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 487dad6..ab99812 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -146,22 +146,6 @@ static unsigned long __init free_low_memory_core_early(void)
NULL)
count += __free_memory_core(start, end);
-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
- {
- phys_addr_t size;
-
- /* Free memblock.reserved array if it was allocated */
- size = get_allocated_memblock_reserved_regions_info(&start);
- if (size)
- count += __free_memory_core(start, start + size);
-
- /* Free memblock.memory array if it was allocated */
- size = get_allocated_memblock_memory_regions_info(&start);
- if (size)
- count += __free_memory_core(start, start + size);
- }
-#endif
-
return count;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9419aa4..2abf8d5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1587,6 +1587,10 @@ void __init page_alloc_init_late(void)
/* Reinit limits that are based on free pages after the kernel is up */
files_maxfiles_init();
#endif
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+ /* Discard memblock private memory */
+ memblock_discard();
+#endif
for_each_populated_zone(zone)
set_zone_contiguous(zone);
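
Note: calling memblock_discard() from page_alloc_init_late(), rather than from the early free path that mm/nobootmem.c just lost, ensures the region arrays are released only after all early boot users of memblock are finished. The matching declaration presumably sits behind the same config option in include/linux/memblock.h (a sketch, not shown in this diff); since the call site above carries its own #ifdef, no stub is needed for other configurations:

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
void memblock_discard(void);
#endif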
diff --git a/mm/shmem.c b/mm/shmem.c
index 7ee5444..004e0f87 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3810,7 +3810,7 @@ int __init shmem_init(void)
}
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
- if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
+ if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
else
shmem_huge = 0; /* just in case it was patched */
@@ -3871,7 +3871,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
return -EINVAL;
shmem_huge = huge;
- if (shmem_huge < SHMEM_HUGE_DENY)
+ if (shmem_huge > SHMEM_HUGE_DENY)
SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
return count;
}
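
Note: both hunks flip the same inverted comparison. The SHMEM_HUGE_* values defined earlier in mm/shmem.c (quoted here from the 4.9-era source) make the intent clear: the special values are negative, so "> SHMEM_HUGE_DENY" selects exactly the regular huge modes that may be copied to the mount's superblock, while the old "<" matched only SHMEM_HUGE_FORCE:

/* Regular huge modes, valid as mount options: */
#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3
/* Special values, settable only via sysfs: */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

/* shmem_huge > SHMEM_HUGE_DENY => one of the four regular modes
 * shmem_huge < SHMEM_HUGE_DENY => only SHMEM_HUGE_FORCE (the bug) */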