author		Will Deacon <will.deacon@arm.com>	2014-07-09 18:22:13 (GMT)
committer	Catalin Marinas <catalin.marinas@arm.com>	2014-07-17 15:18:57 (GMT)
commit		601255ae3c98fdeeee3a8bb4696425e4f868b4f1 (patch)
tree		02f118302b7d2f01c0ee35a026e406976c23fc73 /arch/arm64/kernel/vdso.c
parent		2fea7f6c98f5957e539eb8aa0ce849729b900342 (diff)
download	linux-601255ae3c98fdeeee3a8bb4696425e4f868b4f1.tar.xz
arm64: vdso: move data page before code pages
Andy pointed out that binutils generates additional sections in the vdso
image (e.g. section string table) which, if our .text section gets big
enough, could cross a page boundary and end up screwing up the location
where the kernel expects to put the data page.

This patch solves the issue in the same manner as x86_32, by moving the
data page before the code pages.

Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/kernel/vdso.c')
-rw-r--r--	arch/arm64/kernel/vdso.c | 34
1 file changed, 17 insertions, 17 deletions
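
Before diving into the diff, a toy userspace model may help make the layout change concrete. Everything below is illustrative only: the base address and text size are made-up constants, and only the ordering (data page first, code pages after it) reflects what the patch does.

/* Toy model of the layout change (userspace sketch, not kernel code).
 * With the data page placed first, its address is vdso_base regardless of
 * how large the generated vDSO text (plus any extra binutils sections)
 * turns out to be; in the old layout it sat after the text and moved
 * whenever the text grew. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long vdso_base = 0x7fb000000000UL;	/* made-up example base */
	unsigned long vdso_text_len = 2 * PAGE_SIZE;	/* made-up example text size */

	/* New layout: [vvar] data page first, [vdso] code after it. */
	printf("new: [vvar] at %#lx, [vdso] at %#lx\n",
	       vdso_base, vdso_base + PAGE_SIZE);

	/* Old layout: [vdso] code first, [vvar] appended after the text. */
	printf("old: [vdso] at %#lx, [vvar] at %#lx\n",
	       vdso_base, vdso_base + vdso_text_len);
	return 0;
}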
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 60ae120..24f2e8c 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -121,8 +121,8 @@ static int __init vdso_init(void)
 	}
 
 	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
-		vdso_pages + 1, vdso_pages, 1L, &vdso_start);
+	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
 
 	/* Allocate the vDSO pagelist, plus a page for the data. */
 	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -130,22 +130,22 @@ static int __init vdso_init(void)
 	if (vdso_pagelist == NULL)
 		return -ENOMEM;
 
+	/* Grab the vDSO data page. */
+	vdso_pagelist[0] = virt_to_page(vdso_data);
+
 	/* Grab the vDSO code pages. */
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
-
-	/* Grab the vDSO data page. */
-	vdso_pagelist[i] = virt_to_page(vdso_data);
+		vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
 
 	/* Populate the special mapping structures */
 	vdso_spec[0] = (struct vm_special_mapping) {
-		.name	= "[vdso]",
+		.name	= "[vvar]",
 		.pages	= vdso_pagelist,
 	};
 
 	vdso_spec[1] = (struct vm_special_mapping) {
-		.name	= "[vvar]",
-		.pages	= vdso_pagelist + vdso_pages,
+		.name	= "[vdso]",
+		.pages	= &vdso_pagelist[1],
 	};
 
 	return 0;
@@ -169,22 +169,22 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		ret = ERR_PTR(vdso_base);
 		goto up_fail;
 	}
-	mm->context.vdso = (void *)vdso_base;
-
-	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-				       VM_READ|VM_EXEC|
-				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
 				       &vdso_spec[0]);
 	if (IS_ERR(ret))
 		goto up_fail;
 
-	vdso_base += vdso_text_len;
-	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-				       VM_READ|VM_MAYREAD,
+	vdso_base += PAGE_SIZE;
+	mm->context.vdso = (void *)vdso_base;
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				       &vdso_spec[1]);
 	if (IS_ERR(ret))
 		goto up_fail;
+
 	up_write(&mm->mmap_sem);
 	return 0;
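
As a quick sanity check of the new naming and ordering (illustrative only, not part of this patch), a small program can grep its own /proc/self/maps; on a kernel with this change applied, the read-only [vvar] data page should appear immediately before the executable [vdso] mapping.

/* Print the vDSO-related lines of /proc/self/maps. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512];

	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vvar]") || strstr(line, "[vdso]"))
			fputs(line, stdout);

	fclose(f);
	return 0;
}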