author    Linus Torvalds <torvalds@linux-foundation.org>  2011-10-28 12:03:12 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-10-28 12:03:12 (GMT)
commit    e34eb39c1c791fe79da6aae0d9057f0c74c2f0ed (patch)
tree      cfc27d987b2c888f189f35326702220b694596cc /arch/x86/vdso
parent    396e6e49c58bb23d1814d3c240c736c9f01523c5 (diff)
parent    910b2c5122ab787179a790ca1dec616fc80f0173 (diff)
download  linux-e34eb39c1c791fe79da6aae0d9057f0c74c2f0ed.tar.xz
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, amd: Include linux/elf.h since we use stuff from asm/elf.h
  x86: cache_info: Update calculation of AMD L3 cache indices
  x86: cache_info: Kill the atomic allocation in amd_init_l3_cache()
  x86: cache_info: Kill the moronic shadow struct
  x86: cache_info: Remove bogus free of amd_l3_cache data
  x86, amd: Include elf.h explicitly, prepare the code for the module.h split
  x86-32, amd: Move va_align definition to unbreak 32-bit build
  x86, amd: Move BSP code to cpu_dev helper
  x86: Add a BSP cpu_dev helper
  x86, amd: Avoid cache aliasing penalties on AMD family 15h
Diffstat (limited to 'arch/x86/vdso')
-rw-r--r--  arch/x86/vdso/vma.c | 9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 316fbca..153407c 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -89,6 +89,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 	addr = start + (offset << PAGE_SHIFT);
 	if (addr >= end)
 		addr = end;
+
+	/*
+	 * page-align it here so that get_unmapped_area doesn't
+	 * align it wrongfully again to the next page. addr can come in 4K
+	 * unaligned here as a result of stack start randomization.
+	 */
+	addr = PAGE_ALIGN(addr);
+	addr = align_addr(addr, NULL, ALIGN_VDSO);
+
 	return addr;
 }
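
To see the effect of the two alignment steps outside the kernel, here is a minimal user-space sketch. It assumes a 4 KiB page size, models va_align.mask as a 32 KiB alignment (the aliasing-sensitive bits on family 15h are usually described as bits [14:12]), and assumes the ALIGN_VDSO path of align_addr() simply masks the address down to that boundary; VA_ALIGN_MASK and vdso_align() are hypothetical stand-ins, and PAGE_ALIGN is re-defined here only so the sketch compiles on its own.

	#include <stdio.h>

	/* User-space stand-ins for the kernel macros used in the hunk above. */
	#define PAGE_SIZE     4096UL
	#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	/*
	 * Hypothetical model of va_align.mask: align to 32 KiB so the
	 * aliasing-sensitive address bits come out the same for every
	 * vDSO mapping. The real mask is set by the AMD family 15h
	 * CPU setup code, not hard-coded like this.
	 */
	#define VA_ALIGN_MASK ((1UL << 15) - 1)

	/* Sketch of the ALIGN_VDSO case: round *down* to the
	 * aliasing-safe boundary, never past the page-aligned input. */
	static unsigned long vdso_align(unsigned long addr)
	{
		return addr & ~VA_ALIGN_MASK;
	}

	int main(void)
	{
		/* A 4K-unaligned address, as stack start randomization can produce. */
		unsigned long addr = 0x7f0012345678UL;

		printf("raw          : %#lx\n", addr);
		addr = PAGE_ALIGN(addr);   /* step 1: snap up to a page boundary */
		printf("page-aligned : %#lx\n", addr);
		addr = vdso_align(addr);   /* step 2: snap down to the vDSO boundary */
		printf("vdso-aligned : %#lx\n", addr);
		return 0;
	}

The ordering matters for the reason the patch comment gives: PAGE_ALIGN() first guarantees the address handed to the larger-alignment step is already page-aligned, so get_unmapped_area() has no reason to nudge it to the next page afterwards and undo the aliasing-safe placement.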