author    Jeremy Fitzhardinge <jeremy@goop.org>  2008-05-28 14:02:14 (GMT)
committer Ingo Molnar <mingo@elte.hu>            2008-07-08 10:48:13 (GMT)
commit    a7bf0bd5e6af7fe69342dabf2a3b721f0163469a
tree      3f7e08f0df4c5eccf81732dcf95b8cc4efafa203
parent    1ecd27657b735128a728ebf0c31fce5e1456718a
build: add __page_aligned_data and __page_aligned_bss
Making a variable page-aligned by using __attribute__((section(".data.page_aligned"))) is fragile, because if sizeof(variable) is not also a multiple of page size, it leaves variables in the remainder of the section unaligned.

This patch introduces two new qualifiers, __page_aligned_data and __page_aligned_bss, to set the section *and* the alignment of variables. This makes page-aligned variables more robust because the linker will make sure they're aligned properly.

Unfortunately it requires *all* page-aligned data to use these macros...

Signed-off-by: Ingo Molnar <mingo@elte.hu>
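For illustration only (the header hunk that defines the new qualifiers is outside the arch/x86/mm diffstat shown below): a minimal sketch of how such qualifiers can pair a section placement with an explicit alignment, assuming the usual __section()/__aligned() kernel helpers; the exact definitions added by the full patch may differ.

/* Sketch: place the variable in the page-aligned section *and* force
 * PAGE_SIZE alignment, so the linker aligns it regardless of its size. */
#define __page_aligned_data  __section(.data.page_aligned) __aligned(PAGE_SIZE)
#define __page_aligned_bss   __section(.bss.page_aligned)  __aligned(PAGE_SIZE)

/* Usage, as in the ioremap.c hunk below: */
static pte_t bm_pte[PAGE_SIZE / sizeof(pte_t)] __page_aligned_bss;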
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/ioremap.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 416ea41..0561fde 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -394,8 +394,7 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
-		__section(.bss.page_aligned);
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {