Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/.gitignore                     |  3 +++
-rw-r--r--  arch/x86/boot/compressed/mkpiggy.c      |  2 +-
-rw-r--r--  arch/x86/boot/compressed/vmlinux.lds.S  |  4 ++--
-rw-r--r--  arch/x86/include/asm/cache.h            |  2 +-
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S        |  2 +-
-rw-r--r--  arch/x86/kernel/init_task.c             |  2 +-
-rw-r--r--  arch/x86/kernel/setup_percpu.c          |  2 +-
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S           |  4 ++--
8 files changed, 12 insertions(+), 9 deletions(-)
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
new file mode 100644
index 0000000..0280790
--- /dev/null
+++ b/arch/x86/.gitignore
@@ -0,0 +1,3 @@
+boot/compressed/vmlinux
+tools/test_get_len
+
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index bcbd36c..5c22812 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -77,7 +77,7 @@ int main(int argc, char *argv[])
 	offs += 32*1024 + 18;		/* Add 32K + 18 bytes slack */
 	offs = (offs+4095) & ~4095;	/* Round to a 4K boundary */
 
-	printf(".section \".rodata.compressed\",\"a\",@progbits\n");
+	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
 	printf(".globl z_input_len\n");
 	printf("z_input_len = %lu\n", ilen);
 	printf(".globl z_output_len\n");
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index a6f1a59..5ddabce 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -26,8 +26,8 @@ SECTIONS
 		HEAD_TEXT
 		_ehead = . ;
 	}
-	.rodata.compressed : {
-		*(.rodata.compressed)
+	.rodata..compressed : {
+		*(.rodata..compressed)
 	}
 	.text :	{
 		_text = .; 	/* Text */
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 2f9047c..48f99f1 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,7 +7,7 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
 #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 8ded418..13ab720 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,4 +1,4 @@
-	.section .text.page_aligned
+	.section .text..page_aligned
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb..43e9ccf 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a867940..cf59276 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -247,7 +247,7 @@ void __init setup_per_cpu_areas(void)
 #endif
 #endif
 		/*
-		 * Up to this point, the boot CPU has been using .data.init
+		 * Up to this point, the boot CPU has been using .init.data
 		 * area.  Reload any changed state for the boot CPU.
 		 */
 		if (cpu == boot_cpu_id)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 2cc2497..d0bb522 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -97,7 +97,7 @@ SECTIONS
 		HEAD_TEXT
 #ifdef CONFIG_X86_32
 		. = ALIGN(PAGE_SIZE);
-		*(.text.page_aligned)
+		*(.text..page_aligned)
 #endif
 		. = ALIGN(8);
 		_stext = .;
@@ -305,7 +305,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
 		__bss_start = .;
-		*(.bss.page_aligned)
+		*(.bss..page_aligned)
 		*(.bss)
 		. = ALIGN(4);
 		__bss_stop = .;
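
For context: the rename only changes the section names that kernel code and the linker scripts agree on; the placement mechanics are unchanged. Below is a minimal stand-alone sketch (not part of this commit; the variable and function names are made up) of how the __read_mostly macro from <asm/cache.h> drops a variable into the ".data..read_mostly" input section, which the kernel linker script then gathers:

/* sketch.c -- compile with "gcc -c sketch.c" and run
 * "objdump -t sketch.o" to see example_flag placed in the
 * .data..read_mostly section, matching the new name used
 * in <asm/cache.h>.
 */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

static int example_flag __read_mostly = 1;	/* hypothetical variable */

int example_read(void)
{
	return example_flag;
}

The extra dot matters because GCC's -ffunction-sections/-fdata-sections options emit per-symbol sections named ".data.<symbol>"; a ".data.." prefix cannot collide with any of those, since C identifiers cannot contain dots.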