author		Vegard Nossum <vegard.nossum@gmail.com>	2008-06-10 21:45:45 (GMT)
committer	Ingo Molnar <mingo@elte.hu>	2008-06-18 10:27:03 (GMT)
commit		0db125c467afcbcc229abb1a87bc36ef72777dc2 (patch)
tree		afb52caf91eb9db5115355c7163fdf11bb4bec0a
parent		e6e07d8a2d2989c1f42287131308aa2fde253631 (diff)
download	linux-0db125c467afcbcc229abb1a87bc36ef72777dc2.tar.xz
x86: more header fixes
Summary: Add missing include guards for some x86 headers.
This has only had the most rudimentary testing, but is hopefully obviously
correct.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/asm-x86/seccomp_64.h	1 +
-rw-r--r--	include/asm-x86/suspend_32.h	5 +++++
-rw-r--r--	include/asm-x86/xor_32.h	5 +++++
-rw-r--r--	include/asm-x86/xor_64.h	5 +++++
4 files changed, 16 insertions, 0 deletions
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 553af65..76cfe69 100644
--- a/include/asm-x86/seccomp_64.h
+++ b/include/asm-x86/seccomp_64.h
@@ -1,4 +1,5 @@
 #ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
 
 #include <linux/thread_info.h>
 
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 24e1c08..8675c67 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -3,6 +3,9 @@
  * Based on code
  * Copyright 2001 Patrick Mochel <mochel@osdl.org>
  */
+#ifndef __ASM_X86_32_SUSPEND_H
+#define __ASM_X86_32_SUSPEND_H
+
 #include <asm/desc.h>
 #include <asm/i387.h>
 
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
 #endif
+
+#endif /* __ASM_X86_32_SUSPEND_H */
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index 067b5c1..921b458 100644
--- a/include/asm-x86/xor_32.h
+++ b/include/asm-x86/xor_32.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -881,3 +884,5 @@ do {						\
    deals with a load to a line that is being prefetched.  */
 #define XOR_SELECT_TEMPLATE(FASTEST) \
 	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 24957e3..2d3a18d 100644
--- a/include/asm-x86/xor_64.h
+++ b/include/asm-x86/xor_64.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -354,3 +357,5 @@ do {						\
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched.  */
 #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */
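
For readers unfamiliar with the idiom, every hunk above applies the same standard C include guard. A minimal sketch follows; the header and macro names here are hypothetical, chosen only for illustration, and do not come from this patch:

/* example.h - hypothetical header, shown only to illustrate the guard idiom */
#ifndef EXAMPLE_H		/* true only if this header has not been seen yet */
#define EXAMPLE_H		/* mark the header as already included */

struct example_state {
	int counter;
};

static inline int example_read(const struct example_state *s)
{
	return s->counter;
}

#endif /* EXAMPLE_H */		/* later #includes of example.h expand to nothing */

The seccomp_64.h hunk fixes exactly the missing #define: with only the #ifndef present, the test succeeds on every inclusion, so a file that #includes the header twice re-emits its declarations and risks redefinition errors. The other three headers had no guard at all, so the patch adds both halves plus the closing #endif.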