author      Linus Torvalds <torvalds@linux-foundation.org>  2008-10-11 18:47:30 (GMT)
committer   Linus Torvalds <torvalds@linux-foundation.org>  2008-10-11 18:51:16 (GMT)
commit      ead9d23d803ea3a73766c3cb27bf7563ac8d7266 (patch)
tree        42225fadd0d5388bf21d1658e56879e14f23e013 /include
parent      bf6f51e3a46f6a602853d3cbacd05864bc6e2a37 (diff)
parent      0afe2db21394820d32646a695eccf3fbfe6ab5c7 (diff)
download    linux-fsl-qoriq-ead9d23d803ea3a73766c3cb27bf7563ac8d7266.tar.xz
Merge phase #4 (X2APIC, APIC unification, CPU identification unification) of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-v28-for-linus-phase4-D' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (186 commits)
  x86, debug: print more information about unknown CPUs
  x86 setup: handle more than 8 CPU flag words
  x86: cpuid, fix typo
  x86: move transmeta cap read to early_init_transmeta()
  x86: identify_cpu_without_cpuid v2
  x86: extended "flags" to show virtualization HW feature in /proc/cpuinfo
  x86: move VMX MSRs to msr-index.h
  x86: centaur_64.c remove duplicated setting of CONSTANT_TSC
  x86: intel.c put workaround for old cpus together
  x86: let intel 64-bit use intel.c
  x86: make intel_64.c the same as intel.c
  x86: make intel.c have 64-bit support code
  x86: little clean up of intel.c/intel_64.c
  x86: make 64 bit to use amd.c
  x86: make amd_64 have 32 bit code
  x86: make amd.c have 64bit support code
  x86: merge header in amd_64.c
  x86: add srat_detect_node for amd64
  x86: remove duplicated force_mwait
  x86: cpu make amd.c more like amd_64.c v2
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 1
-rw-r--r--  include/asm-x86/apic.h | 65
-rw-r--r--  include/asm-x86/apicdef.h | 3
-rw-r--r--  include/asm-x86/arch_hooks.h | 2
-rw-r--r--  include/asm-x86/bigsmp/apic.h (renamed from include/asm-x86/mach-bigsmp/mach_apic.h) | 10
-rw-r--r--  include/asm-x86/bigsmp/apicdef.h | 13
-rw-r--r--  include/asm-x86/bigsmp/ipi.h (renamed from include/asm-x86/mach-summit/mach_ipi.h) | 6
-rw-r--r--  include/asm-x86/bugs.h | 5
-rw-r--r--  include/asm-x86/cpufeature.h | 118
-rw-r--r--  include/asm-x86/e820.h | 2
-rw-r--r--  include/asm-x86/es7000/apic.h (renamed from include/asm-x86/mach-es7000/mach_apic.h) | 32
-rw-r--r--  include/asm-x86/es7000/apicdef.h | 13
-rw-r--r--  include/asm-x86/es7000/ipi.h (renamed from include/asm-x86/mach-es7000/mach_ipi.h) | 6
-rw-r--r--  include/asm-x86/es7000/mpparse.h (renamed from include/asm-x86/mach-es7000/mach_mpparse.h) | 6
-rw-r--r--  include/asm-x86/es7000/wakecpu.h (renamed from include/asm-x86/mach-es7000/mach_wakecpu.h) | 8
-rw-r--r--  include/asm-x86/genapic_64.h | 8
-rw-r--r--  include/asm-x86/hw_irq.h | 3
-rw-r--r--  include/asm-x86/i387.h | 84
-rw-r--r--  include/asm-x86/i8259.h | 3
-rw-r--r--  include/asm-x86/io_apic.h | 20
-rw-r--r--  include/asm-x86/ipi.h | 16
-rw-r--r--  include/asm-x86/irq_remapping.h | 8
-rw-r--r--  include/asm-x86/mach-bigsmp/mach_apicdef.h | 13
-rw-r--r--  include/asm-x86/mach-default/mach_apic.h | 4
-rw-r--r--  include/asm-x86/mach-default/mach_apicdef.h | 6
-rw-r--r--  include/asm-x86/mach-es7000/mach_apicdef.h | 13
-rw-r--r--  include/asm-x86/mach-numaq/mach_mpparse.h | 7
-rw-r--r--  include/asm-x86/mach-summit/mach_apicdef.h | 13
-rw-r--r--  include/asm-x86/mpspec.h | 3
-rw-r--r--  include/asm-x86/msidef.h | 4
-rw-r--r--  include/asm-x86/msr-index.h | 16
-rw-r--r--  include/asm-x86/numaq/apic.h (renamed from include/asm-x86/mach-numaq/mach_apic.h) | 6
-rw-r--r--  include/asm-x86/numaq/apicdef.h (renamed from include/asm-x86/mach-numaq/mach_apicdef.h) | 6
-rw-r--r--  include/asm-x86/numaq/ipi.h (renamed from include/asm-x86/mach-numaq/mach_ipi.h) | 6
-rw-r--r--  include/asm-x86/numaq/mpparse.h | 7
-rw-r--r--  include/asm-x86/numaq/wakecpu.h (renamed from include/asm-x86/mach-numaq/mach_wakecpu.h) | 6
-rw-r--r--  include/asm-x86/paravirt.h | 19
-rw-r--r--  include/asm-x86/processor-cyrix.h | 8
-rw-r--r--  include/asm-x86/processor-flags.h | 1
-rw-r--r--  include/asm-x86/processor.h | 27
-rw-r--r--  include/asm-x86/setup.h | 1
-rw-r--r--  include/asm-x86/sigcontext.h | 87
-rw-r--r--  include/asm-x86/sigcontext32.h | 6
-rw-r--r--  include/asm-x86/smp.h | 17
-rw-r--r--  include/asm-x86/summit/apic.h (renamed from include/asm-x86/mach-summit/mach_apic.h) | 24
-rw-r--r--  include/asm-x86/summit/apicdef.h | 13
-rw-r--r--  include/asm-x86/summit/ipi.h (renamed from include/asm-x86/mach-bigsmp/mach_ipi.h) | 6
-rw-r--r--  include/asm-x86/summit/irq_vectors_limits.h (renamed from include/asm-x86/mach-summit/irq_vectors_limits.h) | 6
-rw-r--r--  include/asm-x86/summit/mpparse.h (renamed from include/asm-x86/mach-summit/mach_mpparse.h) | 13
-rw-r--r--  include/asm-x86/thread_info.h | 1
-rw-r--r--  include/asm-x86/ucontext.h | 6
-rw-r--r--  include/asm-x86/xcr.h | 49
-rw-r--r--  include/asm-x86/xsave.h | 118
-rw-r--r--  include/linux/dmar.h | 127
-rw-r--r--  include/linux/ioport.h | 3
-rw-r--r--  include/linux/irq.h | 1
-rw-r--r--  include/linux/percpu.h | 7
57 files changed, 833 insertions, 249 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index cb752ba..7440a0d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -385,6 +385,7 @@
. = ALIGN(align); \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+ *(.data.percpu.page_aligned) \
*(.data.percpu) \
*(.data.percpu.shared_aligned) \
} \
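[annotation] The one-line linker-script change above creates a home for page-aligned per-cpu data ahead of the ordinary .data.percpu payload. A hedged sketch of how such data would be declared, assuming the DEFINE_PER_CPU_PAGE_ALIGNED() helper that the include/linux/percpu.h change in this merge appears to add (the struct and variable names are illustrative):

/* illustrative only; a 4K page size is assumed here */
struct example_percpu_page {
        char data[4096];
};
DEFINE_PER_CPU_PAGE_ALIGNED(struct example_percpu_page, example_percpu_page);
/* lands in .data.percpu.page_aligned, which the hunk above places first */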
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 65590c9..d76a083 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -9,6 +9,8 @@
#include <asm/apicdef.h>
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -47,8 +49,6 @@ extern int disable_apic;
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
-#define apic_write native_apic_write
-#define apic_read native_apic_read
#define setup_boot_clock setup_boot_APIC_clock
#define setup_secondary_clock setup_secondary_APIC_clock
#endif
@@ -60,7 +60,7 @@ extern u64 xapic_icr_read(void);
extern void xapic_icr_write(u32, u32);
extern int setup_profiling_timer(unsigned int);
-static inline void native_apic_write(unsigned long reg, u32 v)
+static inline void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
@@ -69,15 +69,68 @@ static inline void native_apic_write(unsigned long reg, u32 v)
ASM_OUTPUT2("0" (v), "m" (*addr)));
}
-static inline u32 native_apic_read(unsigned long reg)
+static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
}
-extern void apic_wait_icr_idle(void);
-extern u32 safe_apic_wait_icr_idle(void);
+static inline void native_apic_msr_write(u32 reg, u32 v)
+{
+ if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
+ reg == APIC_LVR)
+ return;
+
+ wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
+}
+
+static inline u32 native_apic_msr_read(u32 reg)
+{
+ u32 low, high;
+
+ if (reg == APIC_DFR)
+ return -1;
+
+ rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
+ return low;
+}
+
+#ifndef CONFIG_X86_32
+extern int x2apic, x2apic_preenabled;
+extern void check_x2apic(void);
+extern void enable_x2apic(void);
+extern void enable_IR_x2apic(void);
+extern void x2apic_icr_write(u32 low, u32 id);
+#endif
+
+struct apic_ops {
+ u32 (*read)(u32 reg);
+ void (*write)(u32 reg, u32 v);
+ u64 (*icr_read)(void);
+ void (*icr_write)(u32 low, u32 high);
+ void (*wait_icr_idle)(void);
+ u32 (*safe_wait_icr_idle)(void);
+};
+
+extern struct apic_ops *apic_ops;
+
+#define apic_read (apic_ops->read)
+#define apic_write (apic_ops->write)
+#define apic_icr_read (apic_ops->icr_read)
+#define apic_icr_write (apic_ops->icr_write)
+#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
+#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
+
extern int get_physical_broadcast(void);
+#ifdef CONFIG_X86_64
+static inline void ack_x2APIC_irq(void)
+{
+ /* Docs say use 0 for future compatibility */
+ native_apic_msr_write(APIC_EOI, 0);
+}
+#endif
+
+
static inline void ack_APIC_irq(void)
{
/*
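[annotation] The new apic_ops table above is the heart of this series: apic_read()/apic_write() and the ICR helpers become indirect calls, so switching from MMIO xAPIC access to MSR-based x2APIC access is a single pointer swap at boot. A minimal sketch of how such a table can be populated; only native_apic_mem_read/write come from this hunk, the ICR stubs and instance names are made up for illustration:

/* hypothetical stubs standing in for the real ICR helpers */
static u64 example_icr_read(void) { return 0; }
static void example_icr_write(u32 low, u32 high) { (void)low; (void)high; }
static void example_wait_icr_idle(void) { }
static u32 example_safe_wait_icr_idle(void) { return 0; }

static struct apic_ops example_xapic_ops = {
        .read                   = native_apic_mem_read,
        .write                  = native_apic_mem_write,
        .icr_read               = example_icr_read,
        .icr_write              = example_icr_write,
        .wait_icr_idle          = example_wait_icr_idle,
        .safe_wait_icr_idle     = example_safe_wait_icr_idle,
};

static void example_use_xapic(void)
{
        apic_ops = &example_xapic_ops;  /* all apic_read()/apic_write() now go via MMIO */
}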
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
index c40687d..b922c85 100644
--- a/include/asm-x86/apicdef.h
+++ b/include/asm-x86/apicdef.h
@@ -105,6 +105,7 @@
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
+#define APIC_SELF_IPI 0x3F0
#define APIC_TDR_DIV_TMBASE (1 << 2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
@@ -128,6 +129,8 @@
#define APIC_EILVT3 0x530
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
+#define APIC_BASE_MSR 0x800
+#define X2APIC_ENABLE (1UL << 10)
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h
index 72adc3a..de4596b 100644
--- a/include/asm-x86/arch_hooks.h
+++ b/include/asm-x86/arch_hooks.h
@@ -12,8 +12,6 @@
/* these aren't arch hooks, they are generic routines
* that can be used by the hooks */
extern void init_ISA_irqs(void);
-extern void apic_intr_init(void);
-extern void smp_intr_init(void);
extern irqreturn_t timer_interrupt(int irq, void *dev_id);
/* these are the defined hooks */
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/bigsmp/apic.h
index 05362d4..0a9cd7c 100644
--- a/include/asm-x86/mach-bigsmp/mach_apic.h
+++ b/include/asm-x86/bigsmp/apic.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_BIGSMP__MACH_APIC_H
-#define ASM_X86__MACH_BIGSMP__MACH_APIC_H
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
#define esr_disable (1)
@@ -11,7 +11,7 @@ static inline int apic_id_registered(void)
/* Round robin the irqs amoung the online cpus */
static inline cpumask_t target_cpus(void)
-{
+{
static unsigned long cpu = NR_CPUS;
do {
if (cpu >= NR_CPUS)
@@ -23,7 +23,7 @@ static inline cpumask_t target_cpus(void)
}
#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL 0
+#define APIC_DEST_LOGICAL 0
#define TARGET_CPUS (target_cpus())
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
@@ -141,4 +141,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
return cpuid_apic >> index_msb;
}
-#endif /* ASM_X86__MACH_BIGSMP__MACH_APIC_H */
+#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86/bigsmp/apicdef.h b/include/asm-x86/bigsmp/apicdef.h
new file mode 100644
index 0000000..392c3f5
--- /dev/null
+++ b/include/asm-x86/bigsmp/apicdef.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MACH_APICDEF_H
+#define __ASM_MACH_APICDEF_H
+
+#define APIC_ID_MASK (0xFF<<24)
+
+static inline unsigned get_apic_id(unsigned long x)
+{
+ return (((x)>>24)&0xFF);
+}
+
+#define GET_APIC_ID(x) get_apic_id(x)
+
+#endif
diff --git a/include/asm-x86/mach-summit/mach_ipi.h b/include/asm-x86/bigsmp/ipi.h
index a3b31c5..9404c53 100644
--- a/include/asm-x86/mach-summit/mach_ipi.h
+++ b/include/asm-x86/bigsmp/ipi.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_SUMMIT__MACH_IPI_H
-#define ASM_X86__MACH_SUMMIT__MACH_IPI_H
+#ifndef __ASM_MACH_IPI_H
+#define __ASM_MACH_IPI_H
void send_IPI_mask_sequence(cpumask_t mask, int vector);
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
send_IPI_mask(cpu_online_map, vector);
}
-#endif /* ASM_X86__MACH_SUMMIT__MACH_IPI_H */
+#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index 4761c46..dc60498 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -2,6 +2,11 @@
#define ASM_X86__BUGS_H
extern void check_bugs(void);
+
+#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
int ppro_with_ram_bug(void);
+#else
+static inline int ppro_with_ram_bug(void) { return 0; }
+#endif
#endif /* ASM_X86__BUGS_H */
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 250fa0c..adfeae6 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -6,7 +6,13 @@
#include <asm/required-features.h>
-#define NCAPINTS 8 /* N 32-bit words worth of info */
+#define NCAPINTS 9 /* N 32-bit words worth of info */
+
+/*
+ * Note: If the comment begins with a quoted string, that string is used
+ * in /proc/cpuinfo instead of the macro name. If the string is "",
+ * this feature bit is not displayed in /proc/cpuinfo at all.
+ */
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
@@ -14,7 +20,7 @@
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
+#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
@@ -23,22 +29,23 @@
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
+#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */
+ /* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DS (0*32+21) /* Debug Store */
+#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */
+#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
- /* of FPU context), and CR4.OSFXSR available */
-#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
-#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
+#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM (0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
+#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
@@ -46,7 +53,8 @@
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */
+#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
@@ -64,53 +72,79 @@
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
-#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */
-#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */
-#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
-#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
+#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
-#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */
-#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
-#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
-#define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */
+#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */
+#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
+#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
+#define X86_FEATURE_XTOPOLOGY (3*32+21) /* cpu topology enum extensions */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
-#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
-#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
+#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */
+#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID (4*32+10) /* Context ID */
+#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
-#define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */
+#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
+#define X86_FEATURE_AES (4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
+#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
-#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
-#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
-#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
+#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */
-#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */
+#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_IBS (6*32+ 10) /* Instruction Based Sampling */
+#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */
+#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
+#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -118,6 +152,13 @@
*/
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
+/* Virtualization flags: Linux defined */
+#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */
+
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#include <linux/bitops.h>
@@ -151,7 +192,7 @@ extern const char * const x86_power_flags[32];
} while (0)
#define setup_force_cpu_cap(bit) do { \
set_cpu_cap(&boot_cpu_data, bit); \
- clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
+ clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
} while (0)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
@@ -192,7 +233,10 @@ extern const char * const x86_power_flags[32];
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
+#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
+#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
+#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
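[annotation] All of the new bits follow the same (word*32 + bit) encoding, and the quoted-string comment convention introduced above only changes how a bit is named in /proc/cpuinfo, not how it is tested. Conceptually, the cpu_has_* macros added here reduce to a word/bit lookup in x86_capability[]; a sketch with an invented helper name:

/* e.g. X86_FEATURE_XSAVE = (4*32+26): word 4, bit 26 of x86_capability[] */
static inline int example_test_feature(struct cpuinfo_x86 *c, int feature)
{
        return (c->x86_capability[feature / 32] >> (feature % 32)) & 1;
}
/* example_test_feature(&boot_cpu_data, X86_FEATURE_X2APIC) ~ cpu_has_x2apic */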
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index f52daf1..5abbdec 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -43,6 +43,7 @@
#define E820_RESERVED 2
#define E820_ACPI 3
#define E820_NVS 4
+#define E820_UNUSABLE 5
/* reserved RAM used by kernel itself */
#define E820_RESERVED_KERN 128
@@ -121,6 +122,7 @@ extern void e820_register_active_regions(int nid, unsigned long start_pfn,
extern u64 e820_hole_size(u64 start, u64 end);
extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
+extern void e820_reserve_resources_late(void);
extern void setup_memory_map(void);
extern char *default_machine_specific_memory_setup(void);
extern char *machine_specific_memory_setup(void);
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/es7000/apic.h
index c1f6f68..bd2c44d 100644
--- a/include/asm-x86/mach-es7000/mach_apic.h
+++ b/include/asm-x86/es7000/apic.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_ES7000__MACH_APIC_H
-#define ASM_X86__MACH_ES7000__MACH_APIC_H
+#ifndef __ASM_ES7000_APIC_H
+#define __ASM_ES7000_APIC_H
#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
#define esr_disable (1)
@@ -10,7 +10,7 @@ static inline int apic_id_registered(void)
}
static inline cpumask_t target_cpus(void)
-{
+{
#if defined CONFIG_ES7000_CLUSTERED_APIC
return CPU_MASK_ALL;
#else
@@ -23,24 +23,24 @@ static inline cpumask_t target_cpus(void)
#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE (dest_LowestPrio)
#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */
-#define NO_BALANCE_IRQ (1)
+#define NO_BALANCE_IRQ (1)
#undef WAKE_SECONDARY_VIA_INIT
#define WAKE_SECONDARY_VIA_MIP
#else
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
#define INT_DEST_MODE (0) /* phys delivery to target procs */
-#define NO_BALANCE_IRQ (0)
+#define NO_BALANCE_IRQ (0)
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0x0
#define WAKE_SECONDARY_VIA_INIT
#endif
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
+{
return 0;
-}
-static inline unsigned long check_apicid_present(int bit)
+}
+static inline unsigned long check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}
@@ -80,7 +80,7 @@ static inline void setup_apic_routing(void)
{
int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
- (apic_version[apic] == 0x14) ?
+ (apic_version[apic] == 0x14) ?
"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
}
@@ -141,7 +141,7 @@ static inline void setup_portio_remap(void)
extern unsigned int boot_cpu_physical_apicid;
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
- boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+ boot_cpu_physical_apicid = read_apic_id();
return (1);
}
@@ -150,7 +150,7 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
int num_bits_set;
int cpus_found = 0;
int cpu;
- int apicid;
+ int apicid;
num_bits_set = cpus_weight(cpumask);
/* Return id to all */
@@ -160,16 +160,16 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
#else
return cpu_to_logical_apicid(0);
#endif
- /*
- * The cpus in the mask must all be on the apic cluster. If are not
- * on the same apicid cluster return default value of TARGET_CPUS.
+ /*
+ * The cpus in the mask must all be on the apic cluster. If are not
+ * on the same apicid cluster return default value of TARGET_CPUS.
*/
cpu = first_cpu(cpumask);
apicid = cpu_to_logical_apicid(cpu);
while (cpus_found < num_bits_set) {
if (cpu_isset(cpu, cpumask)) {
int new_apicid = cpu_to_logical_apicid(cpu);
- if (apicid_cluster(apicid) !=
+ if (apicid_cluster(apicid) !=
apicid_cluster(new_apicid)){
printk ("%s: Not a valid mask!\n",__FUNCTION__);
#if defined CONFIG_ES7000_CLUSTERED_APIC
@@ -191,4 +191,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
return cpuid_apic >> index_msb;
}
-#endif /* ASM_X86__MACH_ES7000__MACH_APIC_H */
+#endif /* __ASM_ES7000_APIC_H */
diff --git a/include/asm-x86/es7000/apicdef.h b/include/asm-x86/es7000/apicdef.h
new file mode 100644
index 0000000..8b234a3
--- /dev/null
+++ b/include/asm-x86/es7000/apicdef.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_ES7000_APICDEF_H
+#define __ASM_ES7000_APICDEF_H
+
+#define APIC_ID_MASK (0xFF<<24)
+
+static inline unsigned get_apic_id(unsigned long x)
+{
+ return (((x)>>24)&0xFF);
+}
+
+#define GET_APIC_ID(x) get_apic_id(x)
+
+#endif
diff --git a/include/asm-x86/mach-es7000/mach_ipi.h b/include/asm-x86/es7000/ipi.h
index 3a21240..632a955 100644
--- a/include/asm-x86/mach-es7000/mach_ipi.h
+++ b/include/asm-x86/es7000/ipi.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_ES7000__MACH_IPI_H
-#define ASM_X86__MACH_ES7000__MACH_IPI_H
+#ifndef __ASM_ES7000_IPI_H
+#define __ASM_ES7000_IPI_H
void send_IPI_mask_sequence(cpumask_t mask, int vector);
@@ -21,4 +21,4 @@ static inline void send_IPI_all(int vector)
send_IPI_mask(cpu_online_map, vector);
}
-#endif /* ASM_X86__MACH_ES7000__MACH_IPI_H */
+#endif /* __ASM_ES7000_IPI_H */
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/es7000/mpparse.h
index befde24..7b5c889 100644
--- a/include/asm-x86/mach-es7000/mach_mpparse.h
+++ b/include/asm-x86/es7000/mpparse.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_ES7000__MACH_MPPARSE_H
-#define ASM_X86__MACH_ES7000__MACH_MPPARSE_H
+#ifndef __ASM_ES7000_MPPARSE_H
+#define __ASM_ES7000_MPPARSE_H
#include <linux/acpi.h>
@@ -26,4 +26,4 @@ static inline int es7000_check_dsdt(void)
}
#endif
-#endif /* ASM_X86__MACH_ES7000__MACH_MPPARSE_H */
+#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-es7000/mach_wakecpu.h b/include/asm-x86/es7000/wakecpu.h
index 97c776c..3ffc5a7 100644
--- a/include/asm-x86/mach-es7000/mach_wakecpu.h
+++ b/include/asm-x86/es7000/wakecpu.h
@@ -1,7 +1,7 @@
-#ifndef ASM_X86__MACH_ES7000__MACH_WAKECPU_H
-#define ASM_X86__MACH_ES7000__MACH_WAKECPU_H
+#ifndef __ASM_ES7000_WAKECPU_H
+#define __ASM_ES7000_WAKECPU_H
-/*
+/*
* This file copes with machines that wakeup secondary CPUs by the
* INIT, INIT, STARTUP sequence.
*/
@@ -56,4 +56,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
#define inquire_remote_apic(apicid) {}
#endif
-#endif /* ASM_X86__MACH_ES7000__MACH_WAKECPU_H */
+#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index 25097a8..ed6a488 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -14,6 +14,7 @@
struct genapic {
char *name;
+ int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
u32 int_delivery_mode;
u32 int_dest_mode;
int (*apic_id_registered)(void);
@@ -24,17 +25,24 @@ struct genapic {
void (*send_IPI_mask)(cpumask_t mask, int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
+ void (*send_IPI_self)(int vector);
/* */
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
unsigned int (*phys_pkg_id)(int index_msb);
+ unsigned int (*get_apic_id)(unsigned long x);
+ unsigned long (*set_apic_id)(unsigned int id);
+ unsigned long apic_id_mask;
};
extern struct genapic *genapic;
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
+extern struct genapic apic_x2apic_cluster;
+extern struct genapic apic_x2apic_phys;
extern int acpi_madt_oem_check(char *, char *);
+extern void apic_send_IPI_self(int vector);
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 65997b1..50f6e03 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -64,7 +64,6 @@ extern unsigned long io_apic_irqs;
extern void init_VISWS_APIC_irqs(void);
extern void setup_IO_APIC(void);
extern void disable_IO_APIC(void);
-extern void print_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
extern void setup_ioapic_dest(void);
@@ -73,7 +72,9 @@ extern void enable_IO_APIC(void);
#endif
/* IPI functions */
+#ifdef CONFIG_X86_32
extern void send_IPI_self(int vector);
+#endif
extern void send_IPI(int dest, int vector);
/* Statistics */
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 1ecdc3e..9ba862a 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -19,7 +19,9 @@
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
+#include <asm/xsave.h>
+extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
@@ -31,12 +33,18 @@ extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;
+extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
+extern unsigned int sig_xstate_ia32_size;
+extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
-extern int save_i387_ia32(struct _fpstate_ia32 __user *buf);
-extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf);
+struct _xstate_ia32;
+extern int save_i387_xstate_ia32(void __user *buf);
+extern int restore_i387_xstate_ia32(void __user *buf);
#endif
+#define X87_FSW_ES (1 << 7) /* Exception Summary */
+
#ifdef CONFIG_X86_64
/* Ignore delayed exceptions from user space */
@@ -47,7 +55,7 @@ static inline void tolerant_fwait(void)
_ASM_EXTABLE(1b, 2b));
}
-static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
int err;
@@ -67,15 +75,31 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
return err;
}
-#define X87_FSW_ES (1 << 7) /* Exception Summary */
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+ if (task_thread_info(tsk)->status & TS_XSAVE)
+ return xrstor_checking(&tsk->thread.xstate->xsave);
+ else
+ return fxrstor_checking(&tsk->thread.xstate->fxsave);
+}
/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. The kernel data segment can be sometimes 0 and sometimes
new user value. Both should be ok.
Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
+static inline void clear_fpu_state(struct task_struct *tsk)
{
+ struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
+ struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+ /*
+ * xsave header may indicate the init state of the FP.
+ */
+ if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+ !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+ return;
+
if (unlikely(fx->swd & X87_FSW_ES))
asm volatile("fnclex");
alternative_input(ASM_NOP8 ASM_NOP2,
@@ -84,7 +108,7 @@ static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
X86_FEATURE_FXSAVE_LEAK);
}
-static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
+static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
int err;
@@ -108,7 +132,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
return err;
}
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fxsave(struct task_struct *tsk)
{
/* Using "rex64; fxsave %0" is broken because, if the memory operand
uses any extended registers for addressing, a second REX prefix
@@ -133,7 +157,16 @@ static inline void __save_init_fpu(struct task_struct *tsk)
: "=m" (tsk->thread.xstate->fxsave)
: "cdaSDb" (&tsk->thread.xstate->fxsave));
#endif
- clear_fpu_state(&tsk->thread.xstate->fxsave);
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+ if (task_thread_info(tsk)->status & TS_XSAVE)
+ xsave(tsk);
+ else
+ fxsave(tsk);
+
+ clear_fpu_state(tsk);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
@@ -148,6 +181,10 @@ static inline void tolerant_fwait(void)
static inline void restore_fpu(struct task_struct *tsk)
{
+ if (task_thread_info(tsk)->status & TS_XSAVE) {
+ xrstor_checking(&tsk->thread.xstate->xsave);
+ return;
+ }
/*
* The "nop" is needed to make the instructions the same
* length.
@@ -173,6 +210,27 @@ static inline void restore_fpu(struct task_struct *tsk)
*/
static inline void __save_init_fpu(struct task_struct *tsk)
{
+ if (task_thread_info(tsk)->status & TS_XSAVE) {
+ struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
+ struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+ xsave(tsk);
+
+ /*
+ * xsave header may indicate the init state of the FP.
+ */
+ if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+ goto end;
+
+ if (unlikely(fx->swd & X87_FSW_ES))
+ asm volatile("fnclex");
+
+ /*
+ * we can do a simple return here or be paranoid :)
+ */
+ goto clear_state;
+ }
+
/* Use more nops than strictly needed in case the compiler
varies code */
alternative_input(
@@ -182,6 +240,7 @@ static inline void __save_init_fpu(struct task_struct *tsk)
X86_FEATURE_FXSR,
[fx] "m" (tsk->thread.xstate->fxsave),
[fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+clear_state:
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. safe_address is a random variable that should be in L1 */
@@ -191,16 +250,17 @@ static inline void __save_init_fpu(struct task_struct *tsk)
"fildl %[addr]", /* set F?P to defined value */
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (safe_address));
+end:
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
+#endif /* CONFIG_X86_64 */
+
/*
* Signal frame handlers...
*/
-extern int save_i387(struct _fpstate __user *buf);
-extern int restore_i387(struct _fpstate __user *buf);
-
-#endif /* CONFIG_X86_64 */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);
static inline void __unlazy_fpu(struct task_struct *tsk)
{
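[annotation] Net effect of the i387.h rework: callers keep using __save_init_fpu()/restore_fpu_checking() and never choose between fxsave and xsave themselves; the TS_XSAVE thread flag decides. A minimal sketch of a save-side caller (the real context-switch path adds lazy-FPU and preemption handling omitted here):

static inline void example_save_fpu_state(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU)
                __save_init_fpu(tsk);   /* fxsave or xsave, chosen by TS_XSAVE */
}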
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index c586559..23c1b3b 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -57,4 +57,7 @@ static inline void outb_pic(unsigned char value, unsigned int port)
extern struct irq_chip i8259A_chip;
+extern void mask_8259A(void);
+extern void unmask_8259A(void);
+
#endif /* ASM_X86__I8259_H */
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index be62847..8ec68a5 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -107,6 +107,20 @@ struct IO_APIC_route_entry {
} __attribute__ ((packed));
+struct IR_IO_APIC_route_entry {
+ __u64 vector : 8,
+ zero : 3,
+ index2 : 1,
+ delivery_status : 1,
+ polarity : 1,
+ irr : 1,
+ trigger : 1,
+ mask : 1,
+ reserved : 31,
+ format : 1,
+ index : 15;
+} __attribute__ ((packed));
+
#ifdef CONFIG_X86_IO_APIC
/*
@@ -183,6 +197,12 @@ extern int io_apic_set_pci_routing(int ioapic, int pin, int irq,
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
+#ifdef CONFIG_X86_64
+extern int save_mask_IO_APIC_setup(void);
+extern void restore_IO_APIC_setup(void);
+extern void reinit_intr_remapped_IO_APIC(int);
+#endif
+
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
static const int timer_through_8259 = 0;
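[annotation] The interrupt-remapping entry above is laid out to occupy exactly the same eight bytes as a legacy route entry; the bit-field widths sum to 8+3+1+1+1+1+1+1+31+1+15 = 64. An illustrative compile-time check (BUILD_BUG_ON comes from <linux/kernel.h>; the function name is made up):

static inline void example_check_ir_route_entry(void)
{
        BUILD_BUG_ON(sizeof(struct IR_IO_APIC_route_entry) != 8);
}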
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index c1b2267..30a692c 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -49,6 +49,12 @@ static inline int __prepare_ICR2(unsigned int mask)
return SET_APIC_DEST_FIELD(mask);
}
+static inline void __xapic_wait_icr_idle(void)
+{
+ while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
+ cpu_relax();
+}
+
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
unsigned int dest)
{
@@ -64,7 +70,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
/*
* Wait for idle.
*/
- apic_wait_icr_idle();
+ __xapic_wait_icr_idle();
/*
* No need to touch the target chip field
@@ -74,7 +80,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
- apic_write(APIC_ICR, cfg);
+ native_apic_mem_write(APIC_ICR, cfg);
}
/*
@@ -92,13 +98,13 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
if (unlikely(vector == NMI_VECTOR))
safe_apic_wait_icr_idle();
else
- apic_wait_icr_idle();
+ __xapic_wait_icr_idle();
/*
* prepare target chip field
*/
cfg = __prepare_ICR2(mask);
- apic_write(APIC_ICR2, cfg);
+ native_apic_mem_write(APIC_ICR2, cfg);
/*
* program the ICR
@@ -108,7 +114,7 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
- apic_write(APIC_ICR, cfg);
+ native_apic_mem_write(APIC_ICR, cfg);
}
static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
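[annotation] Note that the IPI helpers above now call native_apic_mem_write() and the new __xapic_wait_icr_idle() directly rather than going through the apic_ops indirection: this header is the raw xAPIC path, while x2APIC issues its ICR writes via MSR. A hypothetical caller of the three-argument shortcut helper declared above (APIC_DEST_SELF and APIC_DEST_LOGICAL are existing apicdef.h constants):

static inline void example_send_IPI_self(int vector)
{
        /* "self" shortcut: no destination id needs to be programmed */
        __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_LOGICAL);
}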
diff --git a/include/asm-x86/irq_remapping.h b/include/asm-x86/irq_remapping.h
new file mode 100644
index 0000000..78242c6
--- /dev/null
+++ b/include/asm-x86/irq_remapping.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_IRQ_REMAPPING_H
+#define _ASM_IRQ_REMAPPING_H
+
+extern int x2apic;
+
+#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8)
+
+#endif
diff --git a/include/asm-x86/mach-bigsmp/mach_apicdef.h b/include/asm-x86/mach-bigsmp/mach_apicdef.h
deleted file mode 100644
index 811935d..0000000
--- a/include/asm-x86/mach-bigsmp/mach_apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef ASM_X86__MACH_BIGSMP__MACH_APICDEF_H
-#define ASM_X86__MACH_BIGSMP__MACH_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif /* ASM_X86__MACH_BIGSMP__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
index b615f40..2a330a4 100644
--- a/include/asm-x86/mach-default/mach_apic.h
+++ b/include/asm-x86/mach-default/mach_apic.h
@@ -30,6 +30,8 @@ static inline cpumask_t target_cpus(void)
#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
#define phys_pkg_id (genapic->phys_pkg_id)
#define vector_allocation_domain (genapic->vector_allocation_domain)
+#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
+#define send_IPI_self (genapic->send_IPI_self)
extern void setup_apic_routing(void);
#else
#define INT_DELIVERY_MODE dest_LowestPrio
@@ -54,7 +56,7 @@ static inline void init_apic_ldr(void)
static inline int apic_id_registered(void)
{
- return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
+ return physid_isset(read_apic_id(), phys_cpu_present_map);
}
static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h
index 936704f..0c2d41c 100644
--- a/include/asm-x86/mach-default/mach_apicdef.h
+++ b/include/asm-x86/mach-default/mach_apicdef.h
@@ -4,9 +4,9 @@
#include <asm/apic.h>
#ifdef CONFIG_X86_64
-#define APIC_ID_MASK (0xFFu<<24)
-#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
-#define SET_APIC_ID(x) (((x)<<24))
+#define APIC_ID_MASK (genapic->apic_id_mask)
+#define GET_APIC_ID(x) (genapic->get_apic_id(x))
+#define SET_APIC_ID(x) (genapic->set_apic_id(x))
#else
#define APIC_ID_MASK (0xF<<24)
static inline unsigned get_apic_id(unsigned long x)
diff --git a/include/asm-x86/mach-es7000/mach_apicdef.h b/include/asm-x86/mach-es7000/mach_apicdef.h
deleted file mode 100644
index a07e567..0000000
--- a/include/asm-x86/mach-es7000/mach_apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef ASM_X86__MACH_ES7000__MACH_APICDEF_H
-#define ASM_X86__MACH_ES7000__MACH_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif /* ASM_X86__MACH_ES7000__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h
deleted file mode 100644
index 74ade18..0000000
--- a/include/asm-x86/mach-numaq/mach_mpparse.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H
-#define ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H
-
-extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
- char *productid);
-
-#endif /* ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-summit/mach_apicdef.h b/include/asm-x86/mach-summit/mach_apicdef.h
deleted file mode 100644
index d4bc859..0000000
--- a/include/asm-x86/mach-summit/mach_apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef ASM_X86__MACH_SUMMIT__MACH_APICDEF_H
-#define ASM_X86__MACH_SUMMIT__MACH_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif /* ASM_X86__MACH_SUMMIT__MACH_APICDEF_H */
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index 118da36..be2241a 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -5,11 +5,12 @@
#include <asm/mpspec_def.h>
+extern int apic_version[MAX_APICS];
+
#ifdef CONFIG_X86_32
#include <mach_mpspec.h>
extern unsigned int def_to_bigsmp;
-extern int apic_version[MAX_APICS];
extern u8 apicid_2_node[];
extern int pic_mode;
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h
index 3139666..ed91902 100644
--- a/include/asm-x86/msidef.h
+++ b/include/asm-x86/msidef.h
@@ -48,4 +48,8 @@
#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
MSI_ADDR_DEST_ID_MASK)
+#define MSI_ADDR_IR_EXT_INT (1 << 4)
+#define MSI_ADDR_IR_SHV (1 << 3)
+#define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13)
+#define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5)
#endif /* ASM_X86__MSIDEF_H */
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 3052f05..0bb4330 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -176,6 +176,7 @@
#define MSR_IA32_TSC 0x00000010
#define MSR_IA32_PLATFORM_ID 0x00000017
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
+#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
@@ -310,4 +311,19 @@
/* Geode defined MSRs */
#define MSR_GEODE_BUSCONT_CONF0 0x00001900
+/* Intel VT MSRs */
+#define MSR_IA32_VMX_BASIC 0x00000480
+#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
+#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
+#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
+#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
+#define MSR_IA32_VMX_MISC 0x00000485
+#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
+#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
+#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
+#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
+#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
+#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
+#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
+
#endif /* ASM_X86__MSR_INDEX_H */
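[annotation] With the VMX MSR indices now in the shared msr-index.h, any x86 code can probe the VMX capability MSRs with the ordinary MSR accessors. A hedged sketch (assumes the CPU advertises X86_FEATURE_VMX; per the Intel SDM, bits 30:0 of IA32_VMX_BASIC hold the VMCS revision identifier):

static inline u32 example_vmcs_revision_id(void)
{
        u64 basic;

        rdmsrl(MSR_IA32_VMX_BASIC, basic);      /* rdmsrl() from <asm/msr.h> */
        return (u32)basic & 0x7fffffff;
}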
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/numaq/apic.h
index 7a0d39e..a8344ba 100644
--- a/include/asm-x86/mach-numaq/mach_apic.h
+++ b/include/asm-x86/numaq/apic.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_NUMAQ__MACH_APIC_H
-#define ASM_X86__MACH_NUMAQ__MACH_APIC_H
+#ifndef __ASM_NUMAQ_APIC_H
+#define __ASM_NUMAQ_APIC_H
#include <asm/io.h>
#include <linux/mmzone.h>
@@ -135,4 +135,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
return cpuid_apic >> index_msb;
}
-#endif /* ASM_X86__MACH_NUMAQ__MACH_APIC_H */
+#endif /* __ASM_NUMAQ_APIC_H */
diff --git a/include/asm-x86/mach-numaq/mach_apicdef.h b/include/asm-x86/numaq/apicdef.h
index f870ec5..e012a46 100644
--- a/include/asm-x86/mach-numaq/mach_apicdef.h
+++ b/include/asm-x86/numaq/apicdef.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_NUMAQ__MACH_APICDEF_H
-#define ASM_X86__MACH_NUMAQ__MACH_APICDEF_H
+#ifndef __ASM_NUMAQ_APICDEF_H
+#define __ASM_NUMAQ_APICDEF_H
#define APIC_ID_MASK (0xF<<24)
@@ -11,4 +11,4 @@ static inline unsigned get_apic_id(unsigned long x)
#define GET_APIC_ID(x) get_apic_id(x)
-#endif /* ASM_X86__MACH_NUMAQ__MACH_APICDEF_H */
+#endif
diff --git a/include/asm-x86/mach-numaq/mach_ipi.h b/include/asm-x86/numaq/ipi.h
index 1e83582..935588d 100644
--- a/include/asm-x86/mach-numaq/mach_ipi.h
+++ b/include/asm-x86/numaq/ipi.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_NUMAQ__MACH_IPI_H
-#define ASM_X86__MACH_NUMAQ__MACH_IPI_H
+#ifndef __ASM_NUMAQ_IPI_H
+#define __ASM_NUMAQ_IPI_H
void send_IPI_mask_sequence(cpumask_t, int vector);
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
send_IPI_mask(cpu_online_map, vector);
}
-#endif /* ASM_X86__MACH_NUMAQ__MACH_IPI_H */
+#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/include/asm-x86/numaq/mpparse.h b/include/asm-x86/numaq/mpparse.h
new file mode 100644
index 0000000..252292e
--- /dev/null
+++ b/include/asm-x86/numaq/mpparse.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_NUMAQ_MPPARSE_H
+#define __ASM_NUMAQ_MPPARSE_H
+
+extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
+ char *productid);
+
+#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/include/asm-x86/mach-numaq/mach_wakecpu.h b/include/asm-x86/numaq/wakecpu.h
index 0db8cea..c577bda 100644
--- a/include/asm-x86/mach-numaq/mach_wakecpu.h
+++ b/include/asm-x86/numaq/wakecpu.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H
-#define ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H
+#ifndef __ASM_NUMAQ_WAKECPU_H
+#define __ASM_NUMAQ_WAKECPU_H
/* This file copes with machines that wakeup secondary CPUs by NMIs */
@@ -40,4 +40,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
#define inquire_remote_apic(apicid) {}
-#endif /* ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H */
+#endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 891971f..d7d358a 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -201,12 +201,6 @@ struct pv_irq_ops {
struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
- /*
- * Direct APIC operations, principally for VMI. Ideally
- * these shouldn't be in this interface.
- */
- void (*apic_write)(unsigned long reg, u32 v);
- u32 (*apic_read)(unsigned long reg);
void (*setup_boot_clock)(void);
void (*setup_secondary_clock)(void);
@@ -910,19 +904,6 @@ static inline void slow_down_io(void)
}
#ifdef CONFIG_X86_LOCAL_APIC
-/*
- * Basic functions accessing APICs.
- */
-static inline void apic_write(unsigned long reg, u32 v)
-{
- PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
-}
-
-static inline u32 apic_read(unsigned long reg)
-{
- return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
-}
-
static inline void setup_boot_clock(void)
{
PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
diff --git a/include/asm-x86/processor-cyrix.h b/include/asm-x86/processor-cyrix.h
index 97568ad..1198f2a 100644
--- a/include/asm-x86/processor-cyrix.h
+++ b/include/asm-x86/processor-cyrix.h
@@ -28,3 +28,11 @@ static inline void setCx86(u8 reg, u8 data)
outb(reg, 0x22);
outb(data, 0x23);
}
+
+#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86_old(reg, data) do { \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
+} while (0)
+
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index 5dd7977..dc5f071 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -59,6 +59,7 @@
#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
+#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
/*
* x86-64 Task Priority Register, CR8
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 5eaf9bf..c7d3546 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -76,11 +76,11 @@ struct cpuinfo_x86 {
int x86_tlbsize;
__u8 x86_virt_bits;
__u8 x86_phys_bits;
+#endif
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
/* Max extended CPUID function supported: */
__u32 extended_cpuid_level;
-#endif
/* Maximum supported CPUID level, -1=no CPUID: */
int cpuid_level;
__u32 x86_capability[NCAPINTS];
@@ -166,11 +166,8 @@ extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
-#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
+extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
-#else
-static inline void detect_ht(struct cpuinfo_x86 *c) {}
-#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
@@ -327,7 +324,12 @@ struct i387_fxsave_struct {
/* 16*16 bytes for each XMM-reg = 256 bytes: */
u32 xmm_space[64];
- u32 padding[24];
+ u32 padding[12];
+
+ union {
+ u32 padding1[12];
+ u32 sw_reserved[12];
+ };
} __attribute__((aligned(16)));
@@ -351,10 +353,23 @@ struct i387_soft_struct {
u32 entry_eip;
};
+struct xsave_hdr_struct {
+ u64 xstate_bv;
+ u64 reserved1[2];
+ u64 reserved2[5];
+} __attribute__((packed));
+
+struct xsave_struct {
+ struct i387_fxsave_struct i387;
+ struct xsave_hdr_struct xsave_hdr;
+ /* new processor state extensions will go here */
+} __attribute__ ((packed, aligned (64)));
+
union thread_xstate {
struct i387_fsave_struct fsave;
struct i387_fxsave_struct fxsave;
struct i387_soft_struct soft;
+ struct xsave_struct xsave;
};
#ifdef CONFIG_X86_64
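[annotation] Two layout invariants sit behind the processor.h changes above: the fxsave image stays 512 bytes (its trailing padding is only split into a union, not shrunk), and because xsave_struct is packed the xsave header starts immediately after it at offset 512. An illustrative compile-time check (names invented; BUILD_BUG_ON and offsetof come from <linux/kernel.h> and <linux/stddef.h>):

static inline void example_check_xsave_layout(void)
{
        BUILD_BUG_ON(sizeof(struct i387_fxsave_struct) != 512);
        BUILD_BUG_ON(offsetof(struct xsave_struct, xsave_hdr) != 512);
}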
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index 9030cb7..11b6cc1 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -38,6 +38,7 @@ struct x86_quirks {
void (*mpc_oem_pci_bus)(struct mpc_config_bus *m);
void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
unsigned short oemsize);
+ int (*setup_ioapic_ids)(void);
};
extern struct x86_quirks *x86_quirks;
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index 24879c8..ee813f4 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -4,6 +4,40 @@
#include <linux/compiler.h>
#include <asm/types.h>
+#define FP_XSTATE_MAGIC1 0x46505853U
+#define FP_XSTATE_MAGIC2 0x46505845U
+#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
+
+/*
+ * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
+ * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
+ * are used to extended the fpstate pointer in the sigcontext, which now
+ * includes the extended state information along with fpstate information.
+ *
+ * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
+ * area and FP_XSTATE_MAGIC2 at the end of memory layout
+ * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
+ * extended state information in the memory layout pointed by the fpstate
+ * pointer in sigcontext.
+ */
+struct _fpx_sw_bytes {
+ __u32 magic1; /* FP_XSTATE_MAGIC1 */
+ __u32 extended_size; /* total size of the layout referred by
+ * fpstate pointer in the sigcontext.
+ */
+ __u64 xstate_bv;
+ /* feature bit mask (including fp/sse/extended
+ * state) that is present in the memory
+ * layout.
+ */
+ __u32 xstate_size; /* actual xsave state size, based on the
+ * features saved in the layout.
+ * 'extended_size' will be greater than
+ * 'xstate_size'.
+ */
+ __u32 padding[7]; /* for future use. */
+};
+
#ifdef __i386__
/*
* As documented in the iBCS2 standard..
@@ -53,7 +87,13 @@ struct _fpstate {
unsigned long reserved;
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
struct _xmmreg _xmm[8];
- unsigned long padding[56];
+ unsigned long padding1[44];
+
+ union {
+ unsigned long padding2[12];
+ struct _fpx_sw_bytes sw_reserved; /* represents the extended
+ * state info */
+ };
};
#define X86_FXSR_MAGIC 0x0000
@@ -79,7 +119,15 @@ struct sigcontext {
unsigned long flags;
unsigned long sp_at_signal;
unsigned short ss, __ssh;
- struct _fpstate __user *fpstate;
+
+ /*
+ * fpstate is really (struct _fpstate *) or (struct _xstate *)
+ * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
+ * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
+ * of extended memory layout. See comments at the defintion of
+ * (struct _fpx_sw_bytes)
+ */
+ void __user *fpstate; /* zero when no FPU/extended context */
unsigned long oldmask;
unsigned long cr2;
};
@@ -130,7 +178,12 @@ struct _fpstate {
__u32 mxcsr_mask;
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
- __u32 reserved2[24];
+ __u32 reserved2[12];
+ union {
+ __u32 reserved3[12];
+ struct _fpx_sw_bytes sw_reserved; /* represents the extended
+ * state information */
+ };
};
#ifdef __KERNEL__
@@ -161,7 +214,15 @@ struct sigcontext {
unsigned long trapno;
unsigned long oldmask;
unsigned long cr2;
- struct _fpstate __user *fpstate; /* zero when no FPU context */
+
+ /*
+ * fpstate is really (struct _fpstate *) or (struct _xstate *)
+ * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
+ * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
+ * of extended memory layout. See comments at the defintion of
+ * (struct _fpx_sw_bytes)
+ */
+ void __user *fpstate; /* zero when no FPU/extended context */
unsigned long reserved1[8];
};
#else /* __KERNEL__ */
@@ -202,4 +263,22 @@ struct sigcontext {
#endif /* !__i386__ */
+struct _xsave_hdr {
+ __u64 xstate_bv;
+ __u64 reserved1[2];
+ __u64 reserved2[5];
+};
+
+/*
+ * Extended state pointed by the fpstate pointer in the sigcontext.
+ * In addition to the fpstate, information encoded in the xstate_hdr
+ * indicates the presence of other extended state information
+ * supported by the processor and OS.
+ */
+struct _xstate {
+ struct _fpstate fpstate;
+ struct _xsave_hdr xstate_hdr;
+ /* new processor state extensions go here */
+};
+
#endif /* ASM_X86__SIGCONTEXT_H */
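
Taken together, the sw_reserved bytes and the relaxed fpstate pointer above give user space a way to probe for the extended frame. A minimal sketch, assuming the definitions added in this patch (the helper name sigcontext_xstate() is illustrative, and error handling is omitted):

#include <stddef.h>
#include <asm/sigcontext.h>	/* struct _fpstate, struct _xstate, FP_XSTATE_* */

/* Illustrative helper: returns NULL unless the frame carries extended state. */
static struct _xstate *sigcontext_xstate(struct sigcontext *sc)
{
	struct _fpstate *fp = (struct _fpstate *)sc->fpstate;
	__u32 *magic2;

	if (!fp)
		return NULL;			/* no FPU context saved */

	if (fp->sw_reserved.magic1 != FP_XSTATE_MAGIC1)
		return NULL;			/* legacy fxsave-only frame */

	magic2 = (__u32 *)((char *)fp + fp->sw_reserved.extended_size
			   - FP_XSTATE_MAGIC2_SIZE);
	if (*magic2 != FP_XSTATE_MAGIC2)
		return NULL;			/* trailing marker missing */

	return (struct _xstate *)fp;		/* fpstate is the first member */
}

When both markers are present, xstate_hdr.xstate_bv tells the handler which state components were actually saved.
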
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
index 4e2ec73..8c34703 100644
--- a/include/asm-x86/sigcontext32.h
+++ b/include/asm-x86/sigcontext32.h
@@ -40,7 +40,11 @@ struct _fpstate_ia32 {
__u32 reserved;
struct _fpxreg _fxsr_st[8];
struct _xmmreg _xmm[8]; /* It's actually 16 */
- __u32 padding[56];
+ __u32 padding[44];
+ union {
+ __u32 padding2[12];
+ struct _fpx_sw_bytes sw_reserved;
+ };
};
struct sigcontext_ia32 {
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 04f84f4..29324c1 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -167,30 +167,33 @@ extern int safe_smp_processor_id(void);
#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef CONFIG_X86_64
static inline int logical_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}
-#ifndef CONFIG_X86_64
+#include <mach_apicdef.h>
static inline unsigned int read_apic_id(void)
{
- return *(u32 *)(APIC_BASE + APIC_ID);
+ unsigned int reg;
+
+ reg = *(u32 *)(APIC_BASE + APIC_ID);
+
+ return GET_APIC_ID(reg);
}
-#else
-extern unsigned int read_apic_id(void);
#endif
-# ifdef APIC_DEFINITION
+# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
extern int hard_smp_processor_id(void);
# else
-# include <mach_apicdef.h>
+#include <mach_apicdef.h>
static inline int hard_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(read_apic_id());
+ return read_apic_id();
}
# endif /* APIC_DEFINITION */
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/summit/apic.h
index 7a66758..c5b2e4b 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/summit/apic.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_SUMMIT__MACH_APIC_H
-#define ASM_X86__MACH_SUMMIT__MACH_APIC_H
+#ifndef __ASM_SUMMIT_APIC_H
+#define __ASM_SUMMIT_APIC_H
#include <asm/smp.h>
@@ -21,7 +21,7 @@ static inline cpumask_t target_cpus(void)
* Just start on cpu 0. IRQ balancing will spread load
*/
return cpumask_of_cpu(0);
-}
+}
#define TARGET_CPUS (target_cpus())
#define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -30,10 +30,10 @@ static inline cpumask_t target_cpus(void)
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
return 0;
-}
+}
/* we don't use the phys_cpu_present_map to indicate apicid presence */
-static inline unsigned long check_apicid_present(int bit)
+static inline unsigned long check_apicid_present(int bit)
{
return 1;
}
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
static inline physid_mask_t apicid_to_cpu_present(int apicid)
{
- return physid_mask_of_physid(apicid);
+ return physid_mask_of_physid(0);
}
static inline void setup_portio_remap(void)
@@ -143,22 +143,22 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
int num_bits_set;
int cpus_found = 0;
int cpu;
- int apicid;
+ int apicid;
num_bits_set = cpus_weight(cpumask);
/* Return id to all */
if (num_bits_set == NR_CPUS)
return (int) 0xFF;
- /*
- * The cpus in the mask must all be on the apic cluster. If are not
- * on the same apicid cluster return default value of TARGET_CPUS.
+ /*
+ * The cpus in the mask must all be on the apic cluster. If are not
+ * on the same apicid cluster return default value of TARGET_CPUS.
*/
cpu = first_cpu(cpumask);
apicid = cpu_to_logical_apicid(cpu);
while (cpus_found < num_bits_set) {
if (cpu_isset(cpu, cpumask)) {
int new_apicid = cpu_to_logical_apicid(cpu);
- if (apicid_cluster(apicid) !=
+ if (apicid_cluster(apicid) !=
apicid_cluster(new_apicid)){
printk ("%s: Not a valid mask!\n",__FUNCTION__);
return 0xFF;
@@ -182,4 +182,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
return hard_smp_processor_id() >> index_msb;
}
-#endif /* ASM_X86__MACH_SUMMIT__MACH_APIC_H */
+#endif /* __ASM_SUMMIT_APIC_H */
diff --git a/include/asm-x86/summit/apicdef.h b/include/asm-x86/summit/apicdef.h
new file mode 100644
index 0000000..f3fbca1
--- /dev/null
+++ b/include/asm-x86/summit/apicdef.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_SUMMIT_APICDEF_H
+#define __ASM_SUMMIT_APICDEF_H
+
+#define APIC_ID_MASK (0xFF<<24)
+
+static inline unsigned get_apic_id(unsigned long x)
+{
+ return (x>>24)&0xFF;
+}
+
+#define GET_APIC_ID(x) get_apic_id(x)
+
+#endif
diff --git a/include/asm-x86/mach-bigsmp/mach_ipi.h b/include/asm-x86/summit/ipi.h
index b1b0f96..53bd1e7 100644
--- a/include/asm-x86/mach-bigsmp/mach_ipi.h
+++ b/include/asm-x86/summit/ipi.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_BIGSMP__MACH_IPI_H
-#define ASM_X86__MACH_BIGSMP__MACH_IPI_H
+#ifndef __ASM_SUMMIT_IPI_H
+#define __ASM_SUMMIT_IPI_H
void send_IPI_mask_sequence(cpumask_t mask, int vector);
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
send_IPI_mask(cpu_online_map, vector);
}
-#endif /* ASM_X86__MACH_BIGSMP__MACH_IPI_H */
+#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/include/asm-x86/mach-summit/irq_vectors_limits.h b/include/asm-x86/summit/irq_vectors_limits.h
index 22f376a..890ce3f 100644
--- a/include/asm-x86/mach-summit/irq_vectors_limits.h
+++ b/include/asm-x86/summit/irq_vectors_limits.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H
-#define ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H
+#ifndef _ASM_IRQ_VECTORS_LIMITS_H
+#define _ASM_IRQ_VECTORS_LIMITS_H
/*
* For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
@@ -11,4 +11,4 @@
#define NR_IRQS 224
#define NR_IRQ_VECTORS 1024
-#endif /* ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H */
+#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/summit/mpparse.h
index 92396f2..013ce6f 100644
--- a/include/asm-x86/mach-summit/mach_mpparse.h
+++ b/include/asm-x86/summit/mpparse.h
@@ -1,7 +1,6 @@
-#ifndef ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H
-#define ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H
+#ifndef __ASM_SUMMIT_MPPARSE_H
+#define __ASM_SUMMIT_MPPARSE_H
-#include <mach_apic.h>
#include <asm/tsc.h>
extern int use_cyclone;
@@ -12,11 +11,11 @@ extern void setup_summit(void);
#define setup_summit() {}
#endif
-static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
+static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
char *productid)
{
- if (!strncmp(oem, "IBM ENSW", 8) &&
- (!strncmp(productid, "VIGIL SMP", 9)
+ if (!strncmp(oem, "IBM ENSW", 8) &&
+ (!strncmp(productid, "VIGIL SMP", 9)
|| !strncmp(productid, "EXA", 3)
|| !strncmp(productid, "RUTHLESS SMP", 12))){
mark_tsc_unstable("Summit based system");
@@ -107,4 +106,4 @@ static inline int is_WPEG(struct rio_detail *rio){
rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
}
-#endif /* ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H */
+#endif /* __ASM_SUMMIT_MPPARSE_H */
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 4db0066..3f4e52b 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -241,6 +241,7 @@ static inline struct thread_info *stack_thread_info(void)
#define TS_POLLING 0x0004 /* true if in idle loop
and not sleeping */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
+#define TS_XSAVE 0x0010 /* Use xsave/xrstor */
#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h
index 9948dd3..89eaa54 100644
--- a/include/asm-x86/ucontext.h
+++ b/include/asm-x86/ucontext.h
@@ -1,6 +1,12 @@
#ifndef ASM_X86__UCONTEXT_H
#define ASM_X86__UCONTEXT_H
+#define UC_FP_XSTATE 0x1 /* indicates the presence of extended state
+ * information in the memory layout pointed to
+ * by the fpstate pointer in the ucontext's
+ * sigcontext struct (uc_mcontext).
+ */
+
struct ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
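
A rough sketch of how a user-space SA_SIGINFO handler might consume the new flag; the include and the helper name are illustrative, and whether kernel and libc ucontext definitions mix cleanly depends on the libc:

#include <asm/ucontext.h>	/* UC_FP_XSTATE, struct ucontext */

/* 'ctx' is the third argument of an SA_SIGINFO signal handler. */
static int frame_has_xstate(void *ctx)
{
	struct ucontext *uc = ctx;

	/* When set, uc_mcontext.fpstate points at a struct _xstate frame. */
	return (uc->uc_flags & UC_FP_XSTATE) != 0;
}
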
diff --git a/include/asm-x86/xcr.h b/include/asm-x86/xcr.h
new file mode 100644
index 0000000..f2cba4e7
--- /dev/null
+++ b/include/asm-x86/xcr.h
@@ -0,0 +1,49 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2008 rPath, Inc. - All Rights Reserved
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2 or (at your
+ * option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * asm-x86/xcr.h
+ *
+ * Definitions for the eXtended Control Register instructions
+ */
+
+#ifndef _ASM_X86_XCR_H
+#define _ASM_X86_XCR_H
+
+#define XCR_XFEATURE_ENABLED_MASK 0x00000000
+
+#ifdef __KERNEL__
+# ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+static inline u64 xgetbv(u32 index)
+{
+ u32 eax, edx;
+
+ asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
+ : "=a" (eax), "=d" (edx)
+ : "c" (index));
+ return eax + ((u64)edx << 32);
+}
+
+static inline void xsetbv(u32 index, u64 value)
+{
+ u32 eax = value;
+ u32 edx = value >> 32;
+
+ asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
+ : : "a" (eax), "d" (edx), "c" (index));
+}
+
+# endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_XCR_H */
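
A minimal sketch of how these accessors could be used to program XCR0; it assumes CR4.OSXSAVE has already been set and is not necessarily the exact sequence used by the xsave setup code in this series:

#include <asm/xcr.h>
#include <asm/xsave.h>		/* XCNTXT_MASK */

static void demo_enable_xstate(void)
{
	u64 enabled;

	/* What the OS currently advertises as enabled in XCR0. */
	enabled = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	/* Enable exactly the states this kernel knows how to save/restore. */
	if (enabled != XCNTXT_MASK)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, XCNTXT_MASK);
}
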
diff --git a/include/asm-x86/xsave.h b/include/asm-x86/xsave.h
new file mode 100644
index 0000000..08e9a1a
--- /dev/null
+++ b/include/asm-x86/xsave.h
@@ -0,0 +1,118 @@
+#ifndef __ASM_X86_XSAVE_H
+#define __ASM_X86_XSAVE_H
+
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+
+#define XSTATE_FP 0x1
+#define XSTATE_SSE 0x2
+
+#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
+
+#define FXSAVE_SIZE 512
+
+/*
+ * These are the features that the OS can handle currently.
+ */
+#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE)
+
+#ifdef CONFIG_X86_64
+#define REX_PREFIX "0x48, "
+#else
+#define REX_PREFIX
+#endif
+
+extern unsigned int xstate_size;
+extern u64 pcntxt_mask;
+extern struct xsave_struct *init_xstate_buf;
+
+extern void xsave_cntxt_init(void);
+extern void xsave_init(void);
+extern int init_fpu(struct task_struct *child);
+extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
+ void __user *fpstate,
+ struct _fpx_sw_bytes *sw);
+
+static inline int xrstor_checking(struct xsave_struct *fx)
+{
+ int err;
+
+ asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : [err] "=r" (err)
+ : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
+ : "memory");
+
+ return err;
+}
+
+static inline int xsave_user(struct xsave_struct __user *buf)
+{
+ int err;
+ __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ _ASM_ALIGN "\n"
+ _ASM_PTR "1b,3b\n"
+ ".previous"
+ : [err] "=r" (err)
+ : "D" (buf), "a" (-1), "d" (-1), "0" (0)
+ : "memory");
+ if (unlikely(err) && __clear_user(buf, xstate_size))
+ err = -EFAULT;
+ /* No need to clear here because the caller clears USED_MATH */
+ return err;
+}
+
+static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+{
+ int err;
+ struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
+ __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ _ASM_ALIGN "\n"
+ _ASM_PTR "1b,3b\n"
+ ".previous"
+ : [err] "=r" (err)
+ : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
+ : "memory"); /* memory required? */
+ return err;
+}
+
+static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
+{
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
+ asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+ : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+}
+
+static inline void xsave(struct task_struct *tsk)
+{
+ /* This, however, we can work around by forcing the compiler to select
+ an addressing mode that doesn't require extended registers. */
+ __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
+ : : "D" (&(tsk->thread.xstate->xsave)),
+ "a" (-1), "d"(-1) : "memory");
+}
+#endif
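
For illustration only, a caller of the wrappers above might look like the following; the real FPU switch path is lazier than this and the function name is made up:

#include <linux/sched.h>
#include <asm/xsave.h>

static void demo_switch_xstate(struct task_struct *prev,
			       struct task_struct *next)
{
	/* XSAVE with an all-ones mask: record every enabled state component. */
	xsave(prev);

	/* XRSTOR only the FP/SSE components this kernel manages. */
	xrstor_state(&next->thread.xstate->xsave, XCNTXT_MASK);
}
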
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 56c73b8..c360c55 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -25,9 +25,99 @@
#include <linux/types.h>
#include <linux/msi.h>
-#ifdef CONFIG_DMAR
+#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct intel_iommu;
+struct dmar_drhd_unit {
+ struct list_head list; /* list of drhd units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ u64 reg_base_addr; /* register base address*/
+ struct pci_dev **devices; /* target device array */
+ int devices_cnt; /* target device count */
+ u8 ignored:1; /* ignore drhd */
+ u8 include_all:1;
+ struct intel_iommu *iommu;
+};
+
+extern struct list_head dmar_drhd_units;
+
+#define for_each_drhd_unit(drhd) \
+ list_for_each_entry(drhd, &dmar_drhd_units, list)
+
+extern int dmar_table_init(void);
+extern int early_dmar_detect(void);
+extern int dmar_dev_scope_init(void);
+
+/* Intel IOMMU detection */
+extern void detect_intel_iommu(void);
+
+
+extern int parse_ioapics_under_ir(void);
+extern int alloc_iommu(struct dmar_drhd_unit *);
+#else
+static inline void detect_intel_iommu(void)
+{
+ return;
+}
+
+static inline int dmar_table_init(void)
+{
+ return -ENODEV;
+}
+#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
+
+#ifdef CONFIG_INTR_REMAP
+extern int intr_remapping_enabled;
+extern int enable_intr_remapping(int);
+
+struct irte {
+ union {
+ struct {
+ __u64 present : 1,
+ fpd : 1,
+ dst_mode : 1,
+ redir_hint : 1,
+ trigger_mode : 1,
+ dlvry_mode : 3,
+ avail : 4,
+ __reserved_1 : 4,
+ vector : 8,
+ __reserved_2 : 8,
+ dest_id : 32;
+ };
+ __u64 low;
+ };
+
+ union {
+ struct {
+ __u64 sid : 16,
+ sq : 2,
+ svt : 2,
+ __reserved_3 : 44;
+ };
+ __u64 high;
+ };
+};
+extern int get_irte(int irq, struct irte *entry);
+extern int modify_irte(int irq, struct irte *irte_modified);
+extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
+extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
+ u16 sub_handle);
+extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
+extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index);
+extern int flush_irte(int irq);
+extern int free_irte(int irq);
+
+extern int irq_remapped(int irq);
+extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
+extern struct intel_iommu *map_ioapic_to_ir(int apic);
+#else
+#define irq_remapped(irq) (0)
+#define enable_intr_remapping(mode) (-1)
+#define intr_remapping_enabled (0)
+#endif
+
+#ifdef CONFIG_DMAR
extern const char *dmar_get_fault_reason(u8 fault_reason);
/* Can't use the common MSI interrupt functions
@@ -40,47 +130,30 @@ extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern int arch_setup_dmar_msi(unsigned int irq);
-/* Intel IOMMU detection and initialization functions */
-extern void detect_intel_iommu(void);
-extern int intel_iommu_init(void);
-
-extern int dmar_table_init(void);
-extern int early_dmar_detect(void);
-
-extern struct list_head dmar_drhd_units;
+extern int iommu_detected, no_iommu;
extern struct list_head dmar_rmrr_units;
-
-struct dmar_drhd_unit {
- struct list_head list; /* list of drhd units */
- u64 reg_base_addr; /* register base address*/
- struct pci_dev **devices; /* target device array */
- int devices_cnt; /* target device count */
- u8 ignored:1; /* ignore drhd */
- u8 include_all:1;
- struct intel_iommu *iommu;
-};
-
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
u64 base_address; /* reserved base address*/
u64 end_address; /* reserved end address */
struct pci_dev **devices; /* target devices */
int devices_cnt; /* target device count */
};
-#define for_each_drhd_unit(drhd) \
- list_for_each_entry(drhd, &dmar_drhd_units, list)
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+/* Intel DMAR initialization functions */
+extern int intel_iommu_init(void);
+extern int dmar_disabled;
#else
-static inline void detect_intel_iommu(void)
-{
- return;
-}
static inline int intel_iommu_init(void)
{
+#ifdef CONFIG_INTR_REMAP
+ return dmar_dev_scope_init();
+#else
return -ENODEV;
+#endif
}
-
#endif /* !CONFIG_DMAR */
#endif /* __DMAR_H__ */
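
With the iterator exported whenever CONFIG_DMAR or CONFIG_INTR_REMAP is set, a caller can walk the DRHD units like this (a sketch; the skip-ignored filtering is an assumption about typical usage, not a quote from this series):

#include <linux/dmar.h>

static int demo_count_active_drhd(void)
{
	struct dmar_drhd_unit *drhd;
	int count = 0;

	/* Walk every DRHD unit parsed from the ACPI DMAR table. */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;	/* firmware marked it as not usable */
		count++;
	}
	return count;
}
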
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 350033e..ee9bcc6 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -108,6 +108,9 @@ extern struct resource iomem_resource;
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
+extern void reserve_region_with_split(struct resource *root,
+ resource_size_t start, resource_size_t end,
+ const char *name);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int allocate_resource(struct resource *root, struct resource *new,
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8ccb462..8d9411b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -62,6 +62,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */
#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */
#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */
+#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index fac3337..9f2a375 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -23,12 +23,19 @@
__attribute__((__section__(SHARED_ALIGNED_SECTION))) \
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
____cacheline_aligned_in_smp
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.page_aligned"))) \
+ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#else
#define DEFINE_PER_CPU(type, name) \
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
DEFINE_PER_CPU(type, name)
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
#endif
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
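
A hypothetical user of the new page-aligned variant (type and symbol names are made up); on SMP builds the object lands in .data.percpu.page_aligned so the linker script can page-align it:

#include <linux/percpu.h>

struct demo_page {
	char buf[4096];
};

/* Hypothetical: one page-sized scratch area per CPU. */
DEFINE_PER_CPU_PAGE_ALIGNED(struct demo_page, demo_scratch);

static char *demo_cpu_buf(int cpu)
{
	return per_cpu(demo_scratch, cpu).buf;
}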