Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/acpi/wakeup.S            | 41
-rw-r--r--  arch/i386/kernel/alternative.c            |  6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c   | 60
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c    |  4
-rw-r--r--  arch/i386/kernel/cpu/perfctr-watchdog.c   | 28
-rw-r--r--  arch/i386/kernel/io_apic.c                |  8
-rw-r--r--  arch/i386/kernel/nmi.c                    |  2
-rw-r--r--  arch/i386/kernel/ptrace.c                 |  1
-rw-r--r--  arch/i386/kernel/traps.c                  | 33
-rw-r--r--  arch/i386/kernel/tsc.c                    |  1
10 files changed, 116 insertions(+), 68 deletions(-)
diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S
index ed0a0f2..f22ba85 100644
--- a/arch/i386/kernel/acpi/wakeup.S
+++ b/arch/i386/kernel/acpi/wakeup.S
@@ -151,51 +151,30 @@ bogus_real_magic:
#define VIDEO_FIRST_V7 0x0900
# Setting of user mode (AX=mode ID) => CF=success
+
+# For now, we only handle VESA modes (0x0200..0x03ff). To handle other
+# modes, we should probably compile in the video code from the boot
+# directory.
mode_set:
movw %ax, %bx
-#if 0
- cmpb $0xff, %ah
- jz setalias
-
- testb $VIDEO_RECALC>>8, %ah
- jnz _setrec
-
- cmpb $VIDEO_FIRST_RESOLUTION>>8, %ah
- jnc setres
-
- cmpb $VIDEO_FIRST_SPECIAL>>8, %ah
- jz setspc
-
- cmpb $VIDEO_FIRST_V7>>8, %ah
- jz setv7
-#endif
-
- cmpb $VIDEO_FIRST_VESA>>8, %ah
- jnc check_vesa
-#if 0
- orb %ah, %ah
- jz setmenu
-#endif
-
- decb %ah
-# jz setbios Add bios modes later
+ subb $VIDEO_FIRST_VESA>>8, %bh
+ cmpb $2, %bh
+ jb check_vesa
-setbad: clc
+setbad:
+ clc
ret
check_vesa:
- subb $VIDEO_FIRST_VESA>>8, %bh
orw $0x4000, %bx # Use linear frame buffer
movw $0x4f02, %ax # VESA BIOS mode set call
int $0x10
cmpw $0x004f, %ax # AL=4f if implemented
- jnz _setbad # AH=0 if OK
+ jnz setbad # AH=0 if OK
stc
ret
-_setbad: jmp setbad
-
.code32
ALIGN
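
The new mode_set path above boils down to a single unsigned range check on the mode's high byte. A minimal C sketch of that check (the function name and header are illustrative, not part of the patch):

#include <stdbool.h>

#define VIDEO_FIRST_VESA 0x0200	/* VESA modes live in 0x0200..0x03ff */

/* Equivalent of "subb $VIDEO_FIRST_VESA>>8, %bh; cmpb $2, %bh; jb check_vesa":
 * subtracting the VESA base from the high byte and doing one unsigned
 * compare accepts exactly the 0x0200..0x03ff window; anything else falls
 * through to setbad. */
static bool is_vesa_wakeup_mode(unsigned int mode)
{
	unsigned char high = (mode >> 8) & 0xff;

	return (unsigned char)(high - (VIDEO_FIRST_VESA >> 8)) < 2;
}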
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 9f4ac8b..bd72d94 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -445,8 +445,6 @@ void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
{
memcpy(addr, opcode, len);
sync_core();
- /* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
- case. */
- if (cpu_has_clflush)
- asm("clflush (%0) " :: "r" (addr) : "memory");
+ /* Could also do a CLFLUSH here to speed up CPU recovery; but
+ that causes hangs on some VIA CPUs. */
}
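
For context, a hedged sketch of how a caller might use text_poke() after this change; only the prototype is taken from the hunk above, the call site itself is hypothetical:

extern void text_poke(void *addr, unsigned char *opcode, int len);

/* Patch a single instruction byte to a NOP.  text_poke() copies the bytes
 * and runs sync_core(); after this change it deliberately skips CLFLUSH,
 * which hangs some VIA CPUs. */
static void patch_byte_with_nop(void *addr)
{
	unsigned char nop = 0x90;	/* one-byte x86 NOP */

	text_poke(addr, &nop, 1);
}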
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index ef8f0bc..f0cce3c 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -76,6 +76,7 @@ static unsigned int longhaul_index;
/* Module parameters */
static int scale_voltage;
static int disable_acpi_c3;
+static int revid_errata;
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)
@@ -168,7 +169,10 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index,
rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Setup new frequency */
- longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ if (!revid_errata)
+ longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ else
+ longhaul.bits.RevisionKey = 0;
longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
/* Setup new voltage */
@@ -272,7 +276,7 @@ static void longhaul_setstate(unsigned int table_index)
dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
-
+retry_loop:
preempt_disable();
local_irq_save(flags);
@@ -344,6 +348,47 @@ static void longhaul_setstate(unsigned int table_index)
preempt_enable();
freqs.new = calc_speed(longhaul_get_cpu_mult());
+ /* Check if requested frequency is set. */
+ if (unlikely(freqs.new != speed)) {
+ printk(KERN_INFO PFX "Failed to set requested frequency!\n");
+ /* Revision ID = 1, but the processor expects a revision key
+ * equal to 0. Jumpers at the bottom of the processor will change
+ * the multiplier and FSB, but will not change bits in the Longhaul
+ * MSR nor enable voltage scaling. */
+ if (!revid_errata) {
+ printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
+ "option.\n");
+ revid_errata = 1;
+ msleep(200);
+ goto retry_loop;
+ }
+ /* Why ACPI C3 sometimes doesn't work is a mystery to me,
+ * but it does happen. The processor enters the ACPI C3 state
+ * but doesn't change frequency. I tried poking various
+ * bits in the northbridge registers, but without success. */
+ if (longhaul_flags & USE_ACPI_C3) {
+ printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
+ longhaul_flags &= ~USE_ACPI_C3;
+ if (revid_errata) {
+ printk(KERN_INFO PFX "Disabling \"Ignore "
+ "Revision ID\" option.\n");
+ revid_errata = 0;
+ }
+ msleep(200);
+ goto retry_loop;
+ }
+ /* This shouldn't happen. Longhaul ver. 2 was reported not to
+ * work on processors without voltage scaling but with
+ * RevID = 1. The RevID errata workaround should make things
+ * right; this is just to be 100% sure. */
+ if (longhaul_version == TYPE_LONGHAUL_V2) {
+ printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
+ longhaul_version = TYPE_LONGHAUL_V1;
+ msleep(200);
+ goto retry_loop;
+ }
+ }
+ /* Report true CPU frequency */
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
if (!bm_timeout)
@@ -956,11 +1001,20 @@ static void __exit longhaul_exit(void)
kfree(longhaul_table);
}
+/* Even if the BIOS exports an ACPI C3 state, and it is used
+ * successfully when the CPU is idle, in some cases this state
+ * doesn't trigger the frequency transition. */
module_param (disable_acpi_c3, int, 0644);
MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
-
+/* Change CPU voltage along with frequency. Very useful for saving
+ * power, but most VIA C3 processors don't support it. */
module_param (scale_voltage, int, 0644);
MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
+/* Force the revision key to 0 for processors which don't
+ * support voltage scaling but present themselves as if
+ * they did. */
+module_param(revid_errata, int, 0644);
+MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
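
The retry path above is easiest to read as an ordered ladder of workarounds. A condensed, self-contained sketch of that ladder (the helper name, the stand-in defines, and their values are assumptions for the sketch, not the driver's real definitions):

/* Sketch-local stand-ins for the driver state used in the retry_loop. */
#define USE_ACPI_C3		(1 << 1)	/* value assumed for the sketch */
enum { TYPE_LONGHAUL_V1, TYPE_LONGHAUL_V2 };

static int revid_errata;
static unsigned int longhaul_flags = USE_ACPI_C3;
static int longhaul_version = TYPE_LONGHAUL_V2;

/* Returns 1 if another workaround is worth retrying with, 0 when we are
 * out of options; mirrors the order used in longhaul_setstate() above. */
static int pick_next_workaround(void)
{
	if (!revid_errata) {			/* 1st: ignore the Revision ID */
		revid_errata = 1;
		return 1;
	}
	if (longhaul_flags & USE_ACPI_C3) {	/* 2nd: stop relying on ACPI C3 */
		longhaul_flags &= ~USE_ACPI_C3;
		revid_errata = 0;		/* re-test with the errata off */
		return 1;
	}
	if (longhaul_version == TYPE_LONGHAUL_V2) {	/* 3rd: fall back to v1 */
		longhaul_version = TYPE_LONGHAUL_V1;
		return 1;
	}
	return 0;
}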
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index d5a456d..db6c25a 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -515,7 +515,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
cpuid4_info[cpu] = kzalloc(
sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
- if (unlikely(cpuid4_info[cpu] == NULL))
+ if (cpuid4_info[cpu] == NULL)
return -ENOMEM;
oldmask = current->cpus_allowed;
@@ -748,6 +748,8 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
unsigned int cpu = sys_dev->id;
unsigned long i;
+ if (cpuid4_info[cpu] == NULL)
+ return;
for (i = 0; i < num_cache_leaves; i++) {
cache_remove_shared_cpu_map(cpu, i);
kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
index 4be488e..93fecd4 100644
--- a/arch/i386/kernel/cpu/perfctr-watchdog.c
+++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
@@ -263,8 +263,8 @@ static int setup_k7_watchdog(unsigned nmi_hz)
unsigned int evntsel;
struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
- perfctr_msr = MSR_K7_PERFCTR0;
- evntsel_msr = MSR_K7_EVNTSEL0;
+ perfctr_msr = wd_ops->perfctr;
+ evntsel_msr = wd_ops->evntsel;
wrmsrl(perfctr_msr, 0UL);
@@ -343,8 +343,8 @@ static int setup_p6_watchdog(unsigned nmi_hz)
unsigned int evntsel;
struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
- perfctr_msr = MSR_P6_PERFCTR0;
- evntsel_msr = MSR_P6_EVNTSEL0;
+ perfctr_msr = wd_ops->perfctr;
+ evntsel_msr = wd_ops->evntsel;
/* KVM doesn't implement this MSR */
if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
@@ -569,8 +569,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
return 0;
- perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1;
- evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1;
+ perfctr_msr = wd_ops->perfctr;
+ evntsel_msr = wd_ops->evntsel;
wrmsrl(perfctr_msr, 0UL);
@@ -605,6 +605,16 @@ static struct wd_ops intel_arch_wd_ops = {
.evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
};
+static struct wd_ops coreduo_wd_ops = {
+ .reserve = single_msr_reserve,
+ .unreserve = single_msr_unreserve,
+ .setup = setup_intel_arch_watchdog,
+ .rearm = p6_rearm,
+ .stop = single_msr_stop_watchdog,
+ .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+ .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
+};
+
static void probe_nmi_watchdog(void)
{
switch (boot_cpu_data.x86_vendor) {
@@ -615,6 +625,12 @@ static void probe_nmi_watchdog(void)
wd_ops = &k7_wd_ops;
break;
case X86_VENDOR_INTEL:
+ /* Work around Core Duo (Yonah) errata AE49 where perfctr1
+ doesn't have a working enable bit. */
+ if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
+ wd_ops = &coreduo_wd_ops;
+ break;
+ }
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
wd_ops = &intel_arch_wd_ops;
break;
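
What the three MSR changes above have in common: the setup routines now read their counter and event-select MSRs from the wd_ops table instead of hard-coding them, which is what lets coreduo_wd_ops reuse setup_intel_arch_watchdog() while steering it at counter 0 for the Yonah errata. A rough sketch with an abridged struct and hypothetical names:

struct wd_msrs {
	unsigned perfctr;	/* performance-counter MSR to program */
	unsigned evntsel;	/* matching event-select MSR */
};

/* Formerly each setup function hard-coded its MSRs (e.g.
 * MSR_ARCH_PERFMON_PERFCTR1); taking them from the ops table means one
 * setup routine can serve both the generic table and the Core Duo one,
 * which supplies counter 0 because counter 1 has no working enable bit. */
static unsigned pick_perfctr(const struct wd_msrs *ops)
{
	return ops->perfctr;
}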
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 4b8a8da4..e2f4a1c 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -754,14 +754,6 @@ static int pirq_entries [MAX_PIRQS];
static int pirqs_enabled;
int skip_ioapic_setup;
-static int __init ioapic_setup(char *str)
-{
- skip_ioapic_setup = 1;
- return 1;
-}
-
-__setup("noapic", ioapic_setup);
-
static int __init ioapic_pirq_setup(char *str)
{
int i, max;
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 8c1c965..c7227e2 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -115,12 +115,12 @@ static int __init check_nmi_watchdog(void)
atomic_dec(&nmi_active);
}
}
+ endflag = 1;
if (!atomic_read(&nmi_active)) {
kfree(prev_nmi_count);
atomic_set(&nmi_active, -1);
return -1;
}
- endflag = 1;
printk("OK.\n");
/* now that we know it works we can reduce NMI frequency to
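
The point of moving "endflag = 1" above the early return: the other CPUs are parked in a busy loop waiting for that flag, so every exit path from check_nmi_watchdog() has to release them, including the failure path that returns -1. A hedged sketch of the waiting side (names are illustrative):

static volatile int endflag_sketch;

/* Runs on the other CPUs while the checking CPU counts NMIs; they spin
 * here until the flag is set, so an early return that skipped
 * "endflag = 1" would leave them spinning forever.  In the kernel the
 * loop body is a memory barrier rather than an empty statement. */
static void nmi_busy_wait_sketch(void *unused)
{
	while (endflag_sketch == 0)
		;
}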
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 0c8f00e..7c1b925 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -274,7 +274,6 @@ static void clear_singlestep(struct task_struct *child)
void ptrace_disable(struct task_struct *child)
{
clear_singlestep(child);
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
}
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index cfffe3d..47b0bef 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -100,36 +100,45 @@ asmlinkage void machine_check(void);
int kstack_depth_to_print = 24;
static unsigned int code_bytes = 64;
-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned size)
{
return p > (void *)tinfo &&
- p < (void *)tinfo + THREAD_SIZE - 3;
+ p <= (void *)tinfo + THREAD_SIZE - size;
}
+/* The form of the top of the frame on the stack */
+struct stack_frame {
+ struct stack_frame *next_frame;
+ unsigned long return_address;
+};
+
static inline unsigned long print_context_stack(struct thread_info *tinfo,
unsigned long *stack, unsigned long ebp,
struct stacktrace_ops *ops, void *data)
{
- unsigned long addr;
-
#ifdef CONFIG_FRAME_POINTER
- while (valid_stack_ptr(tinfo, (void *)ebp)) {
- unsigned long new_ebp;
- addr = *(unsigned long *)(ebp + 4);
+ struct stack_frame *frame = (struct stack_frame *)ebp;
+ while (valid_stack_ptr(tinfo, frame, sizeof(*frame))) {
+ struct stack_frame *next;
+ unsigned long addr;
+
+ addr = frame->return_address;
ops->address(data, addr);
/*
* break out of recursive entries (such as
* end_of_stack_stop_unwind_function). Also,
* we can never allow a frame pointer to
* move downwards!
- */
- new_ebp = *(unsigned long *)ebp;
- if (new_ebp <= ebp)
+ */
+ next = frame->next_frame;
+ if (next <= frame)
break;
- ebp = new_ebp;
+ frame = next;
}
#else
- while (valid_stack_ptr(tinfo, stack)) {
+ while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
+ unsigned long addr;
+
addr = *stack++;
if (__kernel_text_address(addr))
ops->address(data, addr);
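
The valid_stack_ptr() change above is about object size, not just the pointer: with the size argument, the check guarantees the whole struct stack_frame (or unsigned long) lies inside the THREAD_SIZE stack before it is dereferenced. A minimal sketch of the bound, with the stack size assumed for illustration:

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE_SKETCH 8192		/* typical i386 kernel stack, assumed */

static bool valid_stack_ptr_sketch(uintptr_t stack_base, uintptr_t p, unsigned size)
{
	/* p may be as high as stack_base + THREAD_SIZE - size: the object
	 * then ends exactly at the top of the stack and is still in bounds.
	 * The old "p < base + THREAD_SIZE - 3" only accounted for a 4-byte read. */
	return p > stack_base && p <= stack_base + THREAD_SIZE_SKETCH - size;
}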
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index debd7db..a39280b 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -292,7 +292,6 @@ static struct clocksource clocksource_tsc = {
void mark_tsc_unstable(char *reason)
{
- sched_clock_unstable_event();
if (!tsc_unstable) {
tsc_unstable = 1;
tsc_enabled = 0;