From e8f380e00840f694599e6ab42806639f7de26f11 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Tue, 22 May 2012 12:53:45 +0200
Subject: x86/bitops: Move BIT_64() for a wider use

Needed for shifting 64-bit values on 32-bit, like MSR values,
for example.

Signed-off-by: Borislav Petkov
Cc: Linus Torvalds
Cc: Andrew Morton
Cc: Peter Zijlstra
Cc: Frank Arnold
Link: http://lkml.kernel.org/r/1337684026-19740-1-git-send-email-bp@amd64.org
Signed-off-by: Ingo Molnar

diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index b97596e..a6983b2 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,8 @@
 #include
 #include

+#define BIT_64(n)	(U64_C(1) << (n))
+
 /*
  * These have to be done with inline assembly: that way the bit-setting
  * is guaranteed to be atomic. All bit operations return 0 if the bit
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index c6074c5..8c87a5e 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -5,8 +5,6 @@

 #include

-#define BIT_64(n)	(U64_C(1) << (n))
-
 #define EC(x)		((x) & 0xffff)
 #define XEC(x, mask)	(((x) >> 16) & mask)
--
cgit v0.10.2

From 80f033610fb968e75f5d470233d8d0260d7a72ed Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Tue, 22 May 2012 12:53:46 +0200
Subject: x86/mce: Fix 32-bit build

Got bitten again by the BIT() macro:

  arch/x86/kernel/cpu/mcheck/mce.c: In function '__mcheck_cpu_apply_quirks':
  arch/x86/kernel/cpu/mcheck/mce.c:1453:6: warning: left shift count >= width of type
  arch/x86/kernel/cpu/mcheck/mce.c:1454:7: warning: left shift count >= width of type

Fix it already.

Signed-off-by: Borislav Petkov
Cc: Frank Arnold
Cc: Linus Torvalds
Cc: Andrew Morton
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1337684026-19740-2-git-send-email-bp@amd64.org
Signed-off-by: Ingo Molnar

diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 888fbf9..0456b9a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1450,9 +1450,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 			rdmsrl(msrs[i], val);

 			/* CntP bit set? */
-			if (val & BIT(62)) {
-				val &= ~BIT(62);
-				wrmsrl(msrs[i], val);
+			if (val & BIT_64(62)) {
+				val &= ~BIT_64(62);
+				wrmsrl(msrs[i], val);
 			}
 		}
--
cgit v0.10.2
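The two fixes above hinge on the width of the shift: BIT() builds its mask in unsigned long, which is only 32 bits wide in a 32-bit build, so BIT(62) shifts past the end of the type, while BIT_64() builds it in a 64-bit constant. A minimal user-space sketch of the difference, using stand-in macros rather than the kernel headers (UINT64_C() from stdint.h plays the role of the kernel's U64_C()):

#include <stdio.h>
#include <stdint.h>

/*
 * Stand-ins for the kernel macros, not the real headers: BIT() builds the
 * mask in unsigned long, BIT_64() in a 64-bit constant.
 */
#define BIT(n)		(1UL << (n))
#define BIT_64(n)	(UINT64_C(1) << (n))

int main(void)
{
	uint64_t val = UINT64_C(1) << 62;	/* stands in for an MSR value */

	/*
	 * Built with -m32, unsigned long is 32 bits wide, so the BIT(62)
	 * line draws "left shift count >= width of type" and the test is
	 * meaningless; BIT_64(62) behaves the same in both builds.
	 */
	printf("BIT(62)    matches: %d\n", (val & BIT(62)) != 0);
	printf("BIT_64(62) matches: %d\n", (val & BIT_64(62)) != 0);
	return 0;
}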
From 1b38a3a10f2ad96a3c0130f63b7f3610bab7090d Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Fri, 25 May 2012 11:40:09 +0100
Subject: x86: hpet: Fix copy-and-paste mistake in earlier change

This fixes an oversight in 396e2c6fed4ff13b53ce0e573105531cf53b0cad
("x86: Clear HPET configuration registers on startup"), noticed by
Thomas Gleixner.

Signed-off-by: Jan Beulich
Link: http://lkml.kernel.org/r/4FBF7DA902000078000861EE@nat28.tlf.novell.com
Signed-off-by: Thomas Gleixner

diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 9cc7b43..1460a5d 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -870,7 +870,7 @@ int __init hpet_enable(void)
 	else
 		pr_warn("HPET initial state will not be saved\n");
 	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
-	hpet_writel(cfg, HPET_Tn_CFG(i));
+	hpet_writel(cfg, HPET_CFG);
 	if (cfg)
 		pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
 			cfg);
--
cgit v0.10.2

From fa83523f45fbb403eba4ebc5704bf98aa4da0163 Mon Sep 17 00:00:00 2001
From: John Dykstra
Date: Fri, 25 May 2012 16:12:46 -0500
Subject: x86/mm/pat: Improve scaling of pat_pagerange_is_ram()

Function pat_pagerange_is_ram() scales poorly to large address
ranges, because it probes the resource tree for each page.

On a 2.6 GHz Opteron, this function consumes 34 ms for a 1 GB range.

It is called twice during untrack_pfn_vma(), slowing process
cleanup and handicapping the OOM killer.

This replacement consumes less than 1 ms, under the same conditions.

Signed-off-by: John Dykstra on behalf of Cray Inc.
Acked-by: Suresh Siddha
Cc: Linus Torvalds
Cc: Andrew Morton
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1337980366.1979.6.camel@redwood
[ Small stylistic cleanups and renames ]
Signed-off-by: Ingo Molnar

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index f6ff57b..bea6e57 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -158,31 +158,47 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 	return req_type;
 }

+struct pagerange_state {
+	unsigned long	cur_pfn;
+	int		ram;
+	int		not_ram;
+};
+
+static int
+pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
+{
+	struct pagerange_state *state = arg;
+
+	state->not_ram	|= initial_pfn > state->cur_pfn;
+	state->ram	|= total_nr_pages > 0;
+	state->cur_pfn	 = initial_pfn + total_nr_pages;
+
+	return state->ram && state->not_ram;
+}
+
 static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 {
-	int ram_page = 0, not_rampage = 0;
-	unsigned long page_nr;
+	int ret = 0;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	struct pagerange_state state = {start_pfn, 0, 0};

-	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-	     ++page_nr) {
-		/*
-		 * For legacy reasons, physical address range in the legacy ISA
-		 * region is tracked as non-RAM. This will allow users of
-		 * /dev/mem to map portions of legacy ISA region, even when
-		 * some of those portions are listed(or not even listed) with
-		 * different e820 types(RAM/reserved/..)
-		 */
-		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
-		    page_is_ram(page_nr))
-			ram_page = 1;
-		else
-			not_rampage = 1;
-
-		if (ram_page == not_rampage)
-			return -1;
+	/*
+	 * For legacy reasons, physical address range in the legacy ISA
+	 * region is tracked as non-RAM. This will allow users of
+	 * /dev/mem to map portions of legacy ISA region, even when
+	 * some of those portions are listed(or not even listed) with
+	 * different e820 types(RAM/reserved/..)
+	 */
+	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
+		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
+
+	if (start_pfn < end_pfn) {
+		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+				&state, pagerange_is_ram_callback);
 	}

-	return ram_page;
+	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
 }

 /*
--
cgit v0.10.2
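The rewritten pat_pagerange_is_ram() above classifies a range as all RAM (1), no RAM (0) or mixed (-1) by letting walk_system_ram_range() report each intersecting RAM chunk and watching for gaps between chunks. A rough user-space model of that state machine, with walk_system_ram_range() simulated by a small hard-coded RAM map (the map and the walk_ram()/range_is_ram() helpers are invented for illustration, not kernel code):

#include <stdio.h>

struct pagerange_state {
	unsigned long cur_pfn;
	int ram;
	int not_ram;
};

struct ram_range {
	unsigned long start_pfn;
	unsigned long nr_pages;
};

/* Pretend e820 RAM map, in page frames. */
static const struct ram_range ram_map[] = {
	{ 0x100,  0x700  },	/* 1 MB .. 8 MB */
	{ 0x1000, 0xf000 },	/* 16 MB .. 256 MB */
};

static int pagerange_is_ram_callback(unsigned long initial_pfn,
				     unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	/* A gap before this RAM chunk means part of the range is not RAM. */
	state->not_ram |= initial_pfn > state->cur_pfn;
	state->ram |= total_nr_pages > 0;
	state->cur_pfn = initial_pfn + total_nr_pages;

	/* A non-zero return stops the walk early, like the kernel callback. */
	return state->ram && state->not_ram;
}

/* Simulated walk_system_ram_range(): call back once per intersecting chunk. */
static int walk_ram(unsigned long start_pfn, unsigned long nr_pages, void *arg,
		    int (*cb)(unsigned long, unsigned long, void *))
{
	unsigned long end_pfn = start_pfn + nr_pages;
	size_t i;
	int ret = 0;

	for (i = 0; i < sizeof(ram_map) / sizeof(ram_map[0]); i++) {
		unsigned long s = ram_map[i].start_pfn;
		unsigned long e = s + ram_map[i].nr_pages;

		if (s < start_pfn)
			s = start_pfn;
		if (e > end_pfn)
			e = end_pfn;
		if (s >= e)
			continue;
		ret = cb(s, e - s, arg);
		if (ret)
			break;
	}
	return ret;
}

static int range_is_ram(unsigned long start_pfn, unsigned long end_pfn)
{
	struct pagerange_state state = { start_pfn, 0, 0 };
	int ret = walk_ram(start_pfn, end_pfn - start_pfn, &state,
			   pagerange_is_ram_callback);

	/* Walk stopped early (mixed), or: some RAM seen vs. none at all. */
	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

int main(void)
{
	printf("0x1000-0x2000: %d\n", range_is_ram(0x1000, 0x2000));	/* 1  */
	printf("0x800-0x900:   %d\n", range_is_ram(0x800, 0x900));	/* 0  */
	printf("0x600-0x1200:  %d\n", range_is_ram(0x600, 0x1200));	/* -1 */
	return 0;
}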
From 319b6ffc6df892e4ccffff823cc5521a4a5d2dca Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Wed, 30 May 2012 12:33:41 +0300
Subject: x86, realmode: Unbreak the ia64 build of drivers/acpi/sleep.c

Revert the direct usage of real_mode_header and move the
acpi_wakeup_address definition back into x86 architecture code, so
that the shared ACPI code compiles on ia64 again.

[jsakkine: tested compilation in ia64/x86-64 and added proper
 commit message]

Reported-by: Paul Gortmaker
Originally-by: H. Peter Anvin
Signed-off-by: Jarkko Sakkinen
Link: http://lkml.kernel.org/r/1338370421-27735-1-git-send-email-jarkko.sakkinen@intel.com
Cc: Tony Luck
Cc: Len Brown
Signed-off-by: H. Peter Anvin

diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 724aa44..0c44630 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include

 #define COMPILER_DEPENDENT_INT64	long long
 #define COMPILER_DEPENDENT_UINT64	unsigned long long
@@ -116,10 +117,8 @@ static inline void acpi_disable_pci(void)
 /* Low-level suspend routine. */
 extern int acpi_suspend_lowlevel(void);

-extern const unsigned char acpi_wakeup_code[];
-
-/* early initialization routine */
-extern void acpi_reserve_wakeup_memory(void);
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))

 /*
  * Check if the CPU can handle C2 and deeper
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ebaa045..74ee4ab 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -25,8 +25,6 @@
 #include
 #include

-#include
-
 #include "internal.h"
 #include "sleep.h"
@@ -93,13 +91,11 @@ static struct notifier_block tts_notifier = {
 static int acpi_sleep_prepare(u32 acpi_state)
 {
 #ifdef CONFIG_ACPI_SLEEP
-	unsigned long wakeup_pa = real_mode_header->wakeup_start;
 	/* do we have a wakeup address for S2 and S3? */
 	if (acpi_state == ACPI_STATE_S3) {
-		if (!wakeup_pa)
+		if (!acpi_wakeup_address)
 			return -EFAULT;
-		acpi_set_firmware_waking_vector(
-				(acpi_physical_address)wakeup_pa);
+		acpi_set_firmware_waking_vector(acpi_wakeup_address);
 	}

 	ACPI_FLUSH_CPU_CACHE();
--
cgit v0.10.2

From 2da06af8106f8f35318bb084baf8448797ef058a Mon Sep 17 00:00:00 2001
From: "zhenzhong.duan"
Date: Wed, 30 May 2012 12:52:15 +0800
Subject: x86, mtrr: Fix a type overflow in range_to_mtrr func

When booting a Sun G5+ with 4T of memory, an overflow shows up in the
mtrr cleanup code:

  *BAD*gran_size: 2G  chunk_size: 2G  num_reg: 10  lose cover RAM: -18014398505283592M

This happens because 1<<31 gets sign-extended. Use an unsigned long
constant to fix it. Needed for memory sizes of 4T and above.

-v2: Use a 64-bit constant instead of an explicit type conversion, as
     suggested by Yinghai. Description updated too.

Signed-off-by: Zhenzhong Duan
Link: http://lkml.kernel.org/r/4FC5A77F.6060505@oracle.com
Signed-off-by: H. Peter Anvin

diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index ac140c7..bdda2e6 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -266,7 +266,7 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
 		if (align > max_align)
 			align = max_align;

-		sizek = 1 << align;
+		sizek = 1UL << align;
 		if (debug_print) {
 			char start_factor = 'K', size_factor = 'K';
 			unsigned long start_base, size_base;
--
cgit v0.10.2
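The mtrr fix above is a classic integer-promotion bug: 1 << align is evaluated as a 32-bit int, so once align gets large enough the result goes negative and is sign-extended when stored in the unsigned long sizek. A small user-space sketch of the effect, assuming a 64-bit unsigned long and typical gcc behavior for the out-of-range signed shift (the value 31 is just illustrative of what a multi-terabyte box can produce):

#include <stdio.h>

int main(void)
{
	unsigned long sizek;	/* 64 bits wide here, as in an x86-64 kernel */
	int align = 31;		/* can get this large on a multi-terabyte box */

	/*
	 * 1 << 31 is computed as a 32-bit int; on gcc/x86 the result is
	 * INT_MIN (formally undefined behavior), which sign-extends when
	 * stored in the 64-bit unsigned long.
	 */
	sizek = 1 << align;
	printf("1   << 31 -> %#lx\n", sizek);

	/* The fix: do the shift in unsigned long from the start. */
	sizek = 1UL << align;
	printf("1UL << 31 -> %#lx\n", sizek);

	return 0;
}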
From 1ab46fd319bcf1fcd9fb6311727d532b580e4eba Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk
Date: Wed, 30 May 2012 18:23:56 -0400
Subject: x86, amd, xen: Avoid NULL pointer paravirt references

Stub out MSR methods that aren't actually needed. This fixes a crash
as Xen Dom0 on AMD Trinity systems.

A bigger patch should be added to remove the paravirt machinery
completely for the methods which apparently have no users!

Reported-by: Andre Przywara
Link: http://lkml.kernel.org/r/20120530222356.GA28417@andromeda.dapyr.net
Signed-off-by: H. Peter Anvin
Cc:

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 75f33b2..e74df95 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1116,7 +1116,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.wbinvd = native_wbinvd,

 	.read_msr = native_read_msr_safe,
+	.rdmsr_regs = native_rdmsr_safe_regs,
 	.write_msr = xen_write_msr_safe,
+	.wrmsr_regs = native_wrmsr_safe_regs,
+
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
--
cgit v0.10.2
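The Xen fix above fills in pv_cpu_ops hooks that were previously left unset, so code calling the rdmsr_regs/wrmsr_regs methods no longer jumps through a NULL pointer. A toy user-space model of the pattern (the struct and function names below are made up for illustration, not the kernel's): an ops table with a missing hook crashes its first caller, and the fix is simply to point the hook at a safe native fallback.

#include <stdio.h>

/* Toy stand-in for a pv_cpu_ops-style table of indirect calls. */
struct msr_ops {
	int (*read_msr)(unsigned int msr, unsigned long long *val);
	int (*rdmsr_regs)(unsigned int *regs);
};

static int fake_native_read_msr_safe(unsigned int msr, unsigned long long *val)
{
	(void)msr;
	*val = 0;		/* pretend the MSR read succeeded */
	return 0;
}

static int fake_native_rdmsr_safe_regs(unsigned int *regs)
{
	(void)regs;		/* pretend the register-based MSR read succeeded */
	return 0;
}

int main(void)
{
	/* Before the fix: rdmsr_regs is simply missing from the table. */
	struct msr_ops ops = {
		.read_msr = fake_native_read_msr_safe,
		/* .rdmsr_regs left NULL -- its first caller would crash */
	};
	unsigned int regs[8] = { 0 };
	unsigned long long val;

	/* The fix amounts to wiring the missing hook to a native fallback. */
	ops.rdmsr_regs = fake_native_rdmsr_safe_regs;

	printf("read_msr   -> %d\n", ops.read_msr(0x10, &val));	/* 0x10: arbitrary demo MSR */
	printf("rdmsr_regs -> %d\n", ops.rdmsr_regs(regs));
	return 0;
}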