Diffstat (limited to 'arch/powerpc/kernel')
24 files changed, 658 insertions, 135 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 8f61934..e50da21 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -7,7 +7,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
 ifeq ($(CONFIG_PPC64),y)
-CFLAGS_prom_init.o += -mno-minimal-toc
+CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
 endif
 ifeq ($(CONFIG_PPC32),y)
 CFLAGS_prom_init.o += -fPIC
@@ -87,6 +87,7 @@ extra-$(CONFIG_8xx) := head_8xx.o
 extra-y += vmlinux.lds
 
 obj-$(CONFIG_RELOCATABLE_PPC32) += reloc_32.o
+obj-$(CONFIG_FSL_SOC_BOOKE) += fsl_booke_cache.o
 
 obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
 obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 4c3e321..c451d43 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -170,6 +170,11 @@ int main(void)
 	DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack));
 	DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack));
 	DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack));
+	DEFINE(PACA_TLB_ESEL_NEXT, offsetof(struct tlb_per_core, esel_next));
+	DEFINE(PACA_TLB_ESEL_MAX, offsetof(struct tlb_per_core, esel_max));
+	DEFINE(PACA_TLB_ESEL_FIRST, offsetof(struct tlb_per_core, esel_first));
+	DEFINE(PACA_TLB_LOCK, offsetof(struct tlb_per_core, lock));
+	DEFINE(PACA_TLB_PER_CORE_PTR, offsetof(struct paca_struct, tlb_per_core_ptr));
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PPC_STD_MMU_64
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index dcd8819..d8c7baf 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -53,6 +53,17 @@ _GLOBAL(__e500_dcache_setup)
 	isync
 	blr
 
+#ifdef CONFIG_PPC_BOOK3E_64
+_GLOBAL(__setup_cpu_e6500)
+	mflr	r6
+#ifdef CONFIG_PPC64
+	bl	.setup_altivec_ivors
+#endif
+	bl	__setup_cpu_e5500
+	mtlr	r6
+	blr
+#endif
+
 #ifdef CONFIG_PPC32
 _GLOBAL(__setup_cpu_e200)
 	/* enable dedicated debug exception handling resources (Debug APU) */
@@ -107,6 +118,13 @@ _GLOBAL(__setup_cpu_e5500)
 #endif
 
 #ifdef CONFIG_PPC_BOOK3E_64
+_GLOBAL(__restore_cpu_e6500)
+	mflr	r5
+	bl	.setup_altivec_ivors
+	bl	__restore_cpu_e5500
+	mtlr	r5
+	blr
+
 _GLOBAL(__restore_cpu_e5500)
 	mflr	r4
 	bl	__e500_icache_setup
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 19599ef..5d116d0 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -74,7 +74,9 @@ extern void __restore_cpu_a2(void);
 #endif /* CONFIG_PPC64 */
 #if defined(CONFIG_E500)
 extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_e6500(unsigned long offset, struct cpu_spec* spec);
 extern void __restore_cpu_e5500(void);
+extern void __restore_cpu_e6500(void);
 #endif /* CONFIG_E500 */
 
 /* This table only contains "desktop" CPUs, it need to be filled with embedded
@@ -2002,6 +2004,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_e500v1,
 		.machine_check		= machine_check_e500,
 		.platform		= "ppc8540",
+		.l2cache_type		= PPC_L2_CACHE_SOC,
 	},
 	{	/* e500v2 */
 		.pvr_mask		= 0xffff0000,
@@ -2021,6 +2024,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_e500v2,
 		.machine_check		= machine_check_e500,
 		.platform		= "ppc8548",
+		.l2cache_type		= PPC_L2_CACHE_SOC,
 	},
 	{	/* e500mc */
 		.pvr_mask		= 0xffff0000,
@@ -2038,6 +2042,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_e500mc,
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce500mc",
+		.l2cache_type		= PPC_L2_CACHE_CORE,
 	},
 #endif /* CONFIG_PPC32 */
 	{	/* e5500 */
@@ -2059,13 +2064,16 @@ static struct cpu_spec __initdata cpu_specs[] = {
 #endif
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce5500",
+		.l2cache_type		= PPC_L2_CACHE_CORE,
 	},
+#ifndef CONFIG_PPC32
 	{	/* e6500 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80400000,
 		.cpu_name		= "e6500",
 		.cpu_features		= CPU_FTRS_E6500,
-		.cpu_user_features	= COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+		.cpu_user_features	= COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU |
+			PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.mmu_features		= MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
 			MMU_FTR_USE_TLBILX,
 		.icache_bsize		= 64,
@@ -2073,13 +2081,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.num_pmcs		= 4,
 		.oprofile_cpu_type	= "ppc/e6500",
 		.oprofile_type		= PPC_OPROFILE_FSL_EMB,
-		.cpu_setup		= __setup_cpu_e5500,
-#ifndef CONFIG_PPC32
-		.cpu_restore		= __restore_cpu_e5500,
-#endif
+		.cpu_setup		= __setup_cpu_e6500,
+		.cpu_restore		= __restore_cpu_e6500,
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce6500",
+		.l2cache_type		= PPC_L2_CACHE_CLUSTER,
 	},
+#endif
 #ifdef CONFIG_PPC32
 	{	/* default match */
 		.pvr_mask		= 0x00000000,
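[Editor's note] The new l2cache_type field records which of three cache arrangements a core family has: e500v1/v2 use a memory-mapped platform cache on the SoC, e500mc/e5500 have a per-core backside L2, and on e6500 the L2 is shared by a cluster of cores. A minimal standalone sketch of keying maintenance off that field follows; the enum names are taken from this series, everything else (the strategy strings, main) is purely illustrative:

    #include <stdio.h>

    /* Mirrors the three cache arrangements introduced by this patch; the
     * enum names come from the series, the rest is illustrative only. */
    enum ppc_l2cache_type {
            PPC_L2_CACHE_SOC,       /* e500v1/v2: memory-mapped platform cache */
            PPC_L2_CACHE_CORE,      /* e500mc/e5500: per-core backside L2 */
            PPC_L2_CACHE_CLUSTER,   /* e6500: L2 shared by a cluster of cores */
    };

    static const char *flush_strategy(enum ppc_l2cache_type t)
    {
            switch (t) {
            case PPC_L2_CACHE_SOC:     return "flush_disable_L2 (L2CTL MMIO)";
            case PPC_L2_CACHE_CORE:    return "flush_backside_L2_cache (L2CSR0)";
            case PPC_L2_CACHE_CLUSTER: return "cluster-wide flush, once per cluster";
            }
            return "?";
    }

    int main(void)
    {
            printf("e500v2: %s\n", flush_strategy(PPC_L2_CACHE_SOC));
            printf("e5500:  %s\n", flush_strategy(PPC_L2_CACHE_CORE));
            printf("e6500:  %s\n", flush_strategy(PPC_L2_CACHE_CLUSTER));
            return 0;
    }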
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97..1a3dd00 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -31,6 +31,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 				struct dma_attrs *attrs)
 {
 	void *ret;
+	phys_addr_t top_ram_pfn = memblock_end_of_DRAM();
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
 	if (ret == NULL)
@@ -41,8 +42,18 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 	struct page *page;
 	int node = dev_to_node(dev);
 
+	/*
+	 * check for crappy device which has dma_mask < ZONE_DMA, and
+	 * we are not going to support it, just warn and fail.
+	 */
+	if (*dev->dma_mask < DMA_BIT_MASK(31)) {
+		dev_err(dev, "Unsupported dma_mask 0x%llx\n", *dev->dma_mask);
+		return NULL;
+	}
 	/* ignore region specifiers */
-	flag &= ~(__GFP_HIGHMEM);
+	flag &= ~(__GFP_HIGHMEM | __GFP_DMA);
+	if (*dev->dma_mask < top_ram_pfn - 1)
+		flag |= GFP_DMA;
 
 	page = alloc_pages_node(node, flag, get_order(size));
 	if (page == NULL)
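[Editor's note] The dma.c change encodes a simple policy: masks narrower than 31 bits are rejected outright, and masks that cannot address all of RAM force the allocation into ZONE_DMA. A small standalone model of that decision, with a made-up 2 GiB top-of-RAM value (the kernel gets it from memblock_end_of_DRAM()):

    #include <stdio.h>
    #include <stdint.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* Models the allocation-flag decision added to dma_direct_alloc_coherent():
     * masks below 31 bits are rejected; masks that cannot reach the top of
     * RAM get GFP_DMA so the buffer comes from the low zone. */
    static const char *pick_flags(uint64_t dma_mask, uint64_t top_of_ram)
    {
            if (dma_mask < DMA_BIT_MASK(31))
                    return "rejected (mask below ZONE_DMA)";
            if (dma_mask < top_of_ram - 1)
                    return "GFP_DMA (allocate from the low zone)";
            return "normal zone";
    }

    int main(void)
    {
            uint64_t top = 2ULL << 30;      /* assume 2 GiB of RAM */

            printf("28-bit mask: %s\n", pick_flags(DMA_BIT_MASK(28), top));
            printf("31-bit mask: %s\n", pick_flags(DMA_BIT_MASK(31), top));
            printf("64-bit mask: %s\n", pick_flags(DMA_BIT_MASK(64), top));
            return 0;
    }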
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 00d1c05..1b1c8ae 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -194,7 +194,7 @@ syscall_exit:
 	 * and so that we don't get interrupted after loading SRR0/1.
 	 */
 #ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
+	fsl_erratum_a006198_wrteei0 r10 r9
 #else
 	ld	r10,PACAKMSR(r13)
 	/*
@@ -576,7 +576,7 @@ _GLOBAL(ret_from_except_lite)
 	 * from the interrupt.
 	 */
 #ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
+	fsl_erratum_a006198_wrteei0 r10 r9
 #else
 	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
 	mtmsrd	r10,1		  /* Update machine state */
@@ -678,7 +678,7 @@ check_count:
 	 * interrupted after loading SRR0/1.
 	 */
 #ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
+	fsl_erratum_a006198_wrteei0 r10 r5
 #else
 	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
 	mtmsrd	r10,1		  /* Update machine state */
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 62c0dc2..9f1ebf7 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -17,6 +17,7 @@
 #include <asm/asm-compat.h>
 #include <asm/asm-offsets.h>
 
+#ifndef CONFIG_PPC64
 /* epapr_ev_idle() was derived from e500_idle() */
 _GLOBAL(epapr_ev_idle)
 	CURRENT_THREAD_INFO(r3, r1)
@@ -42,6 +43,7 @@ epapr_ev_idle_start:
 	 * _TLF_NAPPING.
 	 */
 	b	idle_loop
+#endif
 
 /* Hypercall entry point. Will be patched with device tree instructions. */
 .global epapr_hypercall_start
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index f3eab85..9848713 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -28,7 +28,7 @@ extern u32 epapr_ev_idle_start[];
 
 bool epapr_paravirt_enabled;
 
-static int __init epapr_paravirt_init(void)
+int __init epapr_paravirt_init(void)
 {
 	struct device_node *hyper_node;
 	const u32 *insts;
@@ -58,4 +58,3 @@ static int __init epapr_paravirt_init(void)
 
 	return 0;
 }
-early_initcall(epapr_paravirt_init);
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 4684e33..39308c7 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -298,6 +298,8 @@ interrupt_base_book3e:				/* fake trap */
 	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
 	EXCEPTION_STUB(0x1c0, data_tlb_miss)
 	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
+	EXCEPTION_STUB(0x200, altivec_unavailable)	/* 0x0f20 */
+	EXCEPTION_STUB(0x220, altivec_assist)		/* 0x1700 */
 	EXCEPTION_STUB(0x260, perfmon)
 	EXCEPTION_STUB(0x280, doorbell)
 	EXCEPTION_STUB(0x2a0, doorbell_crit)
@@ -394,6 +396,45 @@ interrupt_end_book3e:
 	bl	.kernel_fp_unavailable_exception
 	b	.ret_from_except
 
+/* Altivec Unavailable Interrupt */
+	START_EXCEPTION(altivec_unavailable);
+	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
+				PROLOG_ADDITION_NONE)
+	/* we can probably do a shorter exception entry for that one... */
+	EXCEPTION_COMMON(0x200, PACA_EXGEN, INTS_KEEP)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+	ld	r12,_MSR(r1)
+	andi.	r0,r12,MSR_PR;
+	beq-	1f
+	bl	.load_up_altivec
+	b	fast_exception_return
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+	INTS_DISABLE
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.altivec_unavailable_exception
+	b	.ret_from_except
+
+/* AltiVec Assist */
+	START_EXCEPTION(altivec_assist);
+	NORMAL_EXCEPTION_PROLOG(0x220, BOOKE_INTERRUPT_ALTIVEC_ASSIST,
+				PROLOG_ADDITION_NONE)
+	EXCEPTION_COMMON(0x220, PACA_EXGEN, INTS_DISABLE)
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+	bl	.altivec_assist_exception
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#else
+	bl	.unknown_exception
+#endif
+	b	.ret_from_except
+
+
 /* Decrementer Interrupt */
 	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
 			   decrementer, .timer_interrupt, ACK_DEC)
@@ -653,6 +694,7 @@ kernel_dbg_exc:
 	mtcr	r11
 	ld	r10,PACA_EXGEN+EX_R10(r13)
 	ld	r11,PACA_EXGEN+EX_R11(r13)
+	fsl_erratum_a006198_restore_srr r13
 	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
 	rfi
 	b	.
@@ -757,7 +799,7 @@ _GLOBAL(exception_return_book3e)
 */
 	.globl fast_exception_return
 fast_exception_return:
-	wrteei	0
+	fsl_erratum_a006198_wrteei0 r0 r10
 1:	mr	r0,r13
 	ld	r10,_MSR(r1)
 	REST_4GPRS(2, r1)
@@ -793,6 +835,7 @@ fast_exception_return:
 	mtspr	SPRN_SRR1,r11
 	ld	r10,PACA_EXGEN+EX_R10(r13)
 	ld	r11,PACA_EXGEN+EX_R11(r13)
+	fsl_erratum_a006198_restore_srr r13
 	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
 	rfi
 
@@ -806,6 +849,7 @@ fast_exception_return:
 BAD_STACK_TRAMPOLINE(0x000)
 BAD_STACK_TRAMPOLINE(0x100)
 BAD_STACK_TRAMPOLINE(0x200)
+BAD_STACK_TRAMPOLINE(0x220)
 BAD_STACK_TRAMPOLINE(0x260)
 BAD_STACK_TRAMPOLINE(0x280)
 BAD_STACK_TRAMPOLINE(0x2a0)
@@ -1277,6 +1321,11 @@ _GLOBAL(book3e_secondary_core_init)
 	/* Init global core bits */
 2:	bl	.init_core_book3e
 
+BEGIN_FTR_SECTION
+	/* Start threads */
+	bl	.fsl_enable_threads
+END_FTR_SECTION_IFSET(CPU_FTR_SMT)
+
 	/* Init per-thread bits */
 3:	bl	.init_thread_book3e
 
@@ -1312,12 +1361,44 @@ _STATIC(init_core_book3e)
 	sync
 	blr
 
+_GLOBAL(fsl_enable_threads)
+BEGIN_FTR_SECTION
+	MFTMR(TMRN_TMCFG0, 3)
+	andi.	r3,r3,0x3f
+	cmpi	0,r3,2
+	blt	2f
+
+	/* Disable the other thread */
+	li	r3,2
+	mtspr	SPRN_TENC,r3
+
+1:	mfspr	r3,SPRN_TENSR
+	andi.	r3,r3,2
+	bne	1b
+
+	/* Configure the MSR per the default */
+	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL);
+	MTTMR(TMRN_IMSR1, 3);
+
+	/*
+	 * Set the NIA for the secondary thread to
+	 * generic_secondary_thread_init
+	 */
+	LOAD_REG_IMMEDIATE(r3, .fsl_secondary_thread_init);
+	MTTMR(TMRN_INIA1, 3);
+
+	/* Release the other thread. It will spin until kick_cpu is called */
+	li	r3, 2
+	mtspr	SPRN_TENS, r3
+END_FTR_SECTION_IFSET(CPU_FTR_SMT)
+2:	blr
+
 _STATIC(init_thread_book3e)
 	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
 	mtspr	SPRN_EPCR,r3
 
 	/* Make sure interrupts are off */
-	wrteei	0
+	fsl_erratum_a006198_wrteei0 r3 r4
 
 	/* disable all timers and clear out status */
 	li	r3,0
@@ -1330,18 +1411,18 @@ _STATIC(init_thread_book3e)
 _GLOBAL(__setup_base_ivors)
 	SET_IVOR(0, 0x020) /* Critical Input */
 	SET_IVOR(1, 0x000) /* Machine Check */
-	SET_IVOR(2, 0x060) /* Data Storage */
+	SET_IVOR(2, 0x060) /* Data Storage */
 	SET_IVOR(3, 0x080) /* Instruction Storage */
-	SET_IVOR(4, 0x0a0) /* External Input */
-	SET_IVOR(5, 0x0c0) /* Alignment */
-	SET_IVOR(6, 0x0e0) /* Program */
-	SET_IVOR(7, 0x100) /* FP Unavailable */
-	SET_IVOR(8, 0x120) /* System Call */
-	SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
-	SET_IVOR(10, 0x160) /* Decrementer */
-	SET_IVOR(11, 0x180) /* Fixed Interval Timer */
-	SET_IVOR(12, 0x1a0) /* Watchdog Timer */
-	SET_IVOR(13, 0x1c0) /* Data TLB Error */
+	SET_IVOR(4, 0x0a0) /* External Input */
+	SET_IVOR(5, 0x0c0) /* Alignment */
+	SET_IVOR(6, 0x0e0) /* Program */
+	SET_IVOR(7, 0x100) /* FP Unavailable */
+	SET_IVOR(8, 0x120) /* System Call */
+	SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
+	SET_IVOR(10, 0x160) /* Decrementer */
+	SET_IVOR(11, 0x180) /* Fixed Interval Timer */
+	SET_IVOR(12, 0x1a0) /* Watchdog Timer */
+	SET_IVOR(13, 0x1c0) /* Data TLB Error */
 	SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
 	SET_IVOR(15, 0x040) /* Debug */
 
@@ -1349,6 +1430,11 @@ _GLOBAL(__setup_base_ivors)
 
 	blr
 
+_GLOBAL(setup_altivec_ivors)
+	SET_IVOR(32, 0x200) /* AltiVec Unavailable */
+	SET_IVOR(33, 0x220) /* AltiVec Assist */
+	blr
+
 _GLOBAL(setup_perfmon_ivor)
 	SET_IVOR(35, 0x260) /* Performance Monitor */
 	blr
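[Editor's note] The altivec_unavailable handler implements the usual lazy-state pattern: a fault from user mode just loads the thread's vector registers and returns to the faulting instruction, while a kernel-mode fault is treated as an error. A toy userspace model of that branch, illustrative only (the real logic is the asm above):

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy model of the "AltiVec unavailable" fast path: a user-mode fault
     * loads the vector state and returns; a kernel-mode fault is an error. */
    struct thread {
            bool msr_pr;            /* faulted from user mode? */
            bool vec_loaded;
    };

    static const char *altivec_unavailable(struct thread *t)
    {
            if (t->msr_pr) {
                    t->vec_loaded = true;   /* load_up_altivec */
                    return "fast_exception_return";
            }
            return "altivec_unavailable_exception (kernel used AltiVec?)";
    }

    int main(void)
    {
            struct thread user = { .msr_pr = true }, kern = { .msr_pr = false };

            printf("user fault   -> %s\n", altivec_unavailable(&user));
            printf("kernel fault -> %s\n", altivec_unavailable(&kern));
            return 0;
    }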
diff --git a/arch/powerpc/kernel/fsl_booke_cache.S b/arch/powerpc/kernel/fsl_booke_cache.S
new file mode 100644
index 0000000..b295b0a
--- /dev/null
+++ b/arch/powerpc/kernel/fsl_booke_cache.S
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2009-2012 Freescale Semiconductor, Inc.
+ *	Scott Wood <scottwood@freescale.com>
+ *	Dave Liu <daveliu@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+	.section .text
+
+/******** L1 Cache ********/
+
+/* flush L1 d-cache */
+_GLOBAL(flush_dcache_L1)
+	mfspr	r3,SPRN_L1CFG0
+
+	rlwinm	r5,r3,9,3	/* Extract cache block size */
+	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
+				 * are currently defined.
+				 */
+	li	r4,32
+	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
+				 *      log2(number of ways)
+				 */
+	slw	r5,r4,r5	/* r5 = cache block size */
+
+	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
+	mulli	r7,r7,13	/* An 8-way cache will require 13
+				 * loads per set.
+				 */
+	slw	r7,r7,r6
+
+	/* save off HID0 and set DCFA */
+	mfspr	r8,SPRN_HID0
+	ori	r9,r8,HID0_DCFA@l
+	mtspr	SPRN_HID0,r9
+	isync
+
+	LOAD_REG_IMMEDIATE(r4, KERNELBASE)
+	mtctr	r7
+
+1:	lwz	r3,0(r4)	/* Load... */
+	add	r4,r4,r5
+	bdnz	1b
+
+	msync
+	LOAD_REG_IMMEDIATE(r4, KERNELBASE)
+	mtctr	r7
+
+1:	dcbf	0,r4		/* ...and flush. */
+	add	r4,r4,r5
+	bdnz	1b
+
+	/* restore HID0 */
+	mtspr	SPRN_HID0,r8
+	isync
+
+	blr
+
+#define PVR_E6500	0x8040
+
+/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
+_GLOBAL(__flush_disable_L1)
+/* L1 Data Cache of e6500 contains no modified data, no flush is required */
+	mfspr	r3, SPRN_PVR
+	rlwinm	r4, r3, 16, 0xffff
+	lis	r5, 0
+	ori	r5, r5, PVR_E6500@l
+	cmpw	r4, r5
+	beq	2f
+	mflr	r10
+	bl	flush_dcache_L1	/* Flush L1 d-cache */
+	mtlr	r10
+
+2:	msync
+	mfspr	r4, SPRN_L1CSR0	/* Invalidate and disable d-cache */
+	li	r5, 2
+	rlwimi	r4, r5, 0, 3
+
+	msync
+	isync
+	mtspr	SPRN_L1CSR0, r4
+	isync
+
+	msync
+1:	mfspr	r4, SPRN_L1CSR0	/* Wait for the invalidate to finish */
+	andi.	r4, r4, 2
+	bne	1b
+
+	msync
+	mfspr	r4, SPRN_L1CSR1	/* Invalidate and disable i-cache */
+	li	r5, 2
+	rlwimi	r4, r5, 0, 3
+
+	msync
+	isync
+	mtspr	SPRN_L1CSR1, r4
+	isync
+	msync
+
+	blr
+
+/******** Backside L2 Cache ********/
+
+#define SVR_P2040	0x821000
+
+need_L2_cache:
+	/* skip L2 cache on P2040/P2040E as they have no L2 cache */
+	mfspr	r3, SPRN_SVR
+	/* shift right by 8 bits and clear E bit of SVR */
+	rlwinm	r4, r3, 24, ~0x800
+
+	lis	r3, SVR_P2040@h
+	ori	r3, r3, SVR_P2040@l
+	cmpw	r4, r3
+	beq	1f
+
+	/* If L2 cache is disabled, skip it */
+	mfspr	r3, SPRN_L2CSR0
+	andis.	r3, r3, L2CSR0_L2E@h
+	beq	1f
+
+	li	r3, 0
+	blr
+1:
+	li	r3, 1
+	blr
+
+/* flush backside L2 cache */
+_GLOBAL(flush_backside_L2_cache)
+	mflr	r10
+	bl	need_L2_cache
+	mtlr	r10
+	cmpwi	r3, 0
+	bne	2f
+
+__flush_backside_L2_cache:
+	/* Flush the L2 cache */
+	mfspr	r3, SPRN_L2CSR0
+	ori	r3, r3, L2CSR0_L2FL@l
+	msync
+	isync
+	mtspr	SPRN_L2CSR0,r3
+	isync
+1:
+	mfspr	r3,SPRN_L2CSR0
+	andi.	r3, r3, L2CSR0_L2FL@l
+	bne	1b
+2:
+	blr
+
+/* flush and disable backside L2 cache */
+_GLOBAL(disable_backside_L2_cache)
+	mflr	r10
+	bl	need_L2_cache
+	mtlr	r10
+	cmpwi	r3, 0
+	bne	1f
+
+	mflr	r10
+	bl	__flush_backside_L2_cache
+	mtlr	r10
+
+	/* disable L2 cache */
+	li	r3, 0
+	msync
+	isync
+	mtspr	SPRN_L2CSR0, r3
+	isync
+1:
+	blr
+
+/******** Platform Cache ********/
+
+#define L2CTL_L2E	0x80000000
+#define L2CTL_L2I	0x40000000
+
+/* r3 = virtual address of L2 controller, WIMG = 01xx */
+_GLOBAL(flush_disable_L2)
+	/* It's a write-through cache, so only invalidation is needed. */
+	mbar
+	isync
+	lwz	r4, 0(r3)
+	li	r5, 1
+	rlwimi	r4, r5, 30, L2CTL_L2E | L2CTL_L2I
+	stw	r4, 0(r3)
+
+	/* Wait for the invalidate to finish */
+1:	lwz	r4, 0(r3)
+	andis.	r4, r4, L2CTL_L2I@h
+	bne	1b
+	mbar
+
+	blr
+
+/* r3 = virtual address of L2 controller, WIMG = 01xx */
+_GLOBAL(invalidate_enable_L2)
+	mbar
+	isync
+	lwz	r4, 0(r3)
+	li	r5, 3
+	rlwimi	r4, r5, 30, L2CTL_L2E | L2CTL_L2I
+	stw	r4, 0(r3)
+
+	/* Wait for the invalidate to finish */
+1:	lwz	r4, 0(r3)
+	andis.	r4, r4, L2CTL_L2I@h
+	bne	1b
+	mbar
+
+	blr
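[Editor's note] flush_dcache_L1 sizes its load loop from L1CFG0 as (KiB of cache) * 13 << (2 - cbsize_enc), i.e. 13 loads per set for an 8-way cache, where cbsize_enc is 0 for 32-byte and 1 for 64-byte blocks. A standalone check of that arithmetic with example geometry (32 KiB, 8 ways, 64-byte blocks):

    #include <stdio.h>

    /* Reproduces the load-count arithmetic in flush_dcache_L1. */
    static unsigned int flush_loads(unsigned int kib, unsigned int cbsize_enc)
    {
            /* cbsize_enc: 0 = 32-byte blocks, 1 = 64-byte blocks (L1CFG0) */
            return (kib * 13) << (2 - cbsize_enc);
    }

    int main(void)
    {
            unsigned int kib = 32, enc = 1;             /* 32 KiB, 64-byte blocks */
            unsigned int sets = kib * 1024 / (64 * 8);  /* 8 ways */

            printf("%u loads for %u sets -> %u loads/set\n",
                   flush_loads(kib, enc), sets, flush_loads(kib, enc) / sets);
            return 0;   /* prints: 832 loads for 64 sets -> 13 loads/set */
    }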
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index a92c79b..9638e50 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -60,6 +60,10 @@ skpinv:	addi	r6,r6,1		/* Increment */
 	cmpw	r6,r9		/* Are we done? */
 	bne	1b		/* If not, repeat */
 
+#ifdef CONFIG_PPC_E500MC
+	/* Some chips can't handle tlbivax due to erratum A-004827 */
+	tlbilxlpid
+#else
 	/* Invalidate TLB0 */
 	li	r6,0x04
 	tlbivax 0,r6
@@ -68,6 +72,7 @@ skpinv:	addi	r6,r6,1		/* Increment */
 	li	r6,0x0c
 	tlbivax 0,r6
 	TLBSYNC
+#endif
 
 /* 3. Setup a temp mapping and jump to it */
 	andi.	r5, r3, 0x1	/* Find an entry not used and is non-zero */
@@ -147,10 +152,15 @@ skpinv:	addi	r6,r6,1		/* Increment */
 	rlwinm	r6,r6,0,2,0	/* clear IPROT */
 	mtspr	SPRN_MAS1,r6
 	tlbwe
+#ifdef CONFIG_PPC_E500MC
+	/* Some chips can't handle tlbivax due to erratum A-004827 */
+	tlbilxlpid
+#else
 	/* Invalidate TLB1 */
 	li	r9,0x0c
 	tlbivax 0,r9
 	TLBSYNC
+#endif
 
 	/* The mapping only needs to be cache-coherent on SMP */
 #ifdef CONFIG_SMP
@@ -229,7 +239,12 @@ next_tlb_setup:
 	rlwinm	r8,r8,0,2,0	/* clear IPROT */
 	mtspr	SPRN_MAS1,r8
 	tlbwe
+#ifdef CONFIG_PPC_E500MC
+	/* Some chips can't handle tlbivax due to erratum A-004827 */
+	tlbilxlpid
+#else
 	/* Invalidate TLB1 */
 	li	r9,0x0c
 	tlbivax 0,r9
 	TLBSYNC
+#endif
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 116f086..9342e97 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -161,6 +161,21 @@ exception_marker:
 #include "exceptions-64s.S"
 #endif
 
+#ifdef CONFIG_PPC_BOOK3E
+_GLOBAL(fsl_secondary_thread_init)
+BEGIN_FTR_SECTION
+	/* Enable branch prediction */
+	lis	r3,BUCSR_INIT@h
+	ori	r3,r3,BUCSR_INIT@l
+	mtspr	SPRN_BUCSR,r3
+	isync
+
+	mfspr	r3, SPRN_PIR
+	rlwimi	r3, r3, 30, 2, 30
+	mtspr	SPRN_PIR, r3
+END_FTR_SECTION_IFSET(CPU_FTR_SMT)
+#endif
+
 _GLOBAL(generic_secondary_thread_init)
 	mr	r24,r3
 
@@ -169,6 +184,7 @@ _GLOBAL(generic_secondary_thread_init)
 
 	/* get a valid TOC pointer, wherever we're mapped at */
 	bl	.relative_toc
+	tovirt(r2,r2)
 
 #ifdef CONFIG_PPC_BOOK3E
 	/* Book3E initialization */
@@ -195,6 +211,7 @@ _GLOBAL(generic_secondary_smp_init)
 
 	/* get a valid TOC pointer, wherever we're mapped at */
 	bl	.relative_toc
+	tovirt(r2,r2)
 
 #ifdef CONFIG_PPC_BOOK3E
 	/* Book3E initialization */
@@ -295,7 +312,7 @@ _STATIC(__mmu_off)
  *
  *  r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
  *                in r3...r7
- * 
+ *
  *  r5 == NULL -> kexec style entry. r3 is a physical pointer to the
  *                DT block, r4 is a physical pointer to the kernel itself
  *
@@ -515,7 +532,7 @@ __secondary_start_pmac_0:
 	b	1f
 	li	r24,3
 1:
-	
+
 _GLOBAL(pmac_secondary_start)
 	/* turn on 64-bit mode */
 	bl	.enable_64b_mode
@@ -531,6 +548,7 @@ _GLOBAL(pmac_secondary_start)
 
 	/* get TOC pointer (real address) */
 	bl	.relative_toc
+	tovirt(r2,r2)
 
 	/* Copy some CPU settings from CPU 0 */
 	bl	.__restore_cpu_ppc970
@@ -620,7 +638,7 @@ __secondary_start:
 	RFI
 	b	.	/* prevent speculative execution */
 
-/* 
+/*
  * Running with relocation on at this point.  All we want to do is
  * zero the stack back-chain pointer and get the TOC virtual address
  * before going into C code.
@@ -665,6 +683,13 @@ _GLOBAL(enable_64b_mode)
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
+ *
+ * Note: The compiler constructs pointers using offsets from the
+ * TOC in -mcmodel=medium mode. After we relocate to 0 but before
+ * the MMU is on we need our TOC to be a virtual address otherwise
+ * these pointers will be real addresses which may get stored and
+ * accessed later with the MMU on. We use tovirt() at the call
+ * sites to handle this.
 */
 _GLOBAL(relative_toc)
 	mflr	r0
@@ -681,8 +706,9 @@ p_toc:	.llong	__toc_start + 0x8000 - 0b
 * This is where the main kernel code starts.
 */
 _INIT_STATIC(start_here_multiplatform)
-	/* set up the TOC (real address) */
-	bl	.relative_toc
+	/* set up the TOC */
+	bl	.relative_toc
+	tovirt(r2,r2)
 
 	/* Clear out the BSS. It may have been done in prom_init,
 	 * already but that's irrelevant since prom_init will soon
@@ -748,7 +774,7 @@ _INIT_STATIC(start_here_multiplatform)
 	mtspr	SPRN_SRR1,r4
 	RFI
 	b	.	/* prevent speculative execution */
-	
+
 /* This is where all platforms converge execution */
 _INIT_GLOBAL(start_here_common)
 	/* relocation is on at this point */
@@ -760,6 +786,12 @@ _INIT_GLOBAL(start_here_common)
 	/* Do more system initializations in virtual mode */
 	bl	.setup_system
 
+#ifdef CONFIG_PPC_BOOK3E
+BEGIN_FTR_SECTION
+	bl	.fsl_enable_threads
+END_FTR_SECTION_IFSET(CPU_FTR_SMT)
+#endif
+
 	/* Mark interrupts soft and hard disabled (they might be enabled
 	 * in the PACA when doing hotplug)
 	 */
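[Editor's note] The new comment above relative_toc explains the tovirt(r2,r2) calls added at each call site: with -mcmodel=medium the compiler materializes pointers as TOC-relative offsets, so a real-address TOC can leak real pointers into memory that are later dereferenced with the MMU on. A tiny sketch of the conversion; the PAGE_OFFSET constant and sample address are assumptions, and the in-kernel tovirt() is an asm macro that rewrites the linear-map high bits rather than a plain OR:

    #include <stdio.h>
    #include <stdint.h>

    /* The 64-bit linear-map base, used here only as an example value. */
    #define PAGE_OFFSET 0xc000000000000000ULL

    /* Real addresses have the top bits clear, so OR-ing in the base is
     * equivalent to the macro's rotate/ori sequence for this purpose. */
    static uint64_t tovirt(uint64_t real)
    {
            return real | PAGE_OFFSET;
    }

    int main(void)
    {
            uint64_t r2_real = 0x1f08000;   /* hypothetical TOC real address */

            printf("r2 before: 0x%llx\n", (unsigned long long)r2_real);
            printf("r2 after:  0x%llx\n", (unsigned long long)tovirt(r2_real));
            return 0;
    }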
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 6f62a73..58925b6 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -987,80 +987,6 @@ _GLOBAL(set_context)
 	isync			/* Force context change */
 	blr
 
-_GLOBAL(flush_dcache_L1)
-	mfspr	r3,SPRN_L1CFG0
-
-	rlwinm	r5,r3,9,3	/* Extract cache block size */
-	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
-				 * are currently defined.
-				 */
-	li	r4,32
-	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
-				 *      log2(number of ways)
-				 */
-	slw	r5,r4,r5	/* r5 = cache block size */
-
-	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
-	mulli	r7,r7,13	/* An 8-way cache will require 13
-				 * loads per set.
-				 */
-	slw	r7,r7,r6
-
-	/* save off HID0 and set DCFA */
-	mfspr	r8,SPRN_HID0
-	ori	r9,r8,HID0_DCFA@l
-	mtspr	SPRN_HID0,r9
-	isync
-
-	lis	r4,KERNELBASE@h
-	mtctr	r7
-
-1:	lwz	r3,0(r4)	/* Load... */
-	add	r4,r4,r5
-	bdnz	1b
-
-	msync
-	lis	r4,KERNELBASE@h
-	mtctr	r7
-
-1:	dcbf	0,r4		/* ...and flush. */
-	add	r4,r4,r5
-	bdnz	1b
-
-	/* restore HID0 */
-	mtspr	SPRN_HID0,r8
-	isync
-
-	blr
-
-/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
-_GLOBAL(__flush_disable_L1)
-	mflr	r10
-	bl	flush_dcache_L1	/* Flush L1 d-cache */
-	mtlr	r10
-
-	mfspr	r4, SPRN_L1CSR0	/* Invalidate and disable d-cache */
-	li	r5, 2
-	rlwimi	r4, r5, 0, 3
-
-	msync
-	isync
-	mtspr	SPRN_L1CSR0, r4
-	isync
-
-1:	mfspr	r4, SPRN_L1CSR0	/* Wait for the invalidate to finish */
-	andi.	r4, r4, 2
-	bne	1b
-
-	mfspr	r4, SPRN_L1CSR1	/* Invalidate and disable i-cache */
-	li	r5, 2
-	rlwimi	r4, r5, 0, 3
-
-	mtspr	SPRN_L1CSR1, r4
-	isync
-
-	blr
-
 #ifdef CONFIG_SMP
 /* When we get here, r24 needs to hold the CPU # */
 	.globl __secondary_start
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index 4c7cb400..ee1f024 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -16,17 +16,19 @@
 #include <asm/ppc-opcode.h>
 #include <asm/processor.h>
 #include <asm/thread_info.h>
+#include <asm/epapr_hcalls.h>
 
 /* 64-bit version only for now */
 #ifdef CONFIG_PPC64
 
-_GLOBAL(book3e_idle)
+.macro BOOK3E_IDLE name loop
+_GLOBAL(\name)
 	/* Save LR for later */
 	mflr	r0
 	std	r0,16(r1)
 
 	/* Hard disable interrupts */
-	wrteei	0
+	fsl_erratum_a006198_wrteei0 r0 r3
 
 	/* Now check if an interrupt came in while we were soft disabled
 	 * since we may otherwise lose it (doorbells etc...).
@@ -67,7 +69,33 @@ _GLOBAL(book3e_idle)
 	/* We can now re-enable hard interrupts and go to sleep */
 	wrteei	1
-1:	PPC_WAIT(0)
+	\loop
+
+.endm
+
+.macro BOOK3E_IDLE_LOOP
+1:
+	PPC_WAIT(0)
 	b	1b
+.endm
+
+/* epapr_ev_idle_start below is patched with the proper hcall
+   opcodes during kernel initialization */
+.macro EPAPR_EV_IDLE_LOOP
+idle_loop:
+	LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE))
+
+.global epapr_ev_idle_start
+epapr_ev_idle_start:
+	li	r3, -1
+	nop
+	nop
+	nop
+	b	idle_loop
+.endm
+
+BOOK3E_IDLE epapr_ev_idle EPAPR_EV_IDLE_LOOP
+
+BOOK3E_IDLE book3e_idle BOOK3E_IDLE_LOOP
 
 #endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 39b77e8..b863b87 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -594,3 +594,6 @@ _GLOBAL(kexec_sequence)
 	li	r5,0
 	blr	/* image->start(physid, image->start, 0); */
 #endif /* CONFIG_KEXEC */
+
+_GLOBAL(fsl_erratum_a006198_return)
+	blr
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 9f44a77..6ee59a0 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -386,6 +386,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 				| (value & 0xffff);
 			break;
 
+		case R_PPC64_TOC16_LO:
+			/* Subtract TOC pointer */
+			value -= my_r2(sechdrs, me);
+			*((uint16_t *) location)
+				= (*((uint16_t *) location) & ~0xffff)
+				| (value & 0xffff);
+			break;
+
 		case R_PPC64_TOC16_DS:
 			/* Subtract TOC pointer */
 			value -= my_r2(sechdrs, me);
@@ -399,6 +407,28 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 				| (value & 0xfffc);
 			break;
 
+		case R_PPC64_TOC16_LO_DS:
+			/* Subtract TOC pointer */
+			value -= my_r2(sechdrs, me);
+			if ((value & 3) != 0) {
+				printk("%s: bad TOC16_LO_DS relocation (%lu)\n",
+				       me->name, value);
+				return -ENOEXEC;
+			}
+			*((uint16_t *) location)
+				= (*((uint16_t *) location) & ~0xfffc)
+				| (value & 0xfffc);
+			break;
+
+		case R_PPC64_TOC16_HA:
+			/* Subtract TOC pointer */
+			value -= my_r2(sechdrs, me);
+			value = ((value + 0x8000) >> 16);
+			*((uint16_t *) location)
+				= (*((uint16_t *) location) & ~0xffff)
+				| (value & 0xffff);
+			break;
+
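[Editor's note] The new R_PPC64_TOC16_HA case pre-biases the value with 0x8000 before taking the high half, so that the paired @l relocation, applied as a signed 16-bit offset, reconstructs the full displacement even when the low half has bit 15 set. A standalone check with an arbitrary offset whose low half would otherwise corrupt the high half:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t value = 0x1234f678;    /* example offset from the TOC pointer */

            /* R_PPC64_TOC16_HA: bias by 0x8000, keep the high half */
            uint16_t ha = (value + 0x8000) >> 16;   /* 0x1235, not 0x1234 */
            /* R_PPC64_TOC16_LO: keep the low half */
            uint16_t lo = value & 0xffff;           /* 0xf678 = -2440 signed */

            /* hardware applies lo as a *signed* displacement off (ha << 16) */
            int32_t reconstructed = ((int32_t)ha << 16) + (int16_t)lo;

            printf("ha=0x%04x lo=0x%04x -> 0x%08x\n",
                   ha, lo, (uint32_t)reconstructed);  /* 0x1234f678 again */
            return 0;
    }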
		case R_PPC_REL24:
			/* FIXME: Handle weak symbols here --RR */
			if (sym->st_shndx == SHN_UNDEF) {
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index cd6da85..08ee413 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -146,6 +146,11 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 #ifdef CONFIG_PPC_STD_MMU_64
 	new_paca->slb_shadow_ptr = &slb_shadow[cpu];
 #endif /* CONFIG_PPC_STD_MMU_64 */
+
+#ifdef CONFIG_PPC_BOOK3E
+	/* For now -- if we have threads this will be adjusted later */
+	new_paca->tlb_per_core_ptr = (uintptr_t)&new_paca->tlb_per_core;
+#endif
 }
 
 /* Put the paca pointer into r13 and SPRG_PACA */
@@ -166,7 +171,6 @@ void setup_paca(struct paca_struct *new_paca)
 	mtspr(SPRN_SPRG_HPACA, local_paca);
 #endif
 	mtspr(SPRN_SPRG_PACA, local_paca);
-
 }
 
 static int __initdata paca_size;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 78b8766..d45b14d 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -196,3 +196,8 @@ EXPORT_SYMBOL_GPL(mmu_psize_defs);
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
+
+#ifdef CONFIG_FSL_ERRATUM_A_006198
+void fsl_erratum_a006198_return(void);
+EXPORT_SYMBOL(fsl_erratum_a006198_return);
+#endif
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b6f7a9..35bcfa7 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -308,12 +308,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 	/* Get physical cpuid */
 	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
-	if (intserv) {
-		nthreads = len / sizeof(int);
-	} else {
-		intserv = of_get_flat_dt_prop(node, "reg", NULL);
-		nthreads = 1;
-	}
+	if (!intserv)
+		intserv = of_get_flat_dt_prop(node, "reg", &len);
+
+	nthreads = len / sizeof(int);
 
 	/*
 	 * Now see if any of these threads match our boot cpu.
@@ -848,23 +846,18 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
 		intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
 				&plen);
 		if (intserv == NULL) {
-			const u32 *reg = of_get_property(np, "reg", NULL);
-			if (reg == NULL)
+			intserv = of_get_property(np, "reg", &plen);
+			if (intserv == NULL)
 				continue;
-			if (*reg == hardid) {
+		}
+
+		plen /= sizeof(u32);
+		for (t = 0; t < plen; t++) {
+			if (hardid == intserv[t]) {
 				if (thread)
-					*thread = 0;
+					*thread = t;
 				return np;
 			}
-		} else {
-			plen /= sizeof(u32);
-			for (t = 0; t < plen; t++) {
-				if (hardid == intserv[t]) {
-					if (thread)
-						*thread = t;
-					return np;
-				}
-			}
 		}
 	}
 	return NULL;
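[Editor's note] After this change both prom.c paths treat "reg" exactly like ibm,ppc-interrupt-server#s: a flat array of cell values scanned for the hardware id, with the matching index becoming the thread number. A standalone model of that scan over made-up property data:

    #include <stdio.h>
    #include <stdint.h>

    /* Models the unified lookup in of_get_cpu_node(): scan the cell list
     * for the hardware id; the matching index is the thread number. */
    static int find_thread(const uint32_t *intserv, int plen_bytes,
                           uint32_t hardid, int *thread)
    {
            int n = plen_bytes / (int)sizeof(uint32_t);

            for (int t = 0; t < n; t++) {
                    if (intserv[t] == hardid) {
                            *thread = t;
                            return 1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            uint32_t servers[] = { 4, 5 };  /* e.g. two threads of one core */
            int thread;

            if (find_thread(servers, sizeof(servers), 5, &thread))
                    printf("hard id 5 -> thread %d\n", thread);
            return 0;
    }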
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index bdc499c..1f6cbae0c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -444,16 +444,19 @@ void __init smp_setup_cpu_maps(void)
 		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
 				&len);
 		if (intserv) {
-			nthreads = len / sizeof(int);
 			DBG("    ibm,ppc-interrupt-server#s -> %d threads\n",
 			    nthreads);
 		} else {
 			DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
-			intserv = of_get_property(dn, "reg", NULL);
-			if (!intserv)
+			intserv = of_get_property(dn, "reg", &len);
+			if (!intserv) {
 				intserv = &cpu;	/* assume logical == phys */
+				len = 4;
+			}
 		}
 
+		nthreads = len / sizeof(int);
+
 		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
 			DBG("    thread %d -> cpu %d (hard id %d)\n",
 			    j, cpu, intserv[j]);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a8f54ec..1464655 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -38,6 +38,7 @@
 #include <asm/serial.h>
 #include <asm/udbg.h>
 #include <asm/mmu_context.h>
+#include <asm/epapr_hcalls.h>
 
 #include "setup.h"
 
@@ -327,4 +328,5 @@ void __init setup_arch(char **cmdline_p)
 
 	/* Initialize the MMU context management stuff */
 	mmu_context_init();
+	epapr_paravirt_init();
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8d97eb4..c213b45 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -66,6 +66,7 @@
 #include <asm/code-patching.h>
 #include <asm/kvm_ppc.h>
 #include <asm/hugetlb.h>
+#include <asm/epapr_hcalls.h>
 
 #include "setup.h"
 
@@ -102,6 +103,30 @@ int ucache_bsize;
 
 static char *smt_enabled_cmdline;
 
+#ifdef CONFIG_PPC_BOOK3E
+static void setup_tlb_per_core(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		int first = cpu_first_thread_sibling(cpu);
+
+		paca[cpu].tlb_per_core_ptr =
+			(uintptr_t)&paca[first].tlb_per_core;
+
+		/* If we have threads but no tlbsrx., use a per-core lock */
+		if (smt_enabled_at_boot >= 2 &&
+		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV))
+			paca[cpu].tlb_per_core_ptr |= TLB_PER_CORE_HAS_LOCK;
+	}
+}
+#else
+static void setup_tlb_per_core(void)
+{
+}
+#endif
+
+
 /* Look for ibm,smt-enabled OF option */
 static void check_smt_enabled(void)
 {
@@ -142,6 +167,8 @@ static void check_smt_enabled(void)
 			of_node_put(dn);
 		}
 	}
+
+	setup_tlb_per_core();
 }
 
 /* Look for smt-enabled= cmdline option */
@@ -429,7 +456,11 @@ void __init setup_system(void)
 	smp_setup_cpu_maps();
 	check_smt_enabled();
 
-#ifdef CONFIG_SMP
+	/*
+	 * Freescale Book3e parts spin in a loop provided by firmware,
+	 * so smp_release_cpus() does nothing for them
+	 */
+#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_FSL_BOOK3E)
 	/* Release secondary cpus out of their spinloops at 0x60 now that
 	 * we can map physical -> logical CPU ids
 	 */
@@ -605,6 +636,8 @@ void __init setup_arch(char **cmdline_p)
 	/* Initialize the MMU context management stuff */
 	mmu_context_init();
 
+	epapr_paravirt_init();
+
 	kvm_linear_init();
 
 	/* Interrupt code needs to be 64K-aligned */
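[Editor's note] setup_tlb_per_core() points every thread at the first sibling's tlb_per_core area and, when a software lock is needed (threads enabled but no tlbsrx.), packs a flag into the low bit of the pointer, which is safe because the structure is aligned. A standalone sketch; TLB_PER_CORE_HAS_LOCK's actual value is not visible in this diff, so 1 is assumed:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed value for illustration; the diff only shows the name. */
    #define TLB_PER_CORE_HAS_LOCK 1UL

    struct tlb_per_core { int esel_next, esel_max, esel_first, lock; };

    int main(void)
    {
            static struct tlb_per_core core0;   /* aligned, so bit 0 is free */
            uintptr_t ptr = (uintptr_t)&core0;
            int need_lock = 1;  /* smt >= 2 and no MMU_FTR_USE_TLBRSRV */

            if (need_lock)
                    ptr |= TLB_PER_CORE_HAS_LOCK;

            printf("lock flag: %lu, struct at %p\n",
                   (unsigned long)(ptr & TLB_PER_CORE_HAS_LOCK),
                   (void *)(ptr & ~TLB_PER_CORE_HAS_LOCK));
            return 0;
    }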
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 793401e..e6f5d08 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -81,6 +81,29 @@ int smt_enabled_at_boot = 1;
 
 static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
 
+/*
+ * Returns 1 if the specified cpu should be brought up during boot.
+ * Used to inhibit booting threads if they've been disabled or
+ * limited on the command line
+ */
+int smp_generic_cpu_bootable(unsigned int nr)
+{
+	/* Special case - we inhibit secondary thread startup
+	 * during boot if the user requests it.
+	 */
+	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
+		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+			return 0;
+		if (smt_enabled_at_boot
+		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
+			return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(smp_generic_cpu_bootable);
+
+
 #ifdef CONFIG_PPC64
 int smp_generic_kick_cpu(int nr)
 {
@@ -381,14 +404,31 @@ int generic_cpu_disable(void)
 	return 0;
 }
 
+/**
+ * platform_cpu_die() - do platform related operations on the boot cpu, after
+ *	the cpu_state of the dying cpu is assigned to CPU_DEAD. Platform
+ *	implementations can override this.
+ *
+ * @cpu:	the cpu to die
+ */
+void __attribute__ ((weak)) platform_cpu_die(unsigned int cpu)
+{
+	return;
+}
+
 void generic_cpu_die(unsigned int cpu)
 {
 	int i;
 
 	for (i = 0; i < 100; i++) {
 		smp_rmb();
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+			platform_cpu_die(cpu);
+#ifdef CONFIG_PPC64
+			paca[cpu].cpu_start = 0;
+#endif
 			return;
+		}
 		msleep(100);
 	}
 	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
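[Editor's note] smp_generic_cpu_bootable() reduces to two rules once the boot-time and SMT-capable guards pass: smt=off boots only thread 0, and smt=N boots threads 0..N-1. A standalone table of outcomes for a two-thread core (the system_state and CPU_FTR_SMT checks are dropped for brevity):

    #include <stdio.h>

    /* Mirrors the thread-limiting logic added above. */
    static int bootable(int smt_enabled_at_boot, int thread_in_core)
    {
            if (!smt_enabled_at_boot && thread_in_core != 0)
                    return 0;
            if (smt_enabled_at_boot && thread_in_core >= smt_enabled_at_boot)
                    return 0;
            return 1;
    }

    int main(void)
    {
            for (int smt = 0; smt <= 2; smt++)
                    printf("smt=%d: thread0=%d thread1=%d\n",
                           smt, bootable(smt, 0), bootable(smt, 1));
            return 0;
    }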
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 86ac1d9..608e4ceb 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -46,10 +46,29 @@
 #define SL_r29		0xe8
 #define SL_r30		0xf0
 #define SL_r31		0xf8
-#define SL_SIZE		SL_r31+8
+#define SL_SPRG0	0x100
+#define SL_SPRG1	0x108
+#define SL_SPRG2	0x110
+#define SL_SPRG3	0x118
+#define SL_SPRG4	0x120
+#define SL_SPRG5	0x128
+#define SL_SPRG6	0x130
+#define SL_SPRG7	0x138
+#define SL_TCR		0x140
+#define SL_SIZE		SL_TCR+8
 
 /* these macros rely on the save area being
  * pointed to by r11 */
+
+#define SAVE_SPR(register) \
+	mfspr	r0,SPRN_##register	;\
+	std	r0,SL_##register(r11)
+#define RESTORE_SPR(register) \
+	ld	r0,SL_##register(r11)	;\
+	mtspr	SPRN_##register,r0
+#define RESTORE_SPRG(n) \
+	ld	r0,SL_SPRG##n(r11)	;\
+	mtsprg	n,r0
 #define SAVE_SPECIAL(special) \
 	mf##special	r0 	;\
 	std	r0, SL_##special(r11)
@@ -103,8 +122,21 @@ _GLOBAL(swsusp_arch_suspend)
 	SAVE_REGISTER(r30)
 	SAVE_REGISTER(r31)
 	SAVE_SPECIAL(MSR)
-	SAVE_SPECIAL(SDR1)
 	SAVE_SPECIAL(XER)
+#ifdef CONFIG_PPC_BOOK3S_64
+	SAVE_SPECIAL(SDR1)
+#else
+	SAVE_SPR(TCR)
+	/* Save SPRGs */
+	SAVE_SPR(SPRG0)
+	SAVE_SPR(SPRG1)
+	SAVE_SPR(SPRG2)
+	SAVE_SPR(SPRG3)
+	SAVE_SPR(SPRG4)
+	SAVE_SPR(SPRG5)
+	SAVE_SPR(SPRG6)
+	SAVE_SPR(SPRG7)
+#endif
 
 	/* we push the stack up 128 bytes but don't store the
 	 * stack pointer on the stack like a real stackframe */
@@ -151,6 +183,7 @@ copy_page_loop:
 	bne+	copyloop
 nothing_to_copy:
+#ifdef CONFIG_PPC_BOOK3S_64
 	/* flush caches */
 	lis	r3, 0x10
 	mtctr	r3
@@ -167,6 +200,7 @@ nothing_to_copy:
 
 	sync
 	tlbia
+#endif
 
 	ld	r11,swsusp_save_area_ptr@toc(r2)
 
@@ -208,16 +242,42 @@ nothing_to_copy:
 	RESTORE_REGISTER(r29)
 	RESTORE_REGISTER(r30)
 	RESTORE_REGISTER(r31)
+
+#ifdef CONFIG_PPC_BOOK3S_64
 	/* can't use RESTORE_SPECIAL(MSR) */
 	ld	r0, SL_MSR(r11)
 	mtmsrd	r0, 0
 	RESTORE_SPECIAL(SDR1)
+#else
+	/* Restore SPRGs */
+	RESTORE_SPRG(0)
+	RESTORE_SPRG(1)
+	RESTORE_SPRG(2)
+	RESTORE_SPRG(3)
+	RESTORE_SPRG(4)
+	RESTORE_SPRG(5)
+	RESTORE_SPRG(6)
+	RESTORE_SPRG(7)
+
+	RESTORE_SPECIAL(MSR)
+
+	/* Restore TCR and clear any pending bits in TSR. */
+	RESTORE_SPR(TCR)
+	lis	r0, (TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS)@h
+	mtspr	SPRN_TSR,r0
+
+	/* Kick decrementer */
+	li	r0,1
+	mtdec	r0
+#endif
 	RESTORE_SPECIAL(XER)
 
 	sync
 
 	addi	r1,r1,-128
+#ifdef CONFIG_PPC_BOOK3S_64
 	bl	slb_flush_and_rebolt
+#endif
 	bl	do_after_copyback
 	addi	r1,r1,128
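[Editor's note] The new SL_* slots extend the save area past the existing 0x100 bytes of GPRs and specials; mirroring them as a struct shows where SL_SIZE = SL_TCR + 8 comes from. Layout sketch only, with the first 0x100 bytes collapsed into one opaque member:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Mirrors the SL_* offsets: SPRG0..SPRG7 at 0x100..0x138, TCR at 0x140. */
    struct swsusp_save_area {
            uint8_t  gprs_and_specials[0x100];  /* GPRs, MSR, XER, ... (elided) */
            uint64_t sprg[8];                   /* SL_SPRG0 .. SL_SPRG7 */
            uint64_t tcr;                       /* SL_TCR */
    };

    int main(void)
    {
            printf("SL_SPRG0 = 0x%zx\n", offsetof(struct swsusp_save_area, sprg));
            printf("SL_TCR   = 0x%zx\n", offsetof(struct swsusp_save_area, tcr));
            printf("SL_SIZE  = 0x%zx\n", sizeof(struct swsusp_save_area));
            return 0;   /* prints 0x100, 0x140, 0x148 = SL_TCR + 8 */
    }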