author     Benjamin Herrenschmidt <benh@kernel.crashing.org>    2005-11-07 00:06:55 (GMT)
committer  Linus Torvalds <torvalds@g5.osdl.org>                2005-11-07 00:56:47 (GMT)
commit     3c726f8dee6f55e96475574e9f645327e461884c (patch)
tree       f67c381e8f57959aa4a94bda4c68e24253cd8171 /arch/powerpc/kernel
parent     f912696ab330bf539231d1f8032320f2a08b850f (diff)
download   linux-3c726f8dee6f55e96475574e9f645327e461884c.tar.xz
[PATCH] ppc64: support 64k pages
Adds a new CONFIG_PPC_64K_PAGES which, when enabled, changes the kernel
base page size to 64K. The resulting kernel still boots on any
hardware. On current machines that support only 4K pages, the kernel
transparently maintains 16 4K "subpages" for each 64K page.
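To make the subpage bookkeeping concrete, the mapping from a 64K Linux
page to its 4K hardware subpages reduces to a shift and a mask. A
minimal standalone C sketch (the names LINUX_PAGE_SHIFT, HW_PAGE_SHIFT
and subpage_index() are illustrative, not the kernel's):

    #include <stdint.h>

    /* A 64K Linux page (shift 16) on 4K-only hardware (shift 12)
     * covers 1 << (16 - 12) = 16 hardware subpages. */
    #define LINUX_PAGE_SHIFT  16
    #define HW_PAGE_SHIFT     12
    #define SUBPAGES_PER_PAGE (1u << (LINUX_PAGE_SHIFT - HW_PAGE_SHIFT))

    /* Which of the 16 subpages an effective address falls in. */
    static inline unsigned int subpage_index(uint64_t ea)
    {
        return (ea >> HW_PAGE_SHIFT) & (SUBPAGES_PER_PAGE - 1);
    }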
Note that while real 64K-capable HW has been tested, the current patch
will not enable it yet, as such hardware is not released yet, and I'm
still verifying with the firmware architects the proper way to get the
information from the newer hypervisors.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c |   3
-rw-r--r--  arch/powerpc/kernel/cputable.c    |   4
-rw-r--r--  arch/powerpc/kernel/head_64.S     | 300
-rw-r--r--  arch/powerpc/kernel/lparmap.c     |   2
-rw-r--r--  arch/powerpc/kernel/process.c     |   6
-rw-r--r--  arch/powerpc/kernel/prom.c        |  76
-rw-r--r--  arch/powerpc/kernel/setup_64.c    |  31
7 files changed, 282 insertions, 140 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index bc5a368..b757572 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -125,6 +125,9 @@ int main(void)
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_PPC_64K_PAGES
+	DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
 	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
 	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index b91345f..33c63bc 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -240,7 +240,7 @@ struct cpu_spec cpu_specs[] = {
 		.oprofile_model		= &op_model_power4,
 #endif
 	},
-	{	/* Power5 */
+	{	/* Power5 GR */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x003a0000,
 		.cpu_name		= "POWER5 (gr)",
@@ -255,7 +255,7 @@ struct cpu_spec cpu_specs[] = {
 		.oprofile_model		= &op_model_power4,
 #endif
 	},
-	{	/* Power5 */
+	{	/* Power5 GS */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x003b0000,
 		.cpu_name		= "POWER5 (gs)",
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 45d8197..16ab40d 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -195,11 +195,11 @@ exception_marker:
 #define EX_R12		24
 #define EX_R13		32
 #define EX_SRR0		40
-#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
 #define EX_DAR		48
-#define EX_LR		48	/* SLB miss saves LR, but not DAR */
 #define EX_DSISR	56
 #define EX_CCR		60
+#define EX_R3		64
+#define EX_LR		72
 
 #define EXCEPTION_PROLOG_PSERIES(area, label)				\
 	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
@@ -419,17 +419,22 @@ data_access_slb_pSeries:
 	mtspr	SPRN_SPRG1,r13
 	RUNLATCH_ON(r13)
 	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	mfspr	r3,SPRN_DAR
 	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
+	mfcr	r9
+#ifdef __DISABLED__
+	/* Keep that around for when we re-implement dynamic VSIDs */
+	cmpdi	r3,0
+	bge	slb_miss_user_pseries
+#endif /* __DISABLED__ */
 	std	r10,PACA_EXSLB+EX_R10(r13)
 	std	r11,PACA_EXSLB+EX_R11(r13)
 	std	r12,PACA_EXSLB+EX_R12(r13)
-	std	r3,PACA_EXSLB+EX_R3(r13)
-	mfspr	r9,SPRN_SPRG1
-	std	r9,PACA_EXSLB+EX_R13(r13)
-	mfcr	r9
+	mfspr	r10,SPRN_SPRG1
+	std	r10,PACA_EXSLB+EX_R13(r13)
 	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-	mfspr	r3,SPRN_DAR
-	b	.do_slb_miss		/* Rel. branch works in real mode */
+	b	.slb_miss_realmode	/* Rel. branch works in real mode */
 
 	STD_EXCEPTION_PSERIES(0x400, instruction_access)
 
@@ -440,17 +445,22 @@ instruction_access_slb_pSeries:
 	mtspr	SPRN_SPRG1,r13
 	RUNLATCH_ON(r13)
 	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
 	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
+	mfcr	r9
+#ifdef __DISABLED__
+	/* Keep that around for when we re-implement dynamic VSIDs */
+	cmpdi	r3,0
+	bge	slb_miss_user_pseries
+#endif /* __DISABLED__ */
 	std	r10,PACA_EXSLB+EX_R10(r13)
 	std	r11,PACA_EXSLB+EX_R11(r13)
 	std	r12,PACA_EXSLB+EX_R12(r13)
-	std	r3,PACA_EXSLB+EX_R3(r13)
-	mfspr	r9,SPRN_SPRG1
-	std	r9,PACA_EXSLB+EX_R13(r13)
-	mfcr	r9
+	mfspr	r10,SPRN_SPRG1
+	std	r10,PACA_EXSLB+EX_R13(r13)
 	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
-	b	.do_slb_miss		/* Rel. branch works in real mode */
+	b	.slb_miss_realmode	/* Rel. branch works in real mode */
 
 	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
 	STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -509,6 +519,38 @@ _GLOBAL(do_stab_bolted_pSeries)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
 
 /*
+ * We have some room here; we use it to put
+ * the pSeries SLB miss user trampoline code reasonably far
+ * away from slb_miss_user_common to avoid problems with rfid
+ *
+ * This is used for when the SLB miss handler has to go virtual,
+ * which doesn't happen for now but will once we re-implement
+ * dynamic VSIDs for shared page tables
+ */
+#ifdef __DISABLED__
+slb_miss_user_pseries:
+	std	r10,PACA_EXGEN+EX_R10(r13)
+	std	r11,PACA_EXGEN+EX_R11(r13)
+	std	r12,PACA_EXGEN+EX_R12(r13)
+	mfspr	r10,SPRG1
+	ld	r11,PACA_EXSLB+EX_R9(r13)
+	ld	r12,PACA_EXSLB+EX_R3(r13)
+	std	r10,PACA_EXGEN+EX_R13(r13)
+	std	r11,PACA_EXGEN+EX_R9(r13)
+	std	r12,PACA_EXGEN+EX_R3(r13)
+	clrrdi	r12,r13,32
+	mfmsr	r10
+	mfspr	r11,SRR0			/* save SRR0 */
+	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
+	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
+	mtspr	SRR0,r12
+	mfspr	r12,SRR1			/* and SRR1 */
+	mtspr	SRR1,r10
+	rfid
+	b	.				/* prevent spec. execution */
+#endif /* __DISABLED__ */
+
+/*
  * Vectors for the FWNMI option.  Share common code.
  */
 	.globl system_reset_fwnmi
@@ -559,22 +601,59 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	.globl data_access_slb_iSeries
 data_access_slb_iSeries:
 	mtspr	SPRN_SPRG1,r13		/* save r13 */
-	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
 	std	r3,PACA_EXSLB+EX_R3(r13)
-	ld	r12,PACALPPACA+LPPACASRR1(r13)
 	mfspr	r3,SPRN_DAR
-	b	.do_slb_miss
+	std	r9,PACA_EXSLB+EX_R9(r13)
+	mfcr	r9
+#ifdef __DISABLED__
+	cmpdi	r3,0
+	bge	slb_miss_user_iseries
+#endif
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	mfspr	r10,SPRN_SPRG1
+	std	r10,PACA_EXSLB+EX_R13(r13)
+	ld	r12,PACALPPACA+LPPACASRR1(r13);
+	b	.slb_miss_realmode
 
 	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
 
 	.globl instruction_access_slb_iSeries
 instruction_access_slb_iSeries:
 	mtspr	SPRN_SPRG1,r13		/* save r13 */
-	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
 	std	r3,PACA_EXSLB+EX_R3(r13)
-	ld	r12,PACALPPACA+LPPACASRR1(r13)
-	ld	r3,PACALPPACA+LPPACASRR0(r13)
-	b	.do_slb_miss
+	ld	r3,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
+	std	r9,PACA_EXSLB+EX_R9(r13)
+	mfcr	r9
+#ifdef __DISABLED__
+	cmpdi	r3,0
+	bge	.slb_miss_user_iseries
+#endif
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	mfspr	r10,SPRN_SPRG1
+	std	r10,PACA_EXSLB+EX_R13(r13)
+	ld	r12,PACALPPACA+LPPACASRR1(r13);
+	b	.slb_miss_realmode
+
+#ifdef __DISABLED__
+slb_miss_user_iseries:
+	std	r10,PACA_EXGEN+EX_R10(r13)
+	std	r11,PACA_EXGEN+EX_R11(r13)
+	std	r12,PACA_EXGEN+EX_R12(r13)
+	mfspr	r10,SPRG1
+	ld	r11,PACA_EXSLB+EX_R9(r13)
+	ld	r12,PACA_EXSLB+EX_R3(r13)
+	std	r10,PACA_EXGEN+EX_R13(r13)
+	std	r11,PACA_EXGEN+EX_R9(r13)
+	std	r12,PACA_EXGEN+EX_R3(r13)
+	EXCEPTION_PROLOG_ISERIES_2
+	b	slb_miss_user_common
+#endif
 
 	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
 	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
@@ -809,6 +888,126 @@ instruction_access_common:
 	li	r5,0x400
 	b	.do_hash_page		/* Try to handle as hpte fault */
 
+/*
+ * Here is the common SLB miss user handler that is used when going to
+ * virtual mode for SLB misses; it is currently not used
+ */
+#ifdef __DISABLED__
+	.align	7
+	.globl	slb_miss_user_common
+slb_miss_user_common:
+	mflr	r10
+	std	r3,PACA_EXGEN+EX_DAR(r13)
+	stw	r9,PACA_EXGEN+EX_CCR(r13)
+	std	r10,PACA_EXGEN+EX_LR(r13)
+	std	r11,PACA_EXGEN+EX_SRR0(r13)
+	bl	.slb_allocate_user
+
+	ld	r10,PACA_EXGEN+EX_LR(r13)
+	ld	r3,PACA_EXGEN+EX_R3(r13)
+	lwz	r9,PACA_EXGEN+EX_CCR(r13)
+	ld	r11,PACA_EXGEN+EX_SRR0(r13)
+	mtlr	r10
+	beq-	slb_miss_fault
+
+	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
+	beq-	unrecov_user_slb
+	mfmsr	r10
+
+.machine push
+.machine "power4"
+	mtcrf	0x80,r9
+.machine pop
+
+	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
+	mtmsrd	r10,1
+
+	mtspr	SRR0,r11
+	mtspr	SRR1,r12
+
+	ld	r9,PACA_EXGEN+EX_R9(r13)
+	ld	r10,PACA_EXGEN+EX_R10(r13)
+	ld	r11,PACA_EXGEN+EX_R11(r13)
+	ld	r12,PACA_EXGEN+EX_R12(r13)
+	ld	r13,PACA_EXGEN+EX_R13(r13)
+	rfid
+	b	.
+
+slb_miss_fault:
+	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
+	ld	r4,PACA_EXGEN+EX_DAR(r13)
+	li	r5,0
+	std	r4,_DAR(r1)
+	std	r5,_DSISR(r1)
+	b	.handle_page_fault
+
+unrecov_user_slb:
+	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
+	DISABLE_INTS
+	bl	.save_nvgprs
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unrecoverable_exception
+	b	1b
+
+#endif /* __DISABLED__ */
+
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contains the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+	mflr	r10
+
+	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
+	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
+
+	bl	.slb_allocate_realmode
+
+	/* All done -- return from exception. */
+
+	ld	r10,PACA_EXSLB+EX_LR(r13)
+	ld	r3,PACA_EXSLB+EX_R3(r13)
+	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
+#ifdef CONFIG_PPC_ISERIES
+	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
+#endif /* CONFIG_PPC_ISERIES */
+
+	mtlr	r10
+
+	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
+	beq-	unrecov_slb
+
+.machine push
+.machine "power4"
+	mtcrf	0x80,r9
+	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
+.machine pop
+
+#ifdef CONFIG_PPC_ISERIES
+	mtspr	SPRN_SRR0,r11
+	mtspr	SPRN_SRR1,r12
+#endif /* CONFIG_PPC_ISERIES */
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	ld	r11,PACA_EXSLB+EX_R11(r13)
+	ld	r12,PACA_EXSLB+EX_R12(r13)
+	ld	r13,PACA_EXSLB+EX_R13(r13)
+	rfid
+	b	.	/* prevent speculative execution */
+
+unrecov_slb:
+	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+	DISABLE_INTS
+	bl	.save_nvgprs
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unrecoverable_exception
+	b	1b
+
 	.align	7
 	.globl hardware_interrupt_common
 	.globl hardware_interrupt_entry
@@ -1139,62 +1338,6 @@ _GLOBAL(do_stab_bolted)
 	b	.	/* prevent speculative execution */
 
 /*
- * r13 points to the PACA, r9 contains the saved CR,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(do_slb_miss)
-	mflr	r10
-
-	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
-	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
-
-	bl	.slb_allocate		/* handle it */
-
-	/* All done -- return from exception. */
-
-	ld	r10,PACA_EXSLB+EX_LR(r13)
-	ld	r3,PACA_EXSLB+EX_R3(r13)
-	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
-	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
-#endif /* CONFIG_PPC_ISERIES */
-
-	mtlr	r10
-
-	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
-	beq-	unrecov_slb
-
-.machine push
-.machine "power4"
-	mtcrf	0x80,r9
-	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
-.machine pop
-
-#ifdef CONFIG_PPC_ISERIES
-	mtspr	SPRN_SRR0,r11
-	mtspr	SPRN_SRR1,r12
-#endif /* CONFIG_PPC_ISERIES */
-	ld	r9,PACA_EXSLB+EX_R9(r13)
-	ld	r10,PACA_EXSLB+EX_R10(r13)
-	ld	r11,PACA_EXSLB+EX_R11(r13)
-	ld	r12,PACA_EXSLB+EX_R12(r13)
-	ld	r13,PACA_EXSLB+EX_R13(r13)
-	rfid
-	b	.	/* prevent speculative execution */
-
-unrecov_slb:
-	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
-	DISABLE_INTS
-	bl	.save_nvgprs
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
-	b	1b
-
-/*
  * Space for CPU0's segment table.
  *
  * On iSeries, the hypervisor must fill in at least one entry before
@@ -1569,7 +1712,10 @@ _GLOBAL(__secondary_start)
 #endif
 	/* Initialize the first segment table (or SLB) entry */
 	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table */
+BEGIN_FTR_SECTION
 	bl	.stab_initialize
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+	bl	.slb_initialize
 
 	/* Initialize the kernel stack.  Just a repeat for iSeries. */
 	LOADADDR(r3,current_set)
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index eded971..5a05a79 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
 	.xRanges = {
 		{ .xPages = HvPagesToMap,
 		  .xOffset = 0,
-		  .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
+		  .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
 		},
 	},
 };
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9684321..7f64f04 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -554,12 +554,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 #ifdef CONFIG_PPC64
 	if (cpu_has_feature(CPU_FTR_SLB)) {
 		unsigned long sp_vsid = get_kernel_vsid(sp);
+		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
 
 		sp_vsid <<= SLB_VSID_SHIFT;
-		sp_vsid |= SLB_VSID_KERNEL;
-		if (cpu_has_feature(CPU_FTR_16M_PAGE))
-			sp_vsid |= SLB_VSID_L;
-
+		sp_vsid |= SLB_VSID_KERNEL | llp;
 		p->thread.ksp_vsid = sp_vsid;
 	}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index eec2da6..3675ef4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -724,10 +724,10 @@ static inline char *find_flat_dt_string(u32 offset)
  * used to extract the memory informations at boot before we can
  * unflatten the tree
  */
-static int __init scan_flat_dt(int (*it)(unsigned long node,
-					 const char *uname, int depth,
-					 void *data),
-			       void *data)
+int __init of_scan_flat_dt(int (*it)(unsigned long node,
+				     const char *uname, int depth,
+				     void *data),
+			   void *data)
 {
 	unsigned long p = ((unsigned long)initial_boot_params) +
 		initial_boot_params->off_dt_struct;
@@ -784,8 +784,8 @@ static int __init scan_flat_dt(int (*it)(unsigned long node,
  * This function can be used within scan_flattened_dt callback to get
  * access to properties
  */
-static void* __init get_flat_dt_prop(unsigned long node, const char *name,
-				     unsigned long *size)
+void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
+				 unsigned long *size)
 {
 	unsigned long p = node;
 
@@ -1087,7 +1087,7 @@ void __init unflatten_device_tree(void)
 static int __init early_init_dt_scan_cpus(unsigned long node,
 					  const char *uname, int depth, void *data)
 {
-	char *type = get_flat_dt_prop(node, "device_type", NULL);
+	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 	u32 *prop;
 	unsigned long size = 0;
 
@@ -1095,19 +1095,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 	if (type == NULL || strcmp(type, "cpu") != 0)
 		return 0;
 
-#ifdef CONFIG_PPC_PSERIES
-	/* On LPAR, look for the first ibm,pft-size property for the hash table size
-	 */
-	if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
-		u32 *pft_size;
-		pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
-		if (pft_size != NULL) {
-			/* pft_size[0] is the NUMA CEC cookie */
-			ppc64_pft_size = pft_size[1];
-		}
-	}
-#endif
-
 	boot_cpuid = 0;
 	boot_cpuid_phys = 0;
 	if (initial_boot_params && initial_boot_params->version >= 2) {
@@ -1117,8 +1104,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
 	} else {
 		/* Check if it's the boot-cpu, set it's hw index now */
-		if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
-			prop = get_flat_dt_prop(node, "reg", NULL);
+		if (of_get_flat_dt_prop(node,
+				"linux,boot-cpu", NULL) != NULL) {
+			prop = of_get_flat_dt_prop(node, "reg", NULL);
 			if (prop != NULL)
 				boot_cpuid_phys = *prop;
 		}
@@ -1127,14 +1115,14 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 
 #ifdef CONFIG_ALTIVEC
 	/* Check if we have a VMX and eventually update CPU features */
-	prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
 	if (prop && (*prop) > 0) {
 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
 	}
 
 	/* Same goes for Apple's "altivec" property */
-	prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
+	prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
 	if (prop) {
 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
@@ -1147,7 +1135,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 	 * this by looking at the size of the ibm,ppc-interrupt-server#s
 	 * property
 	 */
-	prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
 				       &size);
 	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
 	if (prop && ((size / sizeof(u32)) > 1))
@@ -1170,7 +1158,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
 		return 0;
 
 	/* get platform type */
-	prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
+	prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
 	if (prop == NULL)
 		return 0;
 #ifdef CONFIG_PPC64
@@ -1183,21 +1171,21 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
 
 #ifdef CONFIG_PPC64
 	/* check if iommu is forced on or off */
-	if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
+	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
 		iommu_is_off = 1;
-	if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
+	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
 		iommu_force_on = 1;
 #endif
 
-	lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
+	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
 	if (lprop)
 		memory_limit = *lprop;
 
 #ifdef CONFIG_PPC64
-	lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
+	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
 	if (lprop)
 		tce_alloc_start = *lprop;
-	lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
+	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
 	if (lprop)
 		tce_alloc_end = *lprop;
 #endif
@@ -1209,9 +1197,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
 	{
 		u64 *basep, *entryp;
 
-		basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
-		entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
-		prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
+		basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
+		entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
+		prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
 		if (basep && entryp && prop) {
 			rtas.base = *basep;
 			rtas.entry = *entryp;
@@ -1232,11 +1220,11 @@ static int __init early_init_dt_scan_root(unsigned long node,
 	if (depth != 0)
 		return 0;
 
-	prop = get_flat_dt_prop(node, "#size-cells", NULL);
+	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
 	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
 	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
 
-	prop = get_flat_dt_prop(node, "#address-cells", NULL);
+	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
 	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
 	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
 
@@ -1271,7 +1259,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
 static int __init early_init_dt_scan_memory(unsigned long node,
 					    const char *uname, int depth, void *data)
 {
-	char *type = get_flat_dt_prop(node, "device_type", NULL);
+	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 	cell_t *reg, *endp;
 	unsigned long l;
 
@@ -1279,7 +1267,7 @@ static int __init early_init_dt_scan_memory(unsigned long node,
 	if (type == NULL || strcmp(type, "memory") != 0)
 		return 0;
 
-	reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
+	reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
 	if (reg == NULL)
 		return 0;
 
@@ -1343,12 +1331,12 @@ void __init early_init_devtree(void *params)
 	 * device-tree, including the platform type, initrd location and
 	 * size, TCE reserve, and more ...
 	 */
-	scan_flat_dt(early_init_dt_scan_chosen, NULL);
+	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
 
 	/* Scan memory nodes and rebuild LMBs */
 	lmb_init();
-	scan_flat_dt(early_init_dt_scan_root, NULL);
-	scan_flat_dt(early_init_dt_scan_memory, NULL);
+	of_scan_flat_dt(early_init_dt_scan_root, NULL);
+	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 	lmb_enforce_memory_limit(memory_limit);
 	lmb_analyze();
 #ifdef CONFIG_PPC64
@@ -1363,10 +1351,10 @@ void __init early_init_devtree(void *params)
 
 	DBG("Scanning CPUs ...\n");
 
-	/* Retreive hash table size from flattened tree plus other
-	 * CPU related informations (altivec support, boot CPU ID, ...)
+	/* Retrieve CPU related information from the flat tree
+	 * (altivec support, boot CPU ID, ...)
 	 */
-	scan_flat_dt(early_init_dt_scan_cpus, NULL);
+	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
 
 	DBG(" <- early_init_devtree()\n");
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6b52cce..b099405 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -277,16 +277,21 @@ void __init early_setup(unsigned long dt_ptr)
 	DBG("Found, Initializing memory management...\n");
 
 	/*
-	 * Initialize stab / SLB management
+	 * Initialize the MMU Hash table and create the linear mapping
+	 * of memory.  Has to be done before stab/slb initialization as
+	 * this is currently where the page size encoding is obtained
 	 */
-	if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		stab_initialize(lpaca->stab_real);
+	htab_initialize();
 
 	/*
-	 * Initialize the MMU Hash table and create the linear mapping
-	 * of memory
+	 * Initialize stab / SLB management except on iSeries
 	 */
-	htab_initialize();
+	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
+		if (cpu_has_feature(CPU_FTR_SLB))
+			slb_initialize();
+		else
+			stab_initialize(lpaca->stab_real);
+	}
 
 	DBG(" <- early_setup()\n");
 }
@@ -552,10 +557,12 @@ static void __init irqstack_early_init(void)
 	 * SLB misses on them.
 	 */
 	for_each_cpu(i) {
-		softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
-					THREAD_SIZE, 0x10000000));
-		hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
-					THREAD_SIZE, 0x10000000));
+		softirq_ctx[i] = (struct thread_info *)
+			__va(lmb_alloc_base(THREAD_SIZE,
+					    THREAD_SIZE, 0x10000000));
+		hardirq_ctx[i] = (struct thread_info *)
+			__va(lmb_alloc_base(THREAD_SIZE,
+					    THREAD_SIZE, 0x10000000));
 	}
 }
 #else
@@ -583,8 +590,8 @@ static void __init emergency_stack_init(void)
 	limit = min(0x10000000UL, lmb.rmo_size);
 
 	for_each_cpu(i)
-		paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
-					limit)) + PAGE_SIZE;
+		paca[i].emergency_sp =
+			__va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
 }
 
 /*
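A usage note on the copy_thread() hunk above: the kernel stack's SLB
entry now takes its page-size bits from the per-size table
(mmu_psize_defs[].sllp) instead of a hard-coded SLB_VSID_L large-page
flag. A minimal standalone sketch of that computation; the constant
values, array size, and function name below are illustrative stand-ins
for the kernel's headers, not its actual definitions:

    #include <stdint.h>

    #define SLB_VSID_SHIFT  12            /* as in the kernel */
    #define SLB_VSID_KERNEL 0x400ULL      /* illustrative value */

    /* Per-page-size info; sllp holds the SLB L|LP encoding. */
    struct mmu_psize_def { unsigned long sllp; };

    static struct mmu_psize_def mmu_psize_defs[16]; /* illustrative size */
    static int mmu_linear_psize;  /* page size of the kernel linear mapping */

    /* Mirrors the copy_thread() hunk: shift the VSID into place,
     * then OR in the kernel flags and the page-size encoding. */
    static uint64_t kernel_stack_vsid_data(uint64_t vsid)
    {
        uint64_t llp = mmu_psize_defs[mmu_linear_psize].sllp;
        return (vsid << SLB_VSID_SHIFT) | SLB_VSID_KERNEL | llp;
    }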