Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
 arch/powerpc/kernel/setup_64.c | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8d97eb4..c213b45 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -66,6 +66,7 @@
#include <asm/code-patching.h>
#include <asm/kvm_ppc.h>
#include <asm/hugetlb.h>
+#include <asm/epapr_hcalls.h>
#include "setup.h"
@@ -102,6 +103,30 @@ int ucache_bsize;
static char *smt_enabled_cmdline;
+#ifdef CONFIG_PPC_BOOK3E
+static void setup_tlb_per_core(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		int first = cpu_first_thread_sibling(cpu);
+
+		paca[cpu].tlb_per_core_ptr =
+			(uintptr_t)&paca[first].tlb_per_core;
+
+		/* If we have threads but no tlbsrx., use a per-core lock */
+		if (smt_enabled_at_boot >= 2 &&
+		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV))
+			paca[cpu].tlb_per_core_ptr |= TLB_PER_CORE_HAS_LOCK;
+	}
+}
+#else
+static void setup_tlb_per_core(void)
+{
+}
+#endif
+
+
/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
{
@@ -142,6 +167,8 @@ static void check_smt_enabled(void)
of_node_put(dn);
}
}
+
+	setup_tlb_per_core();
}
/* Look for smt-enabled= cmdline option */
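
Note: the hunk above tags the per-core pointer instead of adding a separate flag field. tlb_per_core_ptr holds the address of the first sibling thread's tlb_per_core structure, and TLB_PER_CORE_HAS_LOCK is OR-ed into it when the core runs multiple threads but the MMU lacks tlbsrx. (no MMU_FTR_USE_TLBRSRV). A minimal sketch of how such a tagged pointer could be decoded follows; decode_tlb_per_core() and struct tlb_core_data are hypothetical names, not taken from the patch, and the sketch assumes the flag sits in a low bit that the structure's alignment leaves free.

	/*
	 * Illustrative only -- not the patch's TLB code.  Assumes
	 * TLB_PER_CORE_HAS_LOCK is a low tag bit left free by the
	 * alignment of the per-core structure; all names are hypothetical.
	 */
	#include <stdint.h>

	#define TLB_PER_CORE_HAS_LOCK	1UL	/* assumed low tag bit */

	struct tlb_core_data;			/* opaque per-core state */

	static inline struct tlb_core_data *
	decode_tlb_per_core(uintptr_t tagged, int *needs_lock)
	{
		/* flag bit says whether a software per-core lock is required */
		*needs_lock = tagged & TLB_PER_CORE_HAS_LOCK;
		/* mask the flag off to recover the real pointer */
		return (struct tlb_core_data *)(tagged & ~TLB_PER_CORE_HAS_LOCK);
	}
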
@@ -429,7 +456,11 @@ void __init setup_system(void)
smp_setup_cpu_maps();
check_smt_enabled();
-#ifdef CONFIG_SMP
+	/*
+	 * Freescale Book3e parts spin in a loop provided by firmware,
+	 * so smp_release_cpus() does nothing for them
+	 */
+#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_FSL_BOOK3E)
/* Release secondary cpus out of their spinloops at 0x60 now that
* we can map physical -> logical CPU ids
*/
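
Note: the new comment is the reason for narrowing the #ifdef. On Freescale Book3e the secondaries are parked in a loop owned by firmware rather than in the kernel's spin loop at 0x60, so smp_release_cpus() has nothing to release and the call is compiled out. As a rough, purely conceptual illustration of the mechanism being skipped (not the kernel's actual smp_release_cpus(); the names are made up):

	/*
	 * Conceptual sketch of a spin-loop release: the boot CPU publishes
	 * an entry address that held secondaries are polling for.  On FSL
	 * Book3e the equivalent loop lives in firmware, outside the kernel.
	 */
	static volatile unsigned long secondary_hold;	/* polled by secondaries */

	static void release_secondaries_sketch(unsigned long entry)
	{
		secondary_hold = entry;				/* publish entry point */
		__asm__ __volatile__("sync" : : : "memory");	/* order the store */
	}
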
@@ -605,6 +636,8 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the MMU context management stuff */
mmu_context_init();
+	epapr_paravirt_init();
+
kvm_linear_init();
/* Interrupt code needs to be 64K-aligned */
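
Note: setup_arch() now calls epapr_paravirt_init() (hence the new asm/epapr_hcalls.h include) before kvm_linear_init(). Broadly, ePAPR paravirt setup discovers a hypervisor node in the device tree and picks up the hypercall instruction sequence it advertises. The sketch below shows only that discovery step, assuming the standard ePAPR binding of an "epapr,hypervisor-1" compatible node with an "hcall-instructions" property; it is not the body of epapr_paravirt_init().

	/*
	 * Sketch of ePAPR hypervisor discovery -- not the actual
	 * epapr_paravirt_init().  Instruction patching is omitted.
	 */
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/of.h>

	static int __init epapr_probe_sketch(void)
	{
		struct device_node *hyper;
		const void *insts;
		int len = 0;

		hyper = of_find_compatible_node(NULL, NULL, "epapr,hypervisor-1");
		if (!hyper)
			return -ENODEV;		/* no ePAPR hypervisor advertised */

		insts = of_get_property(hyper, "hcall-instructions", &len);
		/* the real code copies these instructions into the in-kernel
		 * hypercall stub; omitted here */
		of_node_put(hyper);

		return insts ? 0 : -ENODEV;
	}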