author      Michael Neuling <mikey@neuling.org>  2013-02-13 16:21:36 (GMT)
committer   Benjamin Herrenschmidt <benh@kernel.crashing.org>  2013-02-15 05:58:52 (GMT)
commit      a2dcbb32f0647a3b8230c146a2f980654a7738af (patch)
tree        ce51790a7a8cd7e869805e16658c9fcafa7a806c /arch
parent      98ae22e15b430bfed5def01ac1a88ec9396284c8 (diff)
powerpc: Add FP/VSX and VMX register load functions for transactional memory
This adds functions to restore the state of the FP/VSX registers from what's stored in the thread_struct. Two versions for FP/VSX are required since one restores them from the transactional/checkpointed side of the thread_struct and the other from the speculated side. Similar functions are added for VMX registers.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
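
For reference, these are the four C-callable entry points the patch introduces, with prototypes quoted from the comments in the patch itself (collecting them as one block of declarations here is purely illustrative; the patch adds no header for them):

void do_load_up_fpu(struct pt_regs *regs);                      /* FP/VSX, speculated side    */
void do_load_up_transact_fpu(struct thread_struct *thread);     /* FP/VSX, checkpointed side  */
void do_load_up_altivec(struct pt_regs *regs);                  /* VMX, speculated side       */
void do_load_up_transact_altivec(struct thread_struct *thread); /* VMX, checkpointed side     */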
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/fpu.S     | 54
-rw-r--r--  arch/powerpc/kernel/vector.S  | 51
2 files changed, 105 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index adb1551..caeaabf 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -62,6 +62,60 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
__REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Wrapper to call load_up_fpu from C.
+ * void do_load_up_fpu(struct pt_regs *regs);
+ */
+_GLOBAL(do_load_up_fpu)
+ mflr r0
+ std r0, 16(r1)
+ stdu r1, -112(r1)
+
+ subi r6, r3, STACK_FRAME_OVERHEAD
+ /* load_up_fpu expects r12=MSR, r13=PACA, and returns
+ * with r12 = new MSR.
+ */
+ ld r12,_MSR(r6)
+ GET_PACA(r13)
+
+ bl load_up_fpu
+ std r12,_MSR(r6)
+
+ ld r0, 112+16(r1)
+ addi r1, r1, 112
+ mtlr r0
+ blr
+
+
+/* void do_load_up_transact_fpu(struct thread_struct *thread)
+ *
+ * This is similar to load_up_fpu but for the transactional version of the FP
+ * register set. It doesn't mess with the task MSR or valid flags.
+ * Furthermore, we don't do lazy FP with TM currently.
+ */
+_GLOBAL(do_load_up_transact_fpu)
+ mfmsr r6
+ ori r5,r6,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r5,r5,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ SYNC
+ MTMSRD(r5)
+
+ lfd fr0,THREAD_TRANSACT_FPSCR(r3)
+ MTFSF_L(fr0)
+ REST_32FPVSRS_TRANSACT(0, R4, R3)
+
+ /* FP/VSX off again */
+ MTMSRD(r6)
+ SYNC
+
+ blr
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
/*
* This task wants to use the FPU now.
* On UP, disable FP for the task which had the FPU previously,
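
In rough C terms, the do_load_up_transact_fpu routine added above behaves as sketched below. The struct layout and helper functions are stand-ins invented for this sketch to mirror the individual instructions; the transact_* field names are inferred from the THREAD_TRANSACT_FPSCR offset and the REST_32FPVSRS_TRANSACT macro the assembly uses.

/* Explanatory sketch only: the authoritative implementation is the assembly above. */
struct tm_fp_state_sketch {
	unsigned long transact_fpscr;   /* read via lfd fr0,THREAD_TRANSACT_FPSCR(r3) */
	double transact_fpr[32];        /* read via REST_32FPVSRS_TRANSACT(0, R4, R3) */
};

extern unsigned long read_msr(void);                /* mfmsr r6               */
extern void write_msr(unsigned long msr);           /* SYNC ; MTMSRD          */
extern unsigned long msr_fp_bits(void);             /* MSR_FP, plus MSR_VSX on CPU_FTR_VSX parts */
extern void load_fpscr(unsigned long fpscr);        /* lfd fr0,... ; MTFSF_L  */
extern void restore_fp_vsx_regs(const double *fpr); /* REST_32FPVSRS_TRANSACT */

void do_load_up_transact_fpu_sketch(struct tm_fp_state_sketch *thread)
{
	unsigned long msr = read_msr();

	/* Enable FP (and VSX where available) so the register file can be written. */
	write_msr(msr | msr_fp_bits());

	load_fpscr(thread->transact_fpscr);        /* restore the checkpointed FPSCR               */
	restore_fp_vsx_regs(thread->transact_fpr); /* restore the 32 checkpointed FP/VSX registers */

	write_msr(msr);                            /* FP/VSX off again                             */
}

Note that nothing here touches the task's pt_regs MSR or any "FP used" flag, which is what the comment above means by not messing with the task MSR or valid flags.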
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index e830289..9e20999 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -7,6 +7,57 @@
#include <asm/page.h>
#include <asm/ptrace.h>
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Wrapper to call load_up_altivec from C.
+ * void do_load_up_altivec(struct pt_regs *regs);
+ */
+_GLOBAL(do_load_up_altivec)
+ mflr r0
+ std r0, 16(r1)
+ stdu r1, -112(r1)
+
+ subi r6, r3, STACK_FRAME_OVERHEAD
+ /* load_up_altivec expects r12=MSR, r13=PACA, and returns
+ * with r12 = new MSR.
+ */
+ ld r12,_MSR(r6)
+ GET_PACA(r13)
+ bl load_up_altivec
+ std r12,_MSR(r6)
+
+ ld r0, 112+16(r1)
+ addi r1, r1, 112
+ mtlr r0
+ blr
+
+/* void do_load_up_transact_altivec(struct thread_struct *thread)
+ *
+ * This is similar to load_up_altivec but for the transactional version of the
+ * vector regs. It doesn't mess with the task MSR or valid flags.
+ * Furthermore, VEC laziness is not supported with TM currently.
+ */
+_GLOBAL(do_load_up_transact_altivec)
+ mfmsr r6
+ oris r5,r6,MSR_VEC@h
+ MTMSRD(r5)
+ isync
+
+ li r4,1
+ stw r4,THREAD_USED_VR(r3)
+
+ li r10,THREAD_TRANSACT_VSCR
+ lvx vr0,r10,r3
+ mtvscr vr0
+ REST_32VRS_TRANSACT(0,r4,r3)
+
+ /* Disable VEC again. */
+ MTMSRD(r6)
+ isync
+
+ blr
+#endif
+
/*
* load_up_altivec(unused, unused, tsk)
* Disable VMX for the task which had it previously,
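
The transactional VMX loader above follows the same pattern as its FP counterpart, with one extra step: it marks the thread as having used vector registers before restoring VSCR and the 32 VRs. Sketched in the same pseudo-C style (stand-in types and helpers; the used_vr and transact_* names are inferred from the THREAD_USED_VR and THREAD_TRANSACT_VSCR offsets and the REST_32VRS_TRANSACT macro):

/* Explanatory sketch only: the authoritative implementation is the assembly above. */
struct tm_vmx_state_sketch {
	int used_vr;                        /* stw r4,THREAD_USED_VR(r3)                 */
	unsigned char transact_vscr[16];    /* loaded with lvx from THREAD_TRANSACT_VSCR */
	unsigned char transact_vr[32][16];  /* read via REST_32VRS_TRANSACT(0,r4,r3)     */
};

extern unsigned long read_msr(void);                         /* mfmsr r6             */
extern void write_msr(unsigned long msr);                    /* MTMSRD ; isync       */
extern unsigned long msr_vec_bit(void);                      /* MSR_VEC              */
extern void load_vscr(const unsigned char *vscr);            /* lvx vr0 ; mtvscr vr0 */
extern void restore_vmx_regs(const unsigned char (*vr)[16]); /* REST_32VRS_TRANSACT  */

void do_load_up_transact_altivec_sketch(struct tm_vmx_state_sketch *thread)
{
	unsigned long msr = read_msr();

	write_msr(msr | msr_vec_bit());        /* VEC on                          */

	thread->used_vr = 1;                   /* record that this task uses VMX  */
	load_vscr(thread->transact_vscr);      /* restore the checkpointed VSCR   */
	restore_vmx_regs(thread->transact_vr); /* restore the 32 checkpointed VRs */

	write_msr(msr);                        /* VEC off again                   */
}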