author		Scott Wood <scottwood@freescale.com>	2014-04-10 23:00:29 (GMT)
committer	Scott Wood <scottwood@freescale.com>	2014-04-10 23:00:29 (GMT)
commit		59abe64904d98a6a4069595ccb85d2bbc8dd437d (patch)
tree		a203747a814721a958a0d806ed34d1be33e3644b /arch/powerpc
parent		9733c8d0e3ffe281c7c1ecdfef751f694e19d82c (diff)
parent		005e855f53957216f57547d0df8ea691556e2cb8 (diff)
download	linux-fsl-qoriq-59abe64904d98a6a4069595ccb85d2bbc8dd437d.tar.xz
Merge branch 'sdk-kernel-3.12' into sdk-v1.6.x
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/ppc_asm.h	|  95
-rw-r--r--	arch/powerpc/include/asm/processor.h	|  47
-rw-r--r--	arch/powerpc/include/asm/sfp-machine.h	|   2
-rw-r--r--	arch/powerpc/kernel/align.c		|   6
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	|  27
-rw-r--r--	arch/powerpc/kernel/fpu.S		|  86
-rw-r--r--	arch/powerpc/kernel/ppc_ksyms.c		|   4
-rw-r--r--	arch/powerpc/kernel/process.c		|  15
-rw-r--r--	arch/powerpc/kernel/ptrace.c		|  49
-rw-r--r--	arch/powerpc/kernel/ptrace32.c		|  11
-rw-r--r--	arch/powerpc/kernel/signal_32.c		|  72
-rw-r--r--	arch/powerpc/kernel/signal_64.c		|  29
-rw-r--r--	arch/powerpc/kernel/tm.S		|  41
-rw-r--r--	arch/powerpc/kernel/traps.c		|  10
-rw-r--r--	arch/powerpc/kernel/vecemu.c		|   6
-rw-r--r--	arch/powerpc/kernel/vector.S		|  80
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c		|  36
-rw-r--r--	arch/powerpc/kvm/booke.c		|  19
-rw-r--r--	arch/powerpc/kvm/e500.c			|   4
-rw-r--r--	arch/powerpc/kvm/e500mc.c		|   4
20 files changed, 279 insertions(+), 364 deletions(-)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c2dcfaa..eaf69a6 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -98,123 +98,40 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
-#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define REST_FPR(n, base) lfd n,8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
-#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,base,b
+#define SAVE_VR(n,b,base) li b,16*(n); stvx n,base,b
#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,base,b
+#define REST_VR(n,b,base) li b,16*(n); lvx n,base,b
#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
-/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in
- * thread_struct:
- */
-#define SAVE_FPR_TRANSACT(n, base) stfd n,THREAD_TRANSACT_FPR0+ \
- 8*TS_FPRWIDTH*(n)(base)
-#define SAVE_2FPRS_TRANSACT(n, base) SAVE_FPR_TRANSACT(n, base); \
- SAVE_FPR_TRANSACT(n+1, base)
-#define SAVE_4FPRS_TRANSACT(n, base) SAVE_2FPRS_TRANSACT(n, base); \
- SAVE_2FPRS_TRANSACT(n+2, base)
-#define SAVE_8FPRS_TRANSACT(n, base) SAVE_4FPRS_TRANSACT(n, base); \
- SAVE_4FPRS_TRANSACT(n+4, base)
-#define SAVE_16FPRS_TRANSACT(n, base) SAVE_8FPRS_TRANSACT(n, base); \
- SAVE_8FPRS_TRANSACT(n+8, base)
-#define SAVE_32FPRS_TRANSACT(n, base) SAVE_16FPRS_TRANSACT(n, base); \
- SAVE_16FPRS_TRANSACT(n+16, base)
-
-#define REST_FPR_TRANSACT(n, base) lfd n,THREAD_TRANSACT_FPR0+ \
- 8*TS_FPRWIDTH*(n)(base)
-#define REST_2FPRS_TRANSACT(n, base) REST_FPR_TRANSACT(n, base); \
- REST_FPR_TRANSACT(n+1, base)
-#define REST_4FPRS_TRANSACT(n, base) REST_2FPRS_TRANSACT(n, base); \
- REST_2FPRS_TRANSACT(n+2, base)
-#define REST_8FPRS_TRANSACT(n, base) REST_4FPRS_TRANSACT(n, base); \
- REST_4FPRS_TRANSACT(n+4, base)
-#define REST_16FPRS_TRANSACT(n, base) REST_8FPRS_TRANSACT(n, base); \
- REST_8FPRS_TRANSACT(n+8, base)
-#define REST_32FPRS_TRANSACT(n, base) REST_16FPRS_TRANSACT(n, base); \
- REST_16FPRS_TRANSACT(n+16, base)
-
-
-#define SAVE_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
- stvx n,b,base
-#define SAVE_2VRS_TRANSACT(n,b,base) SAVE_VR_TRANSACT(n,b,base); \
- SAVE_VR_TRANSACT(n+1,b,base)
-#define SAVE_4VRS_TRANSACT(n,b,base) SAVE_2VRS_TRANSACT(n,b,base); \
- SAVE_2VRS_TRANSACT(n+2,b,base)
-#define SAVE_8VRS_TRANSACT(n,b,base) SAVE_4VRS_TRANSACT(n,b,base); \
- SAVE_4VRS_TRANSACT(n+4,b,base)
-#define SAVE_16VRS_TRANSACT(n,b,base) SAVE_8VRS_TRANSACT(n,b,base); \
- SAVE_8VRS_TRANSACT(n+8,b,base)
-#define SAVE_32VRS_TRANSACT(n,b,base) SAVE_16VRS_TRANSACT(n,b,base); \
- SAVE_16VRS_TRANSACT(n+16,b,base)
-
-#define REST_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
- lvx n,b,base
-#define REST_2VRS_TRANSACT(n,b,base) REST_VR_TRANSACT(n,b,base); \
- REST_VR_TRANSACT(n+1,b,base)
-#define REST_4VRS_TRANSACT(n,b,base) REST_2VRS_TRANSACT(n,b,base); \
- REST_2VRS_TRANSACT(n+2,b,base)
-#define REST_8VRS_TRANSACT(n,b,base) REST_4VRS_TRANSACT(n,b,base); \
- REST_4VRS_TRANSACT(n+4,b,base)
-#define REST_16VRS_TRANSACT(n,b,base) REST_8VRS_TRANSACT(n,b,base); \
- REST_8VRS_TRANSACT(n+8,b,base)
-#define REST_32VRS_TRANSACT(n,b,base) REST_16VRS_TRANSACT(n,b,base); \
- REST_16VRS_TRANSACT(n+16,b,base)
-
-
-#define SAVE_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
- STXVD2X(n,R##base,R##b)
-#define SAVE_2VSRS_TRANSACT(n,b,base) SAVE_VSR_TRANSACT(n,b,base); \
- SAVE_VSR_TRANSACT(n+1,b,base)
-#define SAVE_4VSRS_TRANSACT(n,b,base) SAVE_2VSRS_TRANSACT(n,b,base); \
- SAVE_2VSRS_TRANSACT(n+2,b,base)
-#define SAVE_8VSRS_TRANSACT(n,b,base) SAVE_4VSRS_TRANSACT(n,b,base); \
- SAVE_4VSRS_TRANSACT(n+4,b,base)
-#define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base); \
- SAVE_8VSRS_TRANSACT(n+8,b,base)
-#define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \
- SAVE_16VSRS_TRANSACT(n+16,b,base)
-
-#define REST_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
- LXVD2X(n,R##base,R##b)
-#define REST_2VSRS_TRANSACT(n,b,base) REST_VSR_TRANSACT(n,b,base); \
- REST_VSR_TRANSACT(n+1,b,base)
-#define REST_4VSRS_TRANSACT(n,b,base) REST_2VSRS_TRANSACT(n,b,base); \
- REST_2VSRS_TRANSACT(n+2,b,base)
-#define REST_8VSRS_TRANSACT(n,b,base) REST_4VSRS_TRANSACT(n,b,base); \
- REST_4VSRS_TRANSACT(n+4,b,base)
-#define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base); \
- REST_8VSRS_TRANSACT(n+8,b,base)
-#define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \
- REST_16VSRS_TRANSACT(n+16,b,base)
-
/* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b)
+#define SAVE_VSR(n,b,base) li b,16*(n); STXVD2X(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b)
+#define REST_VSR(n,b,base) li b,16*(n); LXVD2X(n,R##base,R##b)
#define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
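The net effect of this hunk: the SAVE_*/REST_* macros no longer bake a thread_struct offset (THREAD_FPR0, THREAD_VR0, THREAD_VSR0) into the displacement; callers must pass a base register that already points at a thread_fp_state or thread_vr_state. A minimal C sketch of the addressing change, with illustrative constants (the real values come from asm-offsets; THREAD_FPR0 is removed by this patch):

    #include <stdint.h>

    #define TS_FPRWIDTH 2        /* 2 when CONFIG_VSX is set, else 1 */
    #define THREAD_FPR0 0x108    /* hypothetical asm-offsets value */

    /* Old form: base = &thread_struct, offset folds in THREAD_FPR0. */
    static inline uint64_t *fpr_slot_old(char *thread, int n)
    {
        return (uint64_t *)(thread + THREAD_FPR0 + 8 * TS_FPRWIDTH * n);
    }

    /* New form: base = &thread.fp_state, offset is state-relative only. */
    static inline uint64_t *fpr_slot_new(char *fp_state, int n)
    {
        return (uint64_t *)(fp_state + 8 * TS_FPRWIDTH * n);
    }

This indirection is what lets fpu.S, vector.S and tm.S point the same macros at either the live state or the transactional copy, which in turn makes the whole *_TRANSACT macro family above redundant.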
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 75a9e5a..ef64aa9 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -144,8 +144,20 @@ typedef struct {
#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
-#define TS_FPR(i) fpr[i][TS_FPROFFSET]
-#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
+#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
+#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET]
+
+/* FP and VSX 0-31 register set */
+struct thread_fp_state {
+ u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
+ u64 fpscr; /* Floating point status */
+};
+
+/* Complete AltiVec register set including VSCR */
+struct thread_vr_state {
+ vector128 vr[32] __attribute__((aligned(16)));
+ vector128 vscr __attribute__((aligned(16)));
+};
struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -205,13 +217,8 @@ struct thread_struct {
/* Debug Registers */
struct debug_reg debug;
- /* FP and VSX 0-31 register set */
- double fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
- struct {
-
- unsigned int pad;
- unsigned int val; /* Floating point status */
- } fpscr;
+ struct thread_fp_state fp_state;
+ struct thread_fp_state *fp_save_area;
int fpexc_mode; /* floating-point exception mode */
unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_PPC64
@@ -229,10 +236,8 @@ struct thread_struct {
struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
unsigned long trap_nr; /* last trap # on this thread */
#ifdef CONFIG_ALTIVEC
- /* Complete AltiVec register set */
- vector128 vr[32] __attribute__((aligned(16)));
- /* AltiVec status */
- vector128 vscr __attribute__((aligned(16)));
+ struct thread_vr_state vr_state;
+ struct thread_vr_state *vr_save_area;
unsigned long vrsave;
int used_vr; /* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
@@ -269,13 +274,8 @@ struct thread_struct {
* transact_fpr[] is the new set of transactional values.
* VRs work the same way.
*/
- double transact_fpr[32][TS_FPRWIDTH];
- struct {
- unsigned int pad;
- unsigned int val; /* Floating point status */
- } transact_fpscr;
- vector128 transact_vr[32] __attribute__((aligned(16)));
- vector128 transact_vscr __attribute__((aligned(16)));
+ struct thread_fp_state transact_fp;
+ struct thread_vr_state transact_vr;
unsigned long transact_vrsave;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -329,8 +329,6 @@ struct thread_struct {
.ksp = INIT_SP, \
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.fs = KERNEL_DS, \
- .fpr = {{0}}, \
- .fpscr = { .val = 0, }, \
.fpexc_mode = 0, \
.ppr = INIT_PPR, \
}
@@ -368,6 +366,11 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+extern void load_fp_state(struct thread_fp_state *fp);
+extern void store_fp_state(struct thread_fp_state *fp);
+extern void load_vr_state(struct thread_vr_state *vr);
+extern void store_vr_state(struct thread_vr_state *vr);
+
static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
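A self-contained sketch (assumptions noted in comments) of how the new types are meant to be used: FP/VSX and VMX state each live in their own struct, and the *_save_area pointers let giveup_fpu()/giveup_altivec() redirect the save to a caller-supplied buffer, with NULL meaning "save into the thread's own state". That NULL test is exactly the branch the fpu.S and vector.S hunks below add.

    #include <stdint.h>
    #include <stddef.h>

    #define TS_FPRWIDTH  2
    #define TS_FPROFFSET 0

    struct thread_fp_state {
        uint64_t fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
        uint64_t fpscr;
    };

    /* Cut-down stand-in for thread_struct, for illustration only. */
    struct thread_example {
        struct thread_fp_state fp_state;
        struct thread_fp_state *fp_save_area;  /* NULL => use fp_state */
    };

    /* Mirrors the test giveup_fpu() grows in the fpu.S hunk below. */
    static struct thread_fp_state *fp_save_target(struct thread_example *t)
    {
        return t->fp_save_area ? t->fp_save_area : &t->fp_state;
    }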
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index 3a7a67a..d89beab 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -125,7 +125,7 @@
#define FP_EX_DIVZERO (1 << (31 - 5))
#define FP_EX_INEXACT (1 << (31 - 6))
-#define __FPU_FPSCR (current->thread.fpscr.val)
+#define __FPU_FPSCR (current->thread.fp_state.fpscr)
/* We only actually write to the destination register
* if exceptions signalled (if any) will not trap.
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index a27ccd5..eaa16bc 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -660,7 +660,7 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
if (reg < 32)
ptr = (char *) &current->thread.TS_FPR(reg);
else
- ptr = (char *) &current->thread.vr[reg - 32];
+ ptr = (char *) &current->thread.vr_state.vr[reg - 32];
lptr = (unsigned long *) ptr;
@@ -897,7 +897,7 @@ int fix_alignment(struct pt_regs *regs)
return -EFAULT;
}
} else if (flags & F) {
- data.dd = current->thread.TS_FPR(reg);
+ data.ll = current->thread.TS_FPR(reg);
if (flags & S) {
/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
@@ -975,7 +975,7 @@ int fix_alignment(struct pt_regs *regs)
if (unlikely(ret))
return -EFAULT;
} else if (flags & F)
- current->thread.TS_FPR(reg) = data.dd;
+ current->thread.TS_FPR(reg) = data.ll;
else
regs->gpr[reg] = data.ll;
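The data.dd to data.ll switch matters because the alignment handler's scratch buffer is a union; moving the raw FPR image through the u64 member means the compiler never treats the bits as a floating-point value (a double copy can round-trip through FP registers). A hedged sketch of the relevant members; the real union in align.c has more:

    #include <stdint.h>

    union aligned_data {        /* illustrative subset of align.c's union */
        uint64_t ll;            /* raw 64-bit image: safe for FPR bits */
        double dd;              /* FP view: may pass through FP registers */
        unsigned char v[8];
    };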
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 53fe65e..d6bb592 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -90,16 +90,17 @@ int main(void)
DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
#endif
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
- DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
- DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
+ DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
+ DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area));
+ DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
#ifdef CONFIG_ALTIVEC
- DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
+ DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
+ DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
- DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
+ DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
- DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr));
DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
@@ -143,20 +144,12 @@ int main(void)
DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
- DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
- transact_vr[0]));
- DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct,
- transact_vscr));
+ DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct,
+ transact_vr));
DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct,
transact_vrsave));
- DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct,
- transact_fpr[0]));
- DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct,
- transact_fpscr));
-#ifdef CONFIG_VSX
- DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct,
- transact_fpr[0]));
-#endif
+ DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct,
+ transact_fp));
/* Local pt_regs on stack for Transactional Memory funcs. */
DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
sizeof(struct pt_regs) + 16);
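For context, asm-offsets.c is how names like THREAD_FPSTATE and FPSTATE_FPSCR reach the assembly files: each DEFINE() result is emitted as a constant in the generated asm-offsets.h. A runnable model of the idea (the kernel build post-processes compiler output rather than running a program, but the computed value is the same):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct thread_fp_state {
        uint64_t fpr[32][2] __attribute__((aligned(16)));
        uint64_t fpscr;
    };

    int main(void)
    {
        /* Prints "#define FPSTATE_FPSCR 512" on a typical LP64 target. */
        printf("#define FPSTATE_FPSCR %zu\n",
               offsetof(struct thread_fp_state, fpscr));
        return 0;
    }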
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index caeaabf..f7f5b8b 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -35,15 +35,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: REST_32VSRS(n,c,base); \
3:
-#define __REST_32FPVSRS_TRANSACT(n,c,base) \
-BEGIN_FTR_SECTION \
- b 2f; \
-END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
- REST_32FPRS_TRANSACT(n,base); \
- b 3f; \
-2: REST_32VSRS_TRANSACT(n,c,base); \
-3:
-
#define __SAVE_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@@ -54,40 +45,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
3:
#else
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
-#define __REST_32FPVSRS_TRANSACT(n,b,base) REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
-#define REST_32FPVSRS_TRANSACT(n,c,base) \
- __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_fpu from C.
- * void do_load_up_fpu(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_fpu)
- mflr r0
- std r0, 16(r1)
- stdu r1, -112(r1)
-
- subi r6, r3, STACK_FRAME_OVERHEAD
- /* load_up_fpu expects r12=MSR, r13=PACA, and returns
- * with r12 = new MSR.
- */
- ld r12,_MSR(r6)
- GET_PACA(r13)
-
- bl load_up_fpu
- std r12,_MSR(r6)
-
- ld r0, 112+16(r1)
- addi r1, r1, 112
- mtlr r0
- blr
-
-
/* void do_load_up_transact_fpu(struct thread_struct *thread)
*
* This is similar to load_up_fpu but for the transactional version of the FP
@@ -105,9 +68,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
SYNC
MTMSRD(r5)
- lfd fr0,THREAD_TRANSACT_FPSCR(r3)
+ addi r7,r3,THREAD_TRANSACT_FPSTATE
+ lfd fr0,FPSTATE_FPSCR(r7)
MTFSF_L(fr0)
- REST_32FPVSRS_TRANSACT(0, R4, R3)
+ REST_32FPVSRS(0, R4, R7)
/* FP/VSX off again */
MTMSRD(r6)
@@ -117,11 +81,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
+ * Load state from memory into FP registers including FPSCR.
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(load_fp_state)
+ lfd fr0,FPSTATE_FPSCR(r3)
+ MTFSF_L(fr0)
+ REST_32FPVSRS(0, R4, R3)
+ blr
+
+/*
+ * Store FP state into memory, including FPSCR
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(store_fp_state)
+ SAVE_32FPVSRS(0, R4, R3)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r3)
+ blr
+
+/*
* This task wants to use the FPU now.
* On UP, disable FP for the task which had the FPU previously,
* and save its floating-point registers in its thread_struct.
* Load up this task's FP registers from its thread_struct,
* enable the FPU for the current task and return to the task.
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
*/
_GLOBAL(load_up_fpu)
mfmsr r5
@@ -147,9 +133,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
beq 1f
toreal(r4)
addi r4,r4,THREAD /* want last_task_used_math->thread */
- SAVE_32FPVSRS(0, R5, R4)
+ addi r10,r4,THREAD_FPSTATE
+ SAVE_32FPVSRS(0, R5, R10)
mffs fr0
- stfd fr0,THREAD_FPSCR(r4)
+ stfd fr0,FPSTATE_FPSCR(r10)
PPC_LL r5,PT_REGS(r4)
toreal(r5)
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -160,7 +147,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_SMP */
/* enable use of FP after return */
#ifdef CONFIG_PPC32
- mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
+ mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
lwz r4,THREAD_FPEXC_MODE(r5)
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4
@@ -172,9 +159,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
or r12,r12,r4
std r12,_MSR(r1)
#endif
- lfd fr0,THREAD_FPSCR(r5)
+ addi r10,r5,THREAD_FPSTATE
+ lfd fr0,FPSTATE_FPSCR(r10)
MTFSF_L(fr0)
- REST_32FPVSRS(0, R4, R5)
+ REST_32FPVSRS(0, R4, R10)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
@@ -206,11 +194,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
PPC_LCMPI 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
+ PPC_LL r6,THREAD_FPSAVEAREA(r3)
PPC_LL r5,PT_REGS(r3)
- PPC_LCMPI 0,r5,0
- SAVE_32FPVSRS(0, R4 ,R3)
+ PPC_LCMPI 0,r6,0
+ bne 2f
+ addi r6,r3,THREAD_FPSTATE
+2: PPC_LCMPI 0,r5,0
+ SAVE_32FPVSRS(0, R4, R6)
mffs fr0
- stfd fr0,THREAD_FPSCR(r3)
+ stfd fr0,FPSTATE_FPSCR(r6)
beq 1f
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r3,MSR_FP|MSR_FE0|MSR_FE1
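A hedged usage sketch for the new entry points, based on the prototypes this patch adds to processor.h and the "caller has enabled FP in the MSR" comments above; enable_kernel_fp() with preemption disabled is one way a kernel caller could satisfy that precondition:

    /* Prototypes as added to processor.h by this patch. */
    extern void load_fp_state(struct thread_fp_state *fp);
    extern void store_fp_state(struct thread_fp_state *fp);

    /* Hypothetical caller; assumes MSR_FP is set and preemption is off. */
    static void fp_round_trip(struct thread_fp_state *buf)
    {
        store_fp_state(buf);    /* FPRs/VSRs + FPSCR -> memory */
        load_fp_state(buf);     /* memory -> FPRs/VSRs + FPSCR */
    }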
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 21646db..56a4bec 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -98,9 +98,13 @@ EXPORT_SYMBOL(start_thread);
#ifdef CONFIG_PPC_FPU
EXPORT_SYMBOL(giveup_fpu);
+EXPORT_SYMBOL(load_fp_state);
+EXPORT_SYMBOL(store_fp_state);
#endif
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
+EXPORT_SYMBOL(load_vr_state);
+EXPORT_SYMBOL(store_vr_state);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
EXPORT_SYMBOL(giveup_vsx);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 681ae5b..5b2490a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1018,6 +1018,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ptrace_bps[0] = NULL;
#endif
+ p->thread.fp_save_area = NULL;
+#ifdef CONFIG_ALTIVEC
+ p->thread.vr_save_area = NULL;
+#endif
+
#ifdef CONFIG_PPC_STD_MMU_64
if (mmu_has_feature(MMU_FTR_SLB)) {
unsigned long sp_vsid;
@@ -1123,12 +1128,12 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
- memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
- current->thread.fpscr.val = 0;
+ memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
+ current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
- memset(current->thread.vr, 0, sizeof(current->thread.vr));
- memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
- current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
+ memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
+ current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
+ current->thread.vr_save_area = NULL;
current->thread.vrsave = 0;
current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index bd92ba0..100e01c 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -362,7 +362,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
- double buf[33];
+ u64 buf[33];
int i;
#endif
flush_fp_to_thread(target);
@@ -371,15 +371,15 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
- memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
+ buf[32] = target->thread.fp_state.fpscr;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
- BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
- offsetof(struct thread_struct, TS_FPR(32)));
+ BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ offsetof(struct thread_fp_state, fpr[32][0]));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpr, 0, -1);
+ &target->thread.fp_state, 0, -1);
#endif
}
@@ -388,7 +388,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
- double buf[33];
+ u64 buf[33];
int i;
#endif
flush_fp_to_thread(target);
@@ -400,14 +400,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
- memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
+ target->thread.fp_state.fpscr = buf[32];
return 0;
#else
- BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
- offsetof(struct thread_struct, TS_FPR(32)));
+ BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ offsetof(struct thread_fp_state, fpr[32][0]));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpr, 0, -1);
+ &target->thread.fp_state, 0, -1);
#endif
}
@@ -440,11 +440,11 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
flush_altivec_to_thread(target);
- BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
- offsetof(struct thread_struct, vr[32]));
+ BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+ offsetof(struct thread_vr_state, vr[32]));
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr, 0,
+ &target->thread.vr_state, 0,
33 * sizeof(vector128));
if (!ret) {
/*
@@ -471,11 +471,12 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
flush_altivec_to_thread(target);
- BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
- offsetof(struct thread_struct, vr[32]));
+ BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+ offsetof(struct thread_vr_state, vr[32]));
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr, 0, 33 * sizeof(vector128));
+ &target->thread.vr_state, 0,
+ 33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the first word of vrsave.
@@ -514,13 +515,13 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- double buf[32];
+ u64 buf[32];
int ret, i;
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
@@ -531,7 +532,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- double buf[32];
+ u64 buf[32];
int ret,i;
flush_vsx_to_thread(target);
@@ -539,7 +540,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
for (i = 0; i < 32 ; i++)
- target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
@@ -1554,10 +1555,10 @@ long arch_ptrace(struct task_struct *child, long request,
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
- tmp = ((unsigned long *)child->thread.fpr)
+ tmp = ((unsigned long *)child->thread.fp_state.fpr)
[fpidx * TS_FPRWIDTH];
else
- tmp = child->thread.fpscr.val;
+ tmp = child->thread.fp_state.fpscr;
}
ret = put_user(tmp, datalp);
break;
@@ -1587,10 +1588,10 @@ long arch_ptrace(struct task_struct *child, long request,
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
- ((unsigned long *)child->thread.fpr)
+ ((unsigned long *)child->thread.fp_state.fpr)
[fpidx * TS_FPRWIDTH] = data;
else
- child->thread.fpscr.val = data;
+ child->thread.fp_state.fpscr = data;
ret = 0;
}
break;
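From the debugger side, the regset fpr_get()/fpr_set() service is NT_PRFPREG: 32 FPR slots followed by the FPSCR, 33 u64 values in all, which is why the staging buffer is u64 buf[33]. A hypothetical userspace reader (not part of the patch) that relies on exactly that layout:

    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <elf.h>

    /* Returns 0 on success; out[0..31] = FPRs, out[32] = FPSCR. */
    static long read_fpregs(pid_t pid, uint64_t out[33])
    {
        struct iovec iov = {
            .iov_base = out,
            .iov_len  = 33 * sizeof(uint64_t),
        };
        return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRFPREG, &iov);
    }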
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 18c7c65..f52b7db 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -43,7 +43,6 @@
#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
#define FPRHALF(i) (((i) - PT_FPR0) & 1)
#define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i)
-#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0))
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
@@ -105,7 +104,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
- tmp = ((unsigned int *)child->thread.fpr)
+ tmp = ((unsigned int *)child->thread.fp_state.fpr)
[FPRINDEX(index)];
}
ret = put_user((unsigned int)tmp, (u32 __user *)data);
@@ -147,8 +146,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (numReg >= PT_FPR0) {
flush_fp_to_thread(child);
/* get 64 bit FPR */
- tmp = ((u64 *)child->thread.fpr)
- [FPRINDEX_3264(numReg)];
+ tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0];
} else { /* register within PT_REGS struct */
unsigned long tmp2;
ret = ptrace_get_reg(child, numReg, &tmp2);
@@ -207,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
- ((unsigned int *)child->thread.fpr)
+ ((unsigned int *)child->thread.fp_state.fpr)
[FPRINDEX(index)] = data;
ret = 0;
}
@@ -251,8 +249,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
u64 *tmp;
flush_fp_to_thread(child);
/* get 64 bit FPR ... */
- tmp = &(((u64 *)child->thread.fpr)
- [FPRINDEX_3264(numReg)]);
+ tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0];
/* ... write the 32 bit part we want */
((u32 *)tmp)[index % 2] = data;
ret = 0;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index e143d03..5d5fe63 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -265,27 +265,27 @@ struct rt_sigframe {
unsigned long copy_fpr_to_user(void __user *to,
struct task_struct *task)
{
- double buf[ELF_NFPREG];
+ u64 buf[ELF_NFPREG];
int i;
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
buf[i] = task->thread.TS_FPR(i);
- memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
+ buf[i] = task->thread.fp_state.fpscr;
return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
unsigned long copy_fpr_from_user(struct task_struct *task,
void __user *from)
{
- double buf[ELF_NFPREG];
+ u64 buf[ELF_NFPREG];
int i;
if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
return 1;
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
task->thread.TS_FPR(i) = buf[i];
- memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
+ task->thread.fp_state.fpscr = buf[i];
return 0;
}
@@ -293,25 +293,25 @@ unsigned long copy_fpr_from_user(struct task_struct *task,
unsigned long copy_vsx_to_user(void __user *to,
struct task_struct *task)
{
- double buf[ELF_NVSRHALFREG];
+ u64 buf[ELF_NVSRHALFREG];
int i;
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < ELF_NVSRHALFREG; i++)
- buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
+ buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}
unsigned long copy_vsx_from_user(struct task_struct *task,
void __user *from)
{
- double buf[ELF_NVSRHALFREG];
+ u64 buf[ELF_NVSRHALFREG];
int i;
if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
return 1;
for (i = 0; i < ELF_NVSRHALFREG ; i++)
- task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return 0;
}
@@ -319,27 +319,27 @@ unsigned long copy_vsx_from_user(struct task_struct *task,
unsigned long copy_transact_fpr_to_user(void __user *to,
struct task_struct *task)
{
- double buf[ELF_NFPREG];
+ u64 buf[ELF_NFPREG];
int i;
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
buf[i] = task->thread.TS_TRANS_FPR(i);
- memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
+ buf[i] = task->thread.transact_fp.fpscr;
return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
unsigned long copy_transact_fpr_from_user(struct task_struct *task,
void __user *from)
{
- double buf[ELF_NFPREG];
+ u64 buf[ELF_NFPREG];
int i;
if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
return 1;
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
task->thread.TS_TRANS_FPR(i) = buf[i];
- memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));
+ task->thread.transact_fp.fpscr = buf[i];
return 0;
}
@@ -347,25 +347,25 @@ unsigned long copy_transact_fpr_from_user(struct task_struct *task,
unsigned long copy_transact_vsx_to_user(void __user *to,
struct task_struct *task)
{
- double buf[ELF_NVSRHALFREG];
+ u64 buf[ELF_NVSRHALFREG];
int i;
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < ELF_NVSRHALFREG; i++)
- buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
+ buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}
unsigned long copy_transact_vsx_from_user(struct task_struct *task,
void __user *from)
{
- double buf[ELF_NVSRHALFREG];
+ u64 buf[ELF_NVSRHALFREG];
int i;
if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
return 1;
for (i = 0; i < ELF_NVSRHALFREG ; i++)
- task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -373,14 +373,14 @@ unsigned long copy_transact_vsx_from_user(struct task_struct *task,
inline unsigned long copy_fpr_to_user(void __user *to,
struct task_struct *task)
{
- return __copy_to_user(to, task->thread.fpr,
+ return __copy_to_user(to, task->thread.fp_state.fpr,
ELF_NFPREG * sizeof(double));
}
inline unsigned long copy_fpr_from_user(struct task_struct *task,
void __user *from)
{
- return __copy_from_user(task->thread.fpr, from,
+ return __copy_from_user(task->thread.fp_state.fpr, from,
ELF_NFPREG * sizeof(double));
}
@@ -388,14 +388,14 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task,
inline unsigned long copy_transact_fpr_to_user(void __user *to,
struct task_struct *task)
{
- return __copy_to_user(to, task->thread.transact_fpr,
+ return __copy_to_user(to, task->thread.transact_fp.fpr,
ELF_NFPREG * sizeof(double));
}
inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
void __user *from)
{
- return __copy_from_user(task->thread.transact_fpr, from,
+ return __copy_from_user(task->thread.transact_fp.fpr, from,
ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -423,7 +423,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
/* save altivec registers */
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
- if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+ if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
/* set MSR_VEC in the saved MSR value to indicate that
@@ -540,17 +540,17 @@ static int save_tm_user_regs(struct pt_regs *regs,
/* save altivec registers */
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
- if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+ if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
if (msr & MSR_VEC) {
if (__copy_to_user(&tm_frame->mc_vregs,
- current->thread.transact_vr,
+ &current->thread.transact_vr,
ELF_NVRREG * sizeof(vector128)))
return 1;
} else {
if (__copy_to_user(&tm_frame->mc_vregs,
- current->thread.vr,
+ &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
}
@@ -698,11 +698,12 @@ static long restore_user_regs(struct pt_regs *regs,
regs->msr &= ~MSR_VEC;
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
- if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+ if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
sizeof(sr->mc_vregs)))
return 1;
} else if (current->thread.used_vr)
- memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
+ memset(&current->thread.vr_state, 0,
+ ELF_NVRREG * sizeof(vector128));
/* Always get VRSAVE back */
if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
@@ -728,7 +729,7 @@ static long restore_user_regs(struct pt_regs *regs,
return 1;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++)
- current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
/*
* force the process to reload the FP registers from
@@ -804,15 +805,16 @@ static long restore_tm_user_regs(struct pt_regs *regs,
regs->msr &= ~MSR_VEC;
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
- if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+ if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
sizeof(sr->mc_vregs)) ||
- __copy_from_user(current->thread.transact_vr,
+ __copy_from_user(&current->thread.transact_vr,
&tm_sr->mc_vregs,
sizeof(sr->mc_vregs)))
return 1;
} else if (current->thread.used_vr) {
- memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
- memset(current->thread.transact_vr, 0,
+ memset(&current->thread.vr_state, 0,
+ ELF_NVRREG * sizeof(vector128));
+ memset(&current->thread.transact_vr, 0,
ELF_NVRREG * sizeof(vector128));
}
@@ -844,8 +846,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
return 1;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++) {
- current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
- current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
}
#endif /* CONFIG_VSX */
@@ -1036,7 +1038,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
if (__put_user(0, &rt_sf->uc.uc_link))
goto badframe;
- current->thread.fpscr.val = 0; /* turn off all fp exceptions */
+ current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
/* create a stack frame for the caller of the handler */
newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
@@ -1468,7 +1470,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
regs->link = tramp;
- current->thread.fpscr.val = 0; /* turn off all fp exceptions */
+ current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
/* create a stack frame for the caller of the handler */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
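Why these helpers stage through a local buffer at all: with VSX, thread FP state is laid out as fpr[32][2] (the FP value in word 0, the VSX low half in word 1), while the signal frame wants 32 contiguous doubles followed by the FPSCR. A compact model of the gathering step in copy_fpr_to_user() (hypothetical function name; TS_FPROFFSET is 0):

    #include <stdint.h>

    #define ELF_NFPREG   33   /* 32 FPRs + FPSCR */
    #define TS_FPROFFSET 0

    /* Gather the strided fpr[i][0] values into the contiguous image the
     * sigframe expects; the real code then __copy_to_user()s buf. */
    static void gather_fp_image(const uint64_t fpr[32][2], uint64_t fpscr,
                                uint64_t buf[ELF_NFPREG])
    {
        for (int i = 0; i < ELF_NFPREG - 1; i++)
            buf[i] = fpr[i][TS_FPROFFSET];
        buf[ELF_NFPREG - 1] = fpscr;
    }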
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 36b1d1d..df5b948 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -103,7 +103,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
- err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
+ err |= __copy_to_user(v_regs, &current->thread.vr_state,
+ 33 * sizeof(vector128));
/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
* contains valid data.
*/
@@ -201,18 +202,18 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
- err |= __copy_to_user(v_regs, current->thread.vr,
+ err |= __copy_to_user(v_regs, &current->thread.vr_state,
33 * sizeof(vector128));
/* If VEC was enabled there are transactional VRs valid too,
* else they're a copy of the checkpointed VRs.
*/
if (msr & MSR_VEC)
err |= __copy_to_user(tm_v_regs,
- current->thread.transact_vr,
+ &current->thread.transact_vr,
33 * sizeof(vector128));
else
err |= __copy_to_user(tm_v_regs,
- current->thread.vr,
+ &current->thread.vr_state,
33 * sizeof(vector128));
/* set MSR_VEC in the MSR value in the frame to indicate
@@ -355,10 +356,10 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != NULL && (msr & MSR_VEC) != 0)
- err |= __copy_from_user(current->thread.vr, v_regs,
+ err |= __copy_from_user(&current->thread.vr_state, v_regs,
33 * sizeof(vector128));
else if (current->thread.used_vr)
- memset(current->thread.vr, 0, 33 * sizeof(vector128));
+ memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
/* Always get VRSAVE back */
if (v_regs != NULL)
err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
@@ -380,7 +381,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= copy_vsx_from_user(current, v_regs);
else
for (i = 0; i < 32 ; i++)
- current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif
return err;
}
@@ -474,14 +475,14 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
- err |= __copy_from_user(current->thread.vr, v_regs,
+ err |= __copy_from_user(&current->thread.vr_state, v_regs,
33 * sizeof(vector128));
- err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
+ err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
33 * sizeof(vector128));
}
else if (current->thread.used_vr) {
- memset(current->thread.vr, 0, 33 * sizeof(vector128));
- memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
+ memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
+ memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
}
/* Always get VRSAVE back */
if (v_regs != NULL && tm_v_regs != NULL) {
@@ -513,8 +514,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
err |= copy_transact_vsx_from_user(current, tm_v_regs);
} else {
for (i = 0; i < 32 ; i++) {
- current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
- current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
}
}
#endif
@@ -753,7 +754,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
goto badframe;
/* Make sure signal handler doesn't get spurious FP exceptions */
- current->thread.fpscr.val = 0;
+ current->thread.fp_state.fpscr = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
* just indicates to userland that we were doing a transaction, but we
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index cd809ea..761af4f 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -12,16 +12,15 @@
#include <asm/reg.h>
#ifdef CONFIG_VSX
-/* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \
+/* See fpu.S, this is borrowed from there */
+#define __SAVE_32FPRS_VSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
- SAVE_32FPRS_TRANSACT(n,base); \
+ SAVE_32FPRS(n,base); \
b 3f; \
-2: SAVE_32VSRS_TRANSACT(n,c,base); \
+2: SAVE_32VSRS(n,c,base); \
3:
-/* ...and this is just plain borrowed from there. */
#define __REST_32FPRS_VSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@@ -31,11 +30,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: REST_32VSRS(n,c,base); \
3:
#else
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) SAVE_32FPRS_TRANSACT(n, base)
-#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base)
+#define __SAVE_32FPRS_VSRS(n,c,base) SAVE_32FPRS(n, base)
+#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base)
#endif
-#define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \
- __SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base)
+#define SAVE_32FPRS_VSRS(n,c,base) \
+ __SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
#define REST_32FPRS_VSRS(n,c,base) \
__REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
@@ -157,10 +156,11 @@ _GLOBAL(tm_reclaim)
andis. r0, r4, MSR_VEC@h
beq dont_backup_vec
- SAVE_32VRS_TRANSACT(0, r6, r3) /* r6 scratch, r3 thread */
+ addi r7, r3, THREAD_TRANSACT_VRSTATE
+ SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */
mfvscr vr0
- li r6, THREAD_TRANSACT_VSCR
- stvx vr0, r3, r6
+ li r6, VRSTATE_VSCR
+ stvx vr0, r7, r6
dont_backup_vec:
mfspr r0, SPRN_VRSAVE
std r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -168,10 +168,11 @@ dont_backup_vec:
andi. r0, r4, MSR_FP
beq dont_backup_fp
- SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3) /* r6 scratch, r3 thread */
+ addi r7, r3, THREAD_TRANSACT_FPSTATE
+ SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 transact fp state */
mffs fr0
- stfd fr0,THREAD_TRANSACT_FPSCR(r3)
+ stfd fr0,FPSTATE_FPSCR(r7)
dont_backup_fp:
/* The moment we treclaim, ALL of our GPRs will switch
@@ -358,10 +359,11 @@ _GLOBAL(tm_recheckpoint)
andis. r0, r4, MSR_VEC@h
beq dont_restore_vec
- li r5, THREAD_VSCR
- lvx vr0, r3, r5
+ addi r8, r3, THREAD_VRSTATE
+ li r5, VRSTATE_VSCR
+ lvx vr0, r8, r5
mtvscr vr0
- REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
+ REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */
dont_restore_vec:
ld r5, THREAD_VRSAVE(r3)
mtspr SPRN_VRSAVE, r5
@@ -370,9 +372,10 @@ dont_restore_vec:
andi. r0, r4, MSR_FP
beq dont_restore_fp
- lfd fr0, THREAD_FPSCR(r3)
+ addi r8, r3, THREAD_FPSTATE
+ lfd fr0, FPSTATE_FPSCR(r8)
MTFSF_L(fr0)
- REST_32FPRS_VSRS(0, R4, R3)
+ REST_32FPRS_VSRS(0, R4, R8)
dont_restore_fp:
mtmsr r6 /* FP/Vec off again! */
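The pattern throughout this file after the change: compute a base pointing at the transactional (or live) sub-struct, then run the ordinary save/restore macros against it. A loose C rendering of the reclaim-side FP sequence above, with hypothetical stand-ins for the macro and the mffs instruction:

    #include <stdint.h>

    struct thread_fp_state { uint64_t fpr[32][2]; uint64_t fpscr; };
    struct thread_example  { struct thread_fp_state transact_fp; };

    extern void save_32fprs_vsrs(struct thread_fp_state *dst); /* SAVE_32FPRS_VSRS stand-in */
    extern uint64_t current_fpscr(void);                       /* mffs stand-in */

    static void tm_reclaim_fp_sketch(struct thread_example *thread)
    {
        struct thread_fp_state *tfp = &thread->transact_fp; /* addi r7,r3,THREAD_TRANSACT_FPSTATE */
        save_32fprs_vsrs(tfp);                              /* SAVE_32FPRS_VSRS(0, R6, R7) */
        tfp->fpscr = current_fpscr();                       /* mffs fr0; stfd fr0,FPSTATE_FPSCR(r7) */
    }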
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 31840ee..6bf52dc 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -816,7 +816,7 @@ static void parse_fpe(struct pt_regs *regs)
flush_fp_to_thread(current);
- code = __parse_fpscr(current->thread.fpscr.val);
+ code = __parse_fpscr(current->thread.fp_state.fpscr);
_exception(SIGFPE, regs, code, regs->nip);
}
@@ -1076,7 +1076,7 @@ static int emulate_math(struct pt_regs *regs)
return 0;
case 1: {
int code = 0;
- code = __parse_fpscr(current->thread.fpscr.val);
+ code = __parse_fpscr(current->thread.fp_state.fpscr);
_exception(SIGFPE, regs, code, regs->nip);
return 0;
}
@@ -1378,8 +1378,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-extern void do_load_up_fpu(struct pt_regs *regs);
-
void fp_unavailable_tm(struct pt_regs *regs)
{
/* Note: This does not handle any kind of FP laziness. */
@@ -1410,8 +1408,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
}
#ifdef CONFIG_ALTIVEC
-extern void do_load_up_altivec(struct pt_regs *regs);
-
void altivec_unavailable_tm(struct pt_regs *regs)
{
/* See the comments in fp_unavailable_tm(). This function operates
@@ -1642,7 +1638,7 @@ void altivec_assist_exception(struct pt_regs *regs)
/* XXX quick hack for now: set the non-Java bit in the VSCR */
printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
"in %s at %lx\n", current->comm, regs->nip);
- current->thread.vscr.u[3] |= 0x10000;
+ current->thread.vr_state.vscr.u[3] |= 0x10000;
}
}
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 604d094..c4bfadb 100644
--- a/arch/powerpc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
@@ -271,7 +271,7 @@ int emulate_altivec(struct pt_regs *regs)
vb = (instr >> 11) & 0x1f;
vc = (instr >> 6) & 0x1f;
- vrs = current->thread.vr;
+ vrs = current->thread.vr_state.vr;
switch (instr & 0x3f) {
case 10:
switch (vc) {
@@ -320,12 +320,12 @@ int emulate_altivec(struct pt_regs *regs)
case 14: /* vctuxs */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
- &current->thread.vscr.u[3]);
+ &current->thread.vr_state.vscr.u[3]);
break;
case 15: /* vctsxs */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
- &current->thread.vscr.u[3]);
+ &current->thread.vr_state.vscr.u[3]);
break;
default:
return -EINVAL;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 9e20999..0458a9a 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -8,29 +8,6 @@
#include <asm/ptrace.h>
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_altivec from C.
- * void do_load_up_altivec(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_altivec)
- mflr r0
- std r0, 16(r1)
- stdu r1, -112(r1)
-
- subi r6, r3, STACK_FRAME_OVERHEAD
- /* load_up_altivec expects r12=MSR, r13=PACA, and returns
- * with r12 = new MSR.
- */
- ld r12,_MSR(r6)
- GET_PACA(r13)
- bl load_up_altivec
- std r12,_MSR(r6)
-
- ld r0, 112+16(r1)
- addi r1, r1, 112
- mtlr r0
- blr
-
/* void do_load_up_transact_altivec(struct thread_struct *thread)
*
* This is similar to load_up_altivec but for the transactional version of the
@@ -46,10 +23,11 @@ _GLOBAL(do_load_up_transact_altivec)
li r4,1
stw r4,THREAD_USED_VR(r3)
- li r10,THREAD_TRANSACT_VSCR
+ li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
lvx vr0,r10,r3
mtvscr vr0
- REST_32VRS_TRANSACT(0,r4,r3)
+ addi r10,r3,THREAD_TRANSACT_VRSTATE
+ REST_32VRS(0,r4,r10)
/* Disable VEC again. */
MTMSRD(r6)
@@ -59,12 +37,36 @@ _GLOBAL(do_load_up_transact_altivec)
#endif
/*
- * load_up_altivec(unused, unused, tsk)
+ * Load state from memory into VMX registers including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(load_vr_state)
+ li r4,VRSTATE_VSCR
+ lvx vr0,r4,r3
+ mtvscr vr0
+ REST_32VRS(0,r4,r3)
+ blr
+
+/*
+ * Store VMX state into memory, including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(store_vr_state)
+ SAVE_32VRS(0, r4, r3)
+ mfvscr vr0
+ li r4, VRSTATE_VSCR
+ stvx vr0, r4, r3
+ blr
+
+/*
* Disable VMX for the task which had it previously,
* and save its vector registers in its thread_struct.
* Enables the VMX for use in the kernel on return.
* On SMP we know the VMX is free, since we give it up every
* switch (ie, no lazy save of the vector registers).
+ *
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
*/
_GLOBAL(load_up_altivec)
mfmsr r5 /* grab the current MSR */
@@ -90,10 +92,11 @@ _GLOBAL(load_up_altivec)
/* Save VMX state to last_task_used_altivec's THREAD struct */
toreal(r4)
addi r4,r4,THREAD
- SAVE_32VRS(0,r5,r4)
+ addi r6,r4,THREAD_VRSTATE
+ SAVE_32VRS(0,r5,r6)
mfvscr vr0
- li r10,THREAD_VSCR
- stvx vr0,r10,r4
+ li r10,VRSTATE_VSCR
+ stvx vr0,r10,r6
/* Disable VMX for last_task_used_altivec */
PPC_LL r5,PT_REGS(r4)
toreal(r5)
@@ -125,12 +128,13 @@ _GLOBAL(load_up_altivec)
oris r12,r12,MSR_VEC@h
std r12,_MSR(r1)
#endif
+ addi r6,r5,THREAD_VRSTATE
li r4,1
- li r10,THREAD_VSCR
+ li r10,VRSTATE_VSCR
stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r5
+ lvx vr0,r10,r6
mtvscr vr0
- REST_32VRS(0,r4,r5)
+ REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
/* Update last_task_used_altivec to 'current' */
subi r4,r5,THREAD /* Back to 'current' */
@@ -165,12 +169,16 @@ _GLOBAL(giveup_altivec)
PPC_LCMPI 0,r3,0
beqlr /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
+ PPC_LL r7,THREAD_VRSAVEAREA(r3)
PPC_LL r5,PT_REGS(r3)
- PPC_LCMPI 0,r5,0
- SAVE_32VRS(0,r4,r3)
+ PPC_LCMPI 0,r7,0
+ bne 2f
+ addi r7,r3,THREAD_VRSTATE
+2: PPC_LCMPI 0,r5,0
+ SAVE_32VRS(0,r4,r7)
mfvscr vr0
- li r4,THREAD_VSCR
- stvx vr0,r4,r3
+ li r4,VRSTATE_VSCR
+ stvx vr0,r4,r7
beq 1f
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
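The VMX layout these offsets index, sketched below. VSCR is kept as a full vector128 even though only one word is architected, because mfvscr/mtvscr move it through a vector register rather than a GPR, which is why the code above stores it with stvx at offset VRSTATE_VSCR:

    #include <stdint.h>

    typedef struct { uint32_t u[4]; } vector128 __attribute__((aligned(16)));

    struct thread_vr_state {
        vector128 vr[32];    /* vr0..vr31 */
        vector128 vscr;      /* stored/loaded via stvx/lvx, see above */
    };

    /* VRSTATE_VSCR == offsetof(struct thread_vr_state, vscr) == 32 * 16 == 512 */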
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 6846ebc..a0299bb 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -444,7 +444,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
- u64 *thread_fpr = (u64*)t->fpr;
+ u64 *thread_fpr = &t->fp_state.fpr[0][0];
int i;
/*
@@ -466,14 +466,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
/*
* Note that on CPUs with VSX, giveup_fpu stores
* both the traditional FP registers and the added VSX
- * registers into thread.fpr[].
+ * registers into thread.fp_state.fpr[].
*/
if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current);
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
- vcpu->arch.fpscr = t->fpscr.val;
+ vcpu->arch.fpscr = t->fp_state.fpscr;
#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
@@ -486,8 +486,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
if (msr & MSR_VEC) {
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
- memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
- vcpu->arch.vscr = t->vscr;
+ memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
+ vcpu->arch.vscr = t->vr_state.vscr;
}
#endif
@@ -539,7 +539,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
- u64 *thread_fpr = (u64*)t->fpr;
+ u64 *thread_fpr = &t->fp_state.fpr[0][0];
int i;
/* When we have paired singles, we emulate in software */
@@ -584,15 +584,15 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
- t->fpscr.val = vcpu->arch.fpscr;
+ t->fp_state.fpscr = vcpu->arch.fpscr;
t->fpexc_mode = 0;
kvmppc_load_up_fpu();
}
if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
- memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
- t->vscr = vcpu->arch.vscr;
+ memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+ t->vr_state.vscr = vcpu->arch.vscr;
t->vrsave = -1;
kvmppc_load_up_altivec();
#endif
@@ -1114,12 +1114,10 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret;
- double fpr[32][TS_FPRWIDTH];
- unsigned int fpscr;
+ struct thread_fp_state fp;
int fpexc_mode;
#ifdef CONFIG_ALTIVEC
- vector128 vr[32];
- vector128 vscr;
+ struct thread_vr_state vr;
unsigned long uninitialized_var(vrsave);
int used_vr;
#endif
@@ -1148,8 +1146,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Save FPU state in stack */
if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current);
- memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
- fpscr = current->thread.fpscr.val;
+ fp = current->thread.fp_state;
fpexc_mode = current->thread.fpexc_mode;
#ifdef CONFIG_ALTIVEC
@@ -1158,8 +1155,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (used_vr) {
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
- memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
- vscr = current->thread.vscr;
+ vr = current->thread.vr_state;
vrsave = current->thread.vrsave;
}
#endif
@@ -1191,15 +1187,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
current->thread.regs->msr = ext_msr;
/* Restore FPU/VSX state from stack */
- memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
- current->thread.fpscr.val = fpscr;
+ current->thread.fp_state = fp;
current->thread.fpexc_mode = fpexc_mode;
#ifdef CONFIG_ALTIVEC
/* Restore Altivec state from stack */
if (used_vr && current->thread.used_vr) {
- memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
- current->thread.vscr = vscr;
+ current->thread.vr_state = vr;
current->thread.vrsave = vrsave;
}
current->thread.used_vr = used_vr;
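The simplification in kvmppc_vcpu_run() is that saving and restoring host FP/VMX context around guest entry is now a plain struct assignment instead of a memcpy plus a separate scalar copy for fpscr/vscr, which works because thread_fp_state and thread_vr_state are plain data aggregates. In sketch form, quoting the shapes from the diff:

    /* Before (two steps):
     *   memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
     *   fpscr = current->thread.fpscr.val;
     * After (one struct copy carries registers and FPSCR together): */
    struct thread_fp_state fp = current->thread.fp_state;  /* save */
    /* ... run the guest ... */
    current->thread.fp_state = fp;                         /* restore */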
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 6ec3920..d95438e 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -681,9 +681,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
int ret, s;
struct thread_struct thread;
#ifdef CONFIG_PPC_FPU
- unsigned int fpscr;
+ struct thread_fp_state fp;
int fpexc_mode;
- u64 fpr[32];
#endif
#ifdef CONFIG_ALTIVEC
@@ -706,13 +705,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_FPU
/* Save userspace FPU state in stack */
enable_kernel_fp();
- memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
- fpscr = current->thread.fpscr.val;
+ fp = current->thread.fp_state;
fpexc_mode = current->thread.fpexc_mode;
/* Restore guest FPU state to thread */
- memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
- current->thread.fpscr.val = vcpu->arch.fpscr;
+ memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
+ sizeof(vcpu->arch.fpr));
+ current->thread.fp_state.fpscr = vcpu->arch.fpscr;
/*
* Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -768,12 +767,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu->fpu_active = 0;
/* Save guest FPU state from thread */
- memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
- vcpu->arch.fpscr = current->thread.fpscr.val;
+ memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
+ sizeof(vcpu->arch.fpr));
+ vcpu->arch.fpscr = current->thread.fp_state.fpscr;
/* Restore userspace FPU state from stack */
- memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
- current->thread.fpscr.val = fpscr;
+ current->thread.fp_state = fp;
current->thread.fpexc_mode = fpexc_mode;
#endif
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 47262bf..0660c05 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -445,8 +445,8 @@ void kvmppc_prepare_for_emulation(struct kvm_vcpu *vcpu, unsigned int *exit_nr)
{
}
-struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
- unsigned int id)
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
+ unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
struct kvm_vcpu *vcpu;
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 3597b6f..459dc87 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -354,8 +354,8 @@ void kvmppc_prepare_for_emulation(struct kvm_vcpu *vcpu, unsigned int *exit_nr)
kunmap_atomic((u32 *)eaddr);
}
-struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
- unsigned int id)
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
+ unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
struct kvm_vcpu *vcpu;