author     Anton Blanchard <anton@samba.org>        2015-10-29 00:44:05 (GMT)
committer  Michael Ellerman <mpe@ellerman.id.au>    2015-12-01 02:52:25 (GMT)
commit     dc4fbba11e4661a6a77a1f89ba32f9082e6395ff (patch)
tree       567037dc8e93063615e558eda1c80f8a82154749 /arch/powerpc/lib
parent     a0e72cf12b1a1f159b6822ed2e1e41893d996fc7 (diff)
download   linux-dc4fbba11e4661a6a77a1f89ba32f9082e6395ff.tar.xz
powerpc: Create disable_kernel_{fp,altivec,vsx,spe}()
The enable_kernel_*() functions leave the relevant MSR bits enabled until we exit the kernel some time later. Create disable versions that wrap the kernel use of FP, Altivec, VSX or SPE.

While we don't want to disable the facilities in the normal case for performance reasons (MSR writes are slow), a debug boot option will use these hooks to do exactly that and catch bad uses in other areas of the kernel.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
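For illustration only, here is a minimal sketch of how one of the new disable helpers could behave under the debug boot option the message describes. It is an assumption for clarity, not the implementation added by this series: the toggle name vmx_strict_enable is hypothetical, while mfmsr(), mtmsr() and MSR_VEC are existing powerpc primitives.

#include <linux/types.h>
#include <asm/reg.h>

/* Hypothetical flag set from a debug boot option (illustrative only). */
static bool vmx_strict_enable;

/*
 * Sketch: normally a no-op, since MSR writes are slow; with the debug
 * option active it clears MSR_VEC, so any later unwrapped Altivec use
 * in the kernel faults immediately and is caught.
 */
static inline void disable_kernel_altivec(void)
{
	if (vmx_strict_enable)
		mtmsr(mfmsr() & ~MSR_VEC);
}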
Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/vmx-helper.c   2
-rw-r--r--  arch/powerpc/lib/xor_vmx.c      4
2 files changed, 6 insertions, 0 deletions
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index ac93a3b..b27e030 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -46,6 +46,7 @@ int enter_vmx_usercopy(void)
  */
 int exit_vmx_usercopy(void)
 {
+	disable_kernel_altivec();
 	pagefault_enable();
 	preempt_enable();
 	return 0;
@@ -70,6 +71,7 @@ int enter_vmx_copy(void)
  */
 void *exit_vmx_copy(void *dest)
 {
+	disable_kernel_altivec();
 	preempt_enable();
 	return dest;
 }
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
index e905f7c..07f49f1 100644
--- a/arch/powerpc/lib/xor_vmx.c
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -74,6 +74,7 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
 		v2 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_2);
@@ -102,6 +103,7 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
 		v3 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_3);
@@ -135,6 +137,7 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
 		v4 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_4);
@@ -172,6 +175,7 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
 		v5 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_5);
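Combined with the (unchanged) entry side of these routines, the pattern this patch establishes for kernel Altivec users looks roughly like the sketch below. example_altivec_user() is a made-up name, and the preempt_disable()/enable_kernel_altivec() calls at the top are assumed from the existing code rather than shown in the hunks above.

#include <linux/preempt.h>
#include <asm/switch_to.h>	/* enable_kernel_altivec(), disable_kernel_altivec() */

/* Hypothetical kernel Altivec (VMX) user following the wrap pattern. */
static void example_altivec_user(void)
{
	preempt_disable();		/* must not be context switched while using VMX */
	enable_kernel_altivec();	/* make MSR_VEC available to kernel code */

	/* ... Altivec/VMX work ... */

	disable_kernel_altivec();	/* new: lets the debug mode revoke MSR_VEC */
	preempt_enable();
}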