Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--   arch/powerpc/lib/Makefile            |  21
-rw-r--r--   arch/powerpc/lib/checksum_64.S       |  58
-rw-r--r--   arch/powerpc/lib/copyuser_power7.S   |  54
-rw-r--r--   arch/powerpc/lib/locks.c             |   4
-rw-r--r--   arch/powerpc/lib/memcpy_power7.S     |  55
-rw-r--r--   arch/powerpc/lib/sstep.c             | 110
-rw-r--r--   arch/powerpc/lib/xor_vmx.c           | 177
7 files changed, 393 insertions(+), 86 deletions(-)
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 4504332..95a20e1 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -10,15 +10,23 @@ CFLAGS_REMOVE_code-patching.o = -pg
CFLAGS_REMOVE_feature-fixups.o = -pg
obj-y := string.o alloc.o \
- checksum_$(CONFIG_WORD_SIZE).o crtsavres.o
+ crtsavres.o
obj-$(CONFIG_PPC32) += div64.o copy_32.o
obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
- memcpy_64.o usercopy_64.o mem_64.o string.o \
- checksum_wrappers_64.o hweight_64.o \
- copyuser_power7.o string_64.o copypage_power7.o \
- memcpy_power7.o
+ usercopy_64.o mem_64.o string.o \
+ hweight_64.o \
+ copyuser_power7.o string_64.o copypage_power7.o
+ifeq ($(CONFIG_GENERIC_CSUM),)
+obj-y += checksum_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC64) += checksum_wrappers_64.o
+endif
+
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),)
+obj-$(CONFIG_PPC64) += memcpy_power7.o memcpy_64.o
+endif
+
obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o
ifeq ($(CONFIG_PPC64),y)
@@ -31,3 +39,6 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
obj-y += code-patching.o
obj-y += feature-fixups.o
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
+
+obj-$(CONFIG_ALTIVEC) += xor_vmx.o
+CFLAGS_xor_vmx.o += -maltivec -mabi=altivec
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 167f725..57a0720 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
blr
- .macro source
+ .macro srcnr
100:
.section __ex_table,"a"
.align 3
- .llong 100b,.Lsrc_error
+ .llong 100b,.Lsrc_error_nr
.previous
.endm
- .macro dest
+ .macro source
+150:
+ .section __ex_table,"a"
+ .align 3
+ .llong 150b,.Lsrc_error
+ .previous
+ .endm
+
+ .macro dstnr
200:
.section __ex_table,"a"
.align 3
- .llong 200b,.Ldest_error
+ .llong 200b,.Ldest_error_nr
+ .previous
+ .endm
+
+ .macro dest
+250:
+ .section __ex_table,"a"
+ .align 3
+ .llong 250b,.Ldest_error
.previous
.endm
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
beq .Lcopy_aligned
- li r7,4
- sub r6,r7,r6
+ li r9,4
+ sub r6,r9,r6
mtctr r6
1:
-source; lhz r6,0(r3) /* align to doubleword */
+srcnr; lhz r6,0(r3) /* align to doubleword */
subi r5,r5,2
addi r3,r3,2
adde r0,r0,r6
-dest; sth r6,0(r4)
+dstnr; sth r6,0(r4)
addi r4,r4,2
bdnz 1b
@@ -392,10 +408,10 @@ dest; std r16,56(r4)
mtctr r6
3:
-source; ld r6,0(r3)
+srcnr; ld r6,0(r3)
addi r3,r3,8
adde r0,r0,r6
-dest; std r6,0(r4)
+dstnr; std r6,0(r4)
addi r4,r4,8
bdnz 3b
@@ -405,10 +421,10 @@ dest; std r6,0(r4)
srdi. r6,r5,2
beq .Lcopy_tail_halfword
-source; lwz r6,0(r3)
+srcnr; lwz r6,0(r3)
addi r3,r3,4
adde r0,r0,r6
-dest; stw r6,0(r4)
+dstnr; stw r6,0(r4)
addi r4,r4,4
subi r5,r5,4
@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
srdi. r6,r5,1
beq .Lcopy_tail_byte
-source; lhz r6,0(r3)
+srcnr; lhz r6,0(r3)
addi r3,r3,2
adde r0,r0,r6
-dest; sth r6,0(r4)
+dstnr; sth r6,0(r4)
addi r4,r4,2
subi r5,r5,2
@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
andi. r6,r5,1
beq .Lcopy_finish
-source; lbz r6,0(r3)
+srcnr; lbz r6,0(r3)
sldi r9,r6,8 /* Pad the byte out to 16 bits */
adde r0,r0,r9
-dest; stb r6,0(r4)
+dstnr; stb r6,0(r4)
.Lcopy_finish:
addze r0,r0 /* add in final carry */
@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
blr
.Lsrc_error:
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ addi r1,r1,STACKFRAMESIZE
+.Lsrc_error_nr:
cmpdi 0,r7,0
beqlr
li r6,-EFAULT
@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
blr
.Ldest_error:
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ addi r1,r1,STACKFRAMESIZE
+.Ldest_error_nr:
cmpdi 0,r8,0
beqlr
li r6,-EFAULT
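
The new srcnr/dstnr macros above mark accesses that can fault before csum_partial_copy_generic has saved r14-r16 and allocated its stack frame, so their fixup labels (.Lsrc_error_nr/.Ldest_error_nr) skip the register restore that .Lsrc_error/.Ldest_error now perform. The copy loops themselves accumulate a 64-bit running sum through the adde/addze carry chain; as a rough C model (not the kernel's code), folding that sum down to the final 16-bit ones-complement checksum looks like this:

#include <stdint.h>

/*
 * Rough C model (not the kernel routine) of folding the 64-bit running
 * sum produced by the adde/addze chain in csum_partial_copy_generic
 * down to a 16-bit ones-complement checksum.
 */
static uint16_t csum_fold64(uint64_t sum)
{
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* fold 64 -> 33 bits */
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* fold the carry back in */
	sum = (sum & 0xffffULL) + (sum >> 16);		/* fold 32 -> 17 bits */
	sum = (sum & 0xffffULL) + (sum >> 16);
	return (uint16_t)~sum;
}
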
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index d1f1179..e8e9c36 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -19,6 +19,14 @@
*/
#include <asm/ppc_asm.h>
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
+#endif
+
.macro err1
100:
.section __ex_table,"a"
@@ -552,13 +560,13 @@ err3; stw r7,4(r3)
li r10,32
li r11,48
- lvsl vr16,0,r4 /* Setup permute control vector */
+ LVS(vr16,0,r4) /* Setup permute control vector */
err3; lvx vr0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
err3; lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
addi r4,r4,16
err3; stvx vr8,r0,r3
addi r3,r3,16
@@ -566,9 +574,9 @@ err3; stvx vr8,r0,r3
5: bf cr7*4+2,6f
err3; lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
err3; lvx vr0,r4,r9
- vperm vr9,vr1,vr0,vr16
+ VPERM(vr9,vr1,vr0,vr16)
addi r4,r4,32
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -576,13 +584,13 @@ err3; stvx vr9,r3,r9
6: bf cr7*4+1,7f
err3; lvx vr3,r0,r4
- vperm vr8,vr0,vr3,vr16
+ VPERM(vr8,vr0,vr3,vr16)
err3; lvx vr2,r4,r9
- vperm vr9,vr3,vr2,vr16
+ VPERM(vr9,vr3,vr2,vr16)
err3; lvx vr1,r4,r10
- vperm vr10,vr2,vr1,vr16
+ VPERM(vr10,vr2,vr1,vr16)
err3; lvx vr0,r4,r11
- vperm vr11,vr1,vr0,vr16
+ VPERM(vr11,vr1,vr0,vr16)
addi r4,r4,64
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -611,21 +619,21 @@ err3; stvx vr11,r3,r11
.align 5
8:
err4; lvx vr7,r0,r4
- vperm vr8,vr0,vr7,vr16
+ VPERM(vr8,vr0,vr7,vr16)
err4; lvx vr6,r4,r9
- vperm vr9,vr7,vr6,vr16
+ VPERM(vr9,vr7,vr6,vr16)
err4; lvx vr5,r4,r10
- vperm vr10,vr6,vr5,vr16
+ VPERM(vr10,vr6,vr5,vr16)
err4; lvx vr4,r4,r11
- vperm vr11,vr5,vr4,vr16
+ VPERM(vr11,vr5,vr4,vr16)
err4; lvx vr3,r4,r12
- vperm vr12,vr4,vr3,vr16
+ VPERM(vr12,vr4,vr3,vr16)
err4; lvx vr2,r4,r14
- vperm vr13,vr3,vr2,vr16
+ VPERM(vr13,vr3,vr2,vr16)
err4; lvx vr1,r4,r15
- vperm vr14,vr2,vr1,vr16
+ VPERM(vr14,vr2,vr1,vr16)
err4; lvx vr0,r4,r16
- vperm vr15,vr1,vr0,vr16
+ VPERM(vr15,vr1,vr0,vr16)
addi r4,r4,128
err4; stvx vr8,r0,r3
err4; stvx vr9,r3,r9
@@ -649,13 +657,13 @@ err4; stvx vr15,r3,r16
bf cr7*4+1,9f
err3; lvx vr3,r0,r4
- vperm vr8,vr0,vr3,vr16
+ VPERM(vr8,vr0,vr3,vr16)
err3; lvx vr2,r4,r9
- vperm vr9,vr3,vr2,vr16
+ VPERM(vr9,vr3,vr2,vr16)
err3; lvx vr1,r4,r10
- vperm vr10,vr2,vr1,vr16
+ VPERM(vr10,vr2,vr1,vr16)
err3; lvx vr0,r4,r11
- vperm vr11,vr1,vr0,vr16
+ VPERM(vr11,vr1,vr0,vr16)
addi r4,r4,64
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -665,9 +673,9 @@ err3; stvx vr11,r3,r11
9: bf cr7*4+2,10f
err3; lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
err3; lvx vr0,r4,r9
- vperm vr9,vr1,vr0,vr16
+ VPERM(vr9,vr1,vr0,vr16)
addi r4,r4,32
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -675,7 +683,7 @@ err3; stvx vr9,r3,r9
10: bf cr7*4+3,11f
err3; lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
addi r4,r4,16
err3; stvx vr8,r0,r3
addi r3,r3,16
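
The LVS/VPERM macros let the same unaligned-copy path serve both endiannesses: on big-endian, lvsl builds a permute control vector and vperm VRT,VRA,VRB,VRC selects 16 consecutive bytes out of the 32-byte concatenation of two aligned loads; on little-endian, lvsr plus swapped vperm source operands produce the same bytes under the reversed lane numbering. A scalar sketch of the underlying technique (illustrative only; assumes a 16-byte-aligned destination, a length that is a multiple of 16, and a source that is not 16-byte aligned):

#include <stdint.h>
#include <string.h>

/*
 * Scalar model of the permute-based unaligned copy: load aligned
 * 16-byte blocks covering the source and take a sliding 16-byte
 * window out of each adjacent pair, as the lvx/VPERM/stvx loop does.
 */
static void copy_unaligned_model(unsigned char *dst,
				 const unsigned char *src, size_t len)
{
	size_t off = (uintptr_t)src & 15;	/* source misalignment */
	const unsigned char *asrc = src - off;	/* aligned block base */
	unsigned char prev[16], cur[16];

	memcpy(prev, asrc, 16);				/* models the first lvx */
	for (size_t done = 0; done < len; done += 16) {
		memcpy(cur, asrc + done + 16, 16);	/* next aligned lvx */
		for (int i = 0; i < 16; i++)		/* models VPERM: bytes off..off+15 of prev||cur */
			dst[done + i] = (off + i < 16) ? prev[off + i]
						       : cur[off + i - 16];
		memcpy(prev, cur, 16);			/* slide the window */
	}
}
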
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index bb7cfec..0c9c8d7 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -32,7 +32,7 @@ void __spin_yield(arch_spinlock_t *lock)
return;
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
- yield_count = lppaca_of(holder_cpu).yield_count;
+ yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
@@ -57,7 +57,7 @@ void __rw_yield(arch_rwlock_t *rw)
return; /* no write lock at present */
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
- yield_count = lppaca_of(holder_cpu).yield_count;
+ yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
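
The yield_count field in the lppaca is stored big-endian by the hypervisor, so a little-endian kernel has to byte-swap it before testing the low bit that distinguishes a preempted virtual CPU (odd count) from a running one (even count). A minimal host-side sketch, with made-up values, of why the parity test breaks without the swap:

#include <stdint.h>
#include <stdio.h>

/* Open-coded byte swap standing in for the kernel's be32_to_cpu(). */
static uint32_t be32_to_cpu_model(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	/*
	 * Memory image 00 00 00 03: big-endian yield_count == 3
	 * (odd, i.e. the holding vcpu is preempted).  A raw 32-bit
	 * load on a little-endian CPU sees 0x03000000 instead.
	 */
	uint32_t raw = 0x03000000;

	printf("without swap: %s\n", (raw & 1) ? "preempted" : "running");
	printf("with swap:    %s\n",
	       (be32_to_cpu_model(raw) & 1) ? "preempted" : "running");
	return 0;
}
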
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0663630..e4177db 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -20,6 +20,15 @@
#include <asm/ppc_asm.h>
_GLOBAL(memcpy_power7)
+
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
+#endif
+
#ifdef CONFIG_ALTIVEC
cmpldi r5,16
cmpldi cr1,r5,4096
@@ -485,13 +494,13 @@ _GLOBAL(memcpy_power7)
li r10,32
li r11,48
- lvsl vr16,0,r4 /* Setup permute control vector */
+ LVS(vr16,0,r4) /* Setup permute control vector */
lvx vr0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
addi r4,r4,16
stvx vr8,r0,r3
addi r3,r3,16
@@ -499,9 +508,9 @@ _GLOBAL(memcpy_power7)
5: bf cr7*4+2,6f
lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
lvx vr0,r4,r9
- vperm vr9,vr1,vr0,vr16
+ VPERM(vr9,vr1,vr0,vr16)
addi r4,r4,32
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -509,13 +518,13 @@ _GLOBAL(memcpy_power7)
6: bf cr7*4+1,7f
lvx vr3,r0,r4
- vperm vr8,vr0,vr3,vr16
+ VPERM(vr8,vr0,vr3,vr16)
lvx vr2,r4,r9
- vperm vr9,vr3,vr2,vr16
+ VPERM(vr9,vr3,vr2,vr16)
lvx vr1,r4,r10
- vperm vr10,vr2,vr1,vr16
+ VPERM(vr10,vr2,vr1,vr16)
lvx vr0,r4,r11
- vperm vr11,vr1,vr0,vr16
+ VPERM(vr11,vr1,vr0,vr16)
addi r4,r4,64
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -544,21 +553,21 @@ _GLOBAL(memcpy_power7)
.align 5
8:
lvx vr7,r0,r4
- vperm vr8,vr0,vr7,vr16
+ VPERM(vr8,vr0,vr7,vr16)
lvx vr6,r4,r9
- vperm vr9,vr7,vr6,vr16
+ VPERM(vr9,vr7,vr6,vr16)
lvx vr5,r4,r10
- vperm vr10,vr6,vr5,vr16
+ VPERM(vr10,vr6,vr5,vr16)
lvx vr4,r4,r11
- vperm vr11,vr5,vr4,vr16
+ VPERM(vr11,vr5,vr4,vr16)
lvx vr3,r4,r12
- vperm vr12,vr4,vr3,vr16
+ VPERM(vr12,vr4,vr3,vr16)
lvx vr2,r4,r14
- vperm vr13,vr3,vr2,vr16
+ VPERM(vr13,vr3,vr2,vr16)
lvx vr1,r4,r15
- vperm vr14,vr2,vr1,vr16
+ VPERM(vr14,vr2,vr1,vr16)
lvx vr0,r4,r16
- vperm vr15,vr1,vr0,vr16
+ VPERM(vr15,vr1,vr0,vr16)
addi r4,r4,128
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -582,13 +591,13 @@ _GLOBAL(memcpy_power7)
bf cr7*4+1,9f
lvx vr3,r0,r4
- vperm vr8,vr0,vr3,vr16
+ VPERM(vr8,vr0,vr3,vr16)
lvx vr2,r4,r9
- vperm vr9,vr3,vr2,vr16
+ VPERM(vr9,vr3,vr2,vr16)
lvx vr1,r4,r10
- vperm vr10,vr2,vr1,vr16
+ VPERM(vr10,vr2,vr1,vr16)
lvx vr0,r4,r11
- vperm vr11,vr1,vr0,vr16
+ VPERM(vr11,vr1,vr0,vr16)
addi r4,r4,64
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -598,9 +607,9 @@ _GLOBAL(memcpy_power7)
9: bf cr7*4+2,10f
lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
lvx vr0,r4,r9
- vperm vr9,vr1,vr0,vr16
+ VPERM(vr9,vr1,vr0,vr16)
addi r4,r4,32
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -608,7 +617,7 @@ _GLOBAL(memcpy_power7)
10: bf cr7*4+3,11f
lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
addi r4,r4,16
stvx vr8,r0,r3
addi r3,r3,16
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index e15c521..c0511c2 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -100,8 +100,10 @@ static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs
ea = (signed short) instr; /* sign-extend */
if (ra) {
ea += regs->gpr[ra];
- if (instr & 0x04000000) /* update forms */
- regs->gpr[ra] = ea;
+ if (instr & 0x04000000) { /* update forms */
+ if ((instr>>26) != 47) /* stmw is not an update form */
+ regs->gpr[ra] = ea;
+ }
}
return truncate_if_32bit(regs->msr, ea);
@@ -210,11 +212,19 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
{
int err;
unsigned long x, b, c;
+#ifdef __LITTLE_ENDIAN__
+ int len = nb; /* save a copy of the length for byte reversal */
+#endif
/* unaligned, do this in pieces */
x = 0;
for (; nb > 0; nb -= c) {
+#ifdef __LITTLE_ENDIAN__
+ c = 1;
+#endif
+#ifdef __BIG_ENDIAN__
c = max_align(ea);
+#endif
if (c > nb)
c = max_align(nb);
err = read_mem_aligned(&b, ea, c);
@@ -223,7 +233,24 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
x = (x << (8 * c)) + b;
ea += c;
}
+#ifdef __LITTLE_ENDIAN__
+ switch (len) {
+ case 2:
+ *dest = byterev_2(x);
+ break;
+ case 4:
+ *dest = byterev_4(x);
+ break;
+#ifdef __powerpc64__
+ case 8:
+ *dest = byterev_8(x);
+ break;
+#endif
+ }
+#endif
+#ifdef __BIG_ENDIAN__
*dest = x;
+#endif
return 0;
}
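
On little-endian the emulation now fetches the unaligned value one byte at a time (c = 1), accumulates it most-significant-byte first (x = (x << 8) + b), and then byte-reverses the result with byterev_2/4/8 to obtain the CPU-order value; write_mem_unaligned() below applies the mirror-image swap before storing. A standalone sketch of that accumulate-then-reverse step for a 4-byte load, with byterev_4 open-coded as a stand-in for the kernel helper:

#include <stdint.h>

/* Stand-in for the kernel's byterev_4() helper. */
static unsigned long byterev_4_model(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
	       ((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

/*
 * Model of the little-endian path of read_mem_unaligned(): fetch the
 * bytes in address order, shift them in MSB-first, then reverse.
 */
static unsigned long read_u32_unaligned_le(const unsigned char *ea)
{
	unsigned long x = 0;

	for (int i = 0; i < 4; i++)		/* c == 1 per iteration */
		x = (x << 8) + ea[i];		/* big-endian accumulate */
	return byterev_4_model(x);		/* convert to CPU (LE) order */
}
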
@@ -271,15 +298,35 @@ static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
int err;
unsigned long c;
+#ifdef __LITTLE_ENDIAN__
+ switch (nb) {
+ case 2:
+ val = byterev_2(val);
+ break;
+ case 4:
+ val = byterev_4(val);
+ break;
+#ifdef __powerpc64__
+ case 8:
+ val = byterev_8(val);
+ break;
+#endif
+ }
+#endif
/* unaligned or little-endian, do this in pieces */
for (; nb > 0; nb -= c) {
+#ifdef __LITTLE_ENDIAN__
+ c = 1;
+#endif
+#ifdef __BIG_ENDIAN__
c = max_align(ea);
+#endif
if (c > nb)
c = max_align(nb);
err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
if (err)
return err;
- ++ea;
+ ea += c;
}
return 0;
}
@@ -308,22 +355,36 @@ static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
struct pt_regs *regs)
{
int err;
- unsigned long val[sizeof(double) / sizeof(long)];
+ union {
+ double dbl;
+ unsigned long ul[2];
+ struct {
+#ifdef __BIG_ENDIAN__
+ unsigned _pad_;
+ unsigned word;
+#endif
+#ifdef __LITTLE_ENDIAN__
+ unsigned word;
+ unsigned _pad_;
+#endif
+ } single;
+ } data;
unsigned long ptr;
if (!address_ok(regs, ea, nb))
return -EFAULT;
if ((ea & 3) == 0)
return (*func)(rn, ea);
- ptr = (unsigned long) &val[0];
+ ptr = (unsigned long) &data.ul;
if (sizeof(unsigned long) == 8 || nb == 4) {
- err = read_mem_unaligned(&val[0], ea, nb, regs);
- ptr += sizeof(unsigned long) - nb;
+ err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
+ if (nb == 4)
+ ptr = (unsigned long)&(data.single.word);
} else {
/* reading a double on 32-bit */
- err = read_mem_unaligned(&val[0], ea, 4, regs);
+ err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
if (!err)
- err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
+ err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
}
if (err)
return err;
@@ -335,28 +396,42 @@ static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
struct pt_regs *regs)
{
int err;
- unsigned long val[sizeof(double) / sizeof(long)];
+ union {
+ double dbl;
+ unsigned long ul[2];
+ struct {
+#ifdef __BIG_ENDIAN__
+ unsigned _pad_;
+ unsigned word;
+#endif
+#ifdef __LITTLE_ENDIAN__
+ unsigned word;
+ unsigned _pad_;
+#endif
+ } single;
+ } data;
unsigned long ptr;
if (!address_ok(regs, ea, nb))
return -EFAULT;
if ((ea & 3) == 0)
return (*func)(rn, ea);
- ptr = (unsigned long) &val[0];
+ ptr = (unsigned long) &data.ul[0];
if (sizeof(unsigned long) == 8 || nb == 4) {
- ptr += sizeof(unsigned long) - nb;
+ if (nb == 4)
+ ptr = (unsigned long)&(data.single.word);
err = (*func)(rn, ptr);
if (err)
return err;
- err = write_mem_unaligned(val[0], ea, nb, regs);
+ err = write_mem_unaligned(data.ul[0], ea, nb, regs);
} else {
/* writing a double on 32-bit */
err = (*func)(rn, ptr);
if (err)
return err;
- err = write_mem_unaligned(val[0], ea, 4, regs);
+ err = write_mem_unaligned(data.ul[0], ea, 4, regs);
if (!err)
- err = write_mem_unaligned(val[1], ea + 4, 4, regs);
+ err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
}
return err;
}
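
The union replaces the bare unsigned long array so that, for a 4-byte single-precision access, func() can be pointed at the half of the 8-byte slot where the value actually sits: the high-address half on big-endian, the low-address half on little-endian. A small illustration of that layout, reusing the __BIG_ENDIAN__ predefine the patch itself tests (so the #else branch stands in for little-endian):

#include <stdio.h>
#include <stddef.h>

/* Mirrors the union added to do_fp_load()/do_fp_store() above. */
union fp_slot {
	double dbl;
	unsigned long ul[2];
	struct {
#ifdef __BIG_ENDIAN__
		unsigned _pad_;
		unsigned word;	/* low-order 32 bits sit at the high address */
#else
		unsigned word;	/* low-order 32 bits sit at the low address */
		unsigned _pad_;
#endif
	} single;
};

int main(void)
{
	/* Prints 4 on big-endian builds, 0 on little-endian ones. */
	printf("single.word sits at byte offset %zu of the 8-byte slot\n",
	       offsetof(union fp_slot, single.word));
	return 0;
}
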
@@ -580,7 +655,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
if (instr & 1)
regs->link = regs->nip;
if (branch_taken(instr, regs))
- regs->nip = imm;
+ regs->nip = truncate_if_32bit(regs->msr, imm);
return 1;
#ifdef CONFIG_PPC64
case 17: /* sc */
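
Passing the branch target through truncate_if_32bit() makes the emulated NIP wrap at 4GB when the task runs in 32-bit mode on a 64-bit kernel, matching what the hardware does. The helper already exists in sstep.c; a sketch of what it amounts to, assuming MSR_64BIT flags 64-bit mode:

/*
 * Sketch of the truncate_if_32bit() behaviour relied on above
 * (model only; the real helper lives in sstep.c).
 */
static unsigned long truncate_if_32bit_model(unsigned long msr,
					     unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;	/* 32-bit mode: address wraps at 4GB */
#endif
	return val;
}
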
@@ -1503,6 +1578,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
*/
if ((ra == 1) && !(regs->msr & MSR_PR) \
&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
/*
* Check if we will touch kernel stack overflow
*/
@@ -1511,7 +1587,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
err = -EINVAL;
break;
}
-
+#endif /* CONFIG_PPC32 */
/*
* Check if we already set since that means we'll
* lose the previous value.
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
new file mode 100644
index 0000000..e905f7c
--- /dev/null
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -0,0 +1,177 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <altivec.h>
+
+#include <linux/preempt.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+
+typedef vector signed char unative_t;
+
+#define DEFINE(V) \
+ unative_t *V = (unative_t *)V##_in; \
+ unative_t V##_0, V##_1, V##_2, V##_3
+
+#define LOAD(V) \
+ do { \
+ V##_0 = V[0]; \
+ V##_1 = V[1]; \
+ V##_2 = V[2]; \
+ V##_3 = V[3]; \
+ } while (0)
+
+#define STORE(V) \
+ do { \
+ V[0] = V##_0; \
+ V[1] = V##_1; \
+ V[2] = V##_2; \
+ V[3] = V##_3; \
+ } while (0)
+
+#define XOR(V1, V2) \
+ do { \
+ V1##_0 = vec_xor(V1##_0, V2##_0); \
+ V1##_1 = vec_xor(V1##_1, V2##_1); \
+ V1##_2 = vec_xor(V1##_2, V2##_2); \
+ V1##_3 = vec_xor(V1##_3, V2##_3); \
+ } while (0)
+
+void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in)
+{
+ DEFINE(v1);
+ DEFINE(v2);
+ unsigned long lines = bytes / (sizeof(unative_t)) / 4;
+
+ preempt_disable();
+ enable_kernel_altivec();
+
+ do {
+ LOAD(v1);
+ LOAD(v2);
+ XOR(v1, v2);
+ STORE(v1);
+
+ v1 += 4;
+ v2 += 4;
+ } while (--lines > 0);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_2);
+
+void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in)
+{
+ DEFINE(v1);
+ DEFINE(v2);
+ DEFINE(v3);
+ unsigned long lines = bytes / (sizeof(unative_t)) / 4;
+
+ preempt_disable();
+ enable_kernel_altivec();
+
+ do {
+ LOAD(v1);
+ LOAD(v2);
+ LOAD(v3);
+ XOR(v1, v2);
+ XOR(v1, v3);
+ STORE(v1);
+
+ v1 += 4;
+ v2 += 4;
+ v3 += 4;
+ } while (--lines > 0);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_3);
+
+void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in)
+{
+ DEFINE(v1);
+ DEFINE(v2);
+ DEFINE(v3);
+ DEFINE(v4);
+ unsigned long lines = bytes / (sizeof(unative_t)) / 4;
+
+ preempt_disable();
+ enable_kernel_altivec();
+
+ do {
+ LOAD(v1);
+ LOAD(v2);
+ LOAD(v3);
+ LOAD(v4);
+ XOR(v1, v2);
+ XOR(v3, v4);
+ XOR(v1, v3);
+ STORE(v1);
+
+ v1 += 4;
+ v2 += 4;
+ v3 += 4;
+ v4 += 4;
+ } while (--lines > 0);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_4);
+
+void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in, unsigned long *v5_in)
+{
+ DEFINE(v1);
+ DEFINE(v2);
+ DEFINE(v3);
+ DEFINE(v4);
+ DEFINE(v5);
+ unsigned long lines = bytes / (sizeof(unative_t)) / 4;
+
+ preempt_disable();
+ enable_kernel_altivec();
+
+ do {
+ LOAD(v1);
+ LOAD(v2);
+ LOAD(v3);
+ LOAD(v4);
+ LOAD(v5);
+ XOR(v1, v2);
+ XOR(v3, v4);
+ XOR(v1, v5);
+ XOR(v1, v3);
+ STORE(v1);
+
+ v1 += 4;
+ v2 += 4;
+ v3 += 4;
+ v4 += 4;
+ v5 += 4;
+ } while (--lines > 0);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_5);
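
These routines use the do_2 through do_5 prototypes that the generic XOR framework's struct xor_block_template expects (include/linux/raid/xor.h), so they can be offered to the RAID XOR calibration alongside the scalar templates. A sketch of how that wiring might look in the powerpc asm/xor.h, assuming the generic xor_speed() selection machinery from crypto/xor.c and the 8regs/32regs templates from asm-generic/xor.h are used unchanged (not part of this diff):

/* Sketch only: one plausible registration with the generic XOR code. */
#include <asm/cputable.h>

extern void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
			  unsigned long *v2_in);
extern void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
			  unsigned long *v2_in, unsigned long *v3_in);
extern void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
			  unsigned long *v2_in, unsigned long *v3_in,
			  unsigned long *v4_in);
extern void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
			  unsigned long *v2_in, unsigned long *v3_in,
			  unsigned long *v4_in, unsigned long *v5_in);

static struct xor_block_template xor_block_altivec = {
	.name = "altivec",
	.do_2 = xor_altivec_2,
	.do_3 = xor_altivec_3,
	.do_4 = xor_altivec_4,
	.do_5 = xor_altivec_5,
};

#define XOR_TRY_TEMPLATES				\
do {							\
	xor_speed(&xor_block_8regs);			\
	xor_speed(&xor_block_32regs);			\
	if (cpu_has_feature(CPU_FTR_ALTIVEC))		\
		xor_speed(&xor_block_altivec);		\
} while (0)
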