author     Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
committer  Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
commit     62b8c978ee6b8d135d9e7953221de58000dba986 (patch)
tree       683b04b2e627f6710c22c151b23c8cc9a165315e /arch/powerpc/lib
parent     78fd82238d0e5716578c326404184a27ba67fd6e (diff)
download   linux-fsl-qoriq-62b8c978ee6b8d135d9e7953221de58000dba986.tar.xz
Rewind v3.13-rc3+ (78fd82238d0e5716) to v3.12
Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/Makefile           |  21
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S  |  54
-rw-r--r--  arch/powerpc/lib/memcpy_power7.S    |  55
-rw-r--r--  arch/powerpc/lib/sstep.c            |  97
-rw-r--r--  arch/powerpc/lib/xor_vmx.c          | 177
5 files changed, 63 insertions(+), 341 deletions(-)
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 95a20e1..4504332 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -10,23 +10,15 @@ CFLAGS_REMOVE_code-patching.o = -pg
CFLAGS_REMOVE_feature-fixups.o = -pg
obj-y := string.o alloc.o \
- crtsavres.o
+ checksum_$(CONFIG_WORD_SIZE).o crtsavres.o
obj-$(CONFIG_PPC32) += div64.o copy_32.o
obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
- usercopy_64.o mem_64.o string.o \
- hweight_64.o \
- copyuser_power7.o string_64.o copypage_power7.o
-ifeq ($(CONFIG_GENERIC_CSUM),)
-obj-y += checksum_$(CONFIG_WORD_SIZE).o
-obj-$(CONFIG_PPC64) += checksum_wrappers_64.o
-endif
-
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),)
-obj-$(CONFIG_PPC64) += memcpy_power7.o memcpy_64.o
-endif
-
+ memcpy_64.o usercopy_64.o mem_64.o string.o \
+ checksum_wrappers_64.o hweight_64.o \
+ copyuser_power7.o string_64.o copypage_power7.o \
+ memcpy_power7.o
obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o
ifeq ($(CONFIG_PPC64),y)
@@ -39,6 +31,3 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
obj-y += code-patching.o
obj-y += feature-fixups.o
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
-
-obj-$(CONFIG_ALTIVEC) += xor_vmx.o
-CFLAGS_xor_vmx.o += -maltivec -mabi=altivec
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index e8e9c36..d1f1179 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -19,14 +19,6 @@
*/
#include <asm/ppc_asm.h>
-#ifdef __BIG_ENDIAN__
-#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
-#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
-#else
-#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
-#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
-#endif
-
.macro err1
100:
.section __ex_table,"a"
@@ -560,13 +552,13 @@ err3; stw r7,4(r3)
li r10,32
li r11,48
- LVS(vr16,0,r4) /* Setup permute control vector */
+ lvsl vr16,0,r4 /* Setup permute control vector */
err3; lvx vr0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ vperm vr8,vr0,vr1,vr16
addi r4,r4,16
err3; stvx vr8,r0,r3
addi r3,r3,16
@@ -574,9 +566,9 @@ err3; stvx vr8,r0,r3
5: bf cr7*4+2,6f
err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ vperm vr8,vr0,vr1,vr16
err3; lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+ vperm vr9,vr1,vr0,vr16
addi r4,r4,32
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -584,13 +576,13 @@ err3; stvx vr9,r3,r9
6: bf cr7*4+1,7f
err3; lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
+ vperm vr8,vr0,vr3,vr16
err3; lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
+ vperm vr9,vr3,vr2,vr16
err3; lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
+ vperm vr10,vr2,vr1,vr16
err3; lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+ vperm vr11,vr1,vr0,vr16
addi r4,r4,64
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -619,21 +611,21 @@ err3; stvx vr11,r3,r11
.align 5
8:
err4; lvx vr7,r0,r4
- VPERM(vr8,vr0,vr7,vr16)
+ vperm vr8,vr0,vr7,vr16
err4; lvx vr6,r4,r9
- VPERM(vr9,vr7,vr6,vr16)
+ vperm vr9,vr7,vr6,vr16
err4; lvx vr5,r4,r10
- VPERM(vr10,vr6,vr5,vr16)
+ vperm vr10,vr6,vr5,vr16
err4; lvx vr4,r4,r11
- VPERM(vr11,vr5,vr4,vr16)
+ vperm vr11,vr5,vr4,vr16
err4; lvx vr3,r4,r12
- VPERM(vr12,vr4,vr3,vr16)
+ vperm vr12,vr4,vr3,vr16
err4; lvx vr2,r4,r14
- VPERM(vr13,vr3,vr2,vr16)
+ vperm vr13,vr3,vr2,vr16
err4; lvx vr1,r4,r15
- VPERM(vr14,vr2,vr1,vr16)
+ vperm vr14,vr2,vr1,vr16
err4; lvx vr0,r4,r16
- VPERM(vr15,vr1,vr0,vr16)
+ vperm vr15,vr1,vr0,vr16
addi r4,r4,128
err4; stvx vr8,r0,r3
err4; stvx vr9,r3,r9
@@ -657,13 +649,13 @@ err4; stvx vr15,r3,r16
bf cr7*4+1,9f
err3; lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
+ vperm vr8,vr0,vr3,vr16
err3; lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
+ vperm vr9,vr3,vr2,vr16
err3; lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
+ vperm vr10,vr2,vr1,vr16
err3; lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+ vperm vr11,vr1,vr0,vr16
addi r4,r4,64
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -673,9 +665,9 @@ err3; stvx vr11,r3,r11
9: bf cr7*4+2,10f
err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ vperm vr8,vr0,vr1,vr16
err3; lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+ vperm vr9,vr1,vr0,vr16
addi r4,r4,32
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -683,7 +675,7 @@ err3; stvx vr9,r3,r9
10: bf cr7*4+3,11f
err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ vperm vr8,vr0,vr1,vr16
addi r4,r4,16
err3; stvx vr8,r0,r3
addi r3,r3,16
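
The LVS/VPERM wrappers deleted above were the little-endian compatibility layer: on LE parts the permute control comes from lvsr and the vperm source operands are swapped. The underlying technique, unchanged by this rewind, is the classic Altivec unaligned load: two aligned lvx fetches spliced together by vperm. A minimal portable-C sketch of what that sequence computes (illustrative names, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch of the lvsl + 2x lvx + vperm idiom: lvx can only load from a
 * 16-byte-aligned address, so an unaligned 16-byte read is assembled
 * from the two aligned chunks straddling the source.  lvsl derives the
 * byte shift from the low 4 address bits; vperm applies it.
 */
static void unaligned_load_16(const uint8_t *src, uint8_t out[16])
{
	uintptr_t a = (uintptr_t)src;
	const uint8_t *aligned = (const uint8_t *)(a & ~(uintptr_t)15);
	unsigned shift = a & 15;         /* what lvsl encodes in vr16 */
	uint8_t merged[32];

	memcpy(merged, aligned, 32);     /* the two aligned lvx loads */
	memcpy(out, merged + shift, 16); /* the vperm splice          */
}

int main(void)
{
	_Alignas(16) uint8_t buf[48];
	uint8_t out[16];

	for (int i = 0; i < 48; i++)
		buf[i] = (uint8_t)i;
	unaligned_load_16(buf + 5, out);
	printf("%u %u\n", out[0], out[15]);      /* prints: 5 20 */
	return 0;
}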
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index e4177db..0663630 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -20,15 +20,6 @@
#include <asm/ppc_asm.h>
_GLOBAL(memcpy_power7)
-
-#ifdef __BIG_ENDIAN__
-#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
-#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
-#else
-#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
-#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
-#endif
-
#ifdef CONFIG_ALTIVEC
cmpldi r5,16
cmpldi cr1,r5,4096
@@ -494,13 +485,13 @@ _GLOBAL(memcpy_power7)
li r10,32
li r11,48
- LVS(vr16,0,r4) /* Setup permute control vector */
+ lvsl vr16,0,r4 /* Setup permute control vector */
lvx vr0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ vperm vr8,vr0,vr1,vr16
addi r4,r4,16
stvx vr8,r0,r3
addi r3,r3,16
@@ -508,9 +499,9 @@ _GLOBAL(memcpy_power7)
5: bf cr7*4+2,6f
lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ vperm vr8,vr0,vr1,vr16
lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+ vperm vr9,vr1,vr0,vr16
addi r4,r4,32
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -518,13 +509,13 @@ _GLOBAL(memcpy_power7)
6: bf cr7*4+1,7f
lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
+ vperm vr8,vr0,vr3,vr16
lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
+ vperm vr9,vr3,vr2,vr16
lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
+ vperm vr10,vr2,vr1,vr16
lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+ vperm vr11,vr1,vr0,vr16
addi r4,r4,64
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -553,21 +544,21 @@ _GLOBAL(memcpy_power7)
.align 5
8:
lvx vr7,r0,r4
- VPERM(vr8,vr0,vr7,vr16)
+ vperm vr8,vr0,vr7,vr16
lvx vr6,r4,r9
- VPERM(vr9,vr7,vr6,vr16)
+ vperm vr9,vr7,vr6,vr16
lvx vr5,r4,r10
- VPERM(vr10,vr6,vr5,vr16)
+ vperm vr10,vr6,vr5,vr16
lvx vr4,r4,r11
- VPERM(vr11,vr5,vr4,vr16)
+ vperm vr11,vr5,vr4,vr16
lvx vr3,r4,r12
- VPERM(vr12,vr4,vr3,vr16)
+ vperm vr12,vr4,vr3,vr16
lvx vr2,r4,r14
- VPERM(vr13,vr3,vr2,vr16)
+ vperm vr13,vr3,vr2,vr16
lvx vr1,r4,r15
- VPERM(vr14,vr2,vr1,vr16)
+ vperm vr14,vr2,vr1,vr16
lvx vr0,r4,r16
- VPERM(vr15,vr1,vr0,vr16)
+ vperm vr15,vr1,vr0,vr16
addi r4,r4,128
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -591,13 +582,13 @@ _GLOBAL(memcpy_power7)
bf cr7*4+1,9f
lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
+ vperm vr8,vr0,vr3,vr16
lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
+ vperm vr9,vr3,vr2,vr16
lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
+ vperm vr10,vr2,vr1,vr16
lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+ vperm vr11,vr1,vr0,vr16
addi r4,r4,64
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -607,9 +598,9 @@ _GLOBAL(memcpy_power7)
9: bf cr7*4+2,10f
lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ vperm vr8,vr0,vr1,vr16
lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+ vperm vr9,vr1,vr0,vr16
addi r4,r4,32
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -617,7 +608,7 @@ _GLOBAL(memcpy_power7)
10: bf cr7*4+3,11f
lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ vperm vr8,vr0,vr1,vr16
addi r4,r4,16
stvx vr8,r0,r3
addi r3,r3,16
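
Both vector copy routines share the tail structure visible in these hunks: after the 128-byte main loop at label 8, the leftover count is peeled off in power-of-two chunks, one conditional branch (bf cr7*4+n) per bit of the remaining length. A rough C rendering of that control flow, as an illustration only:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/*
 * Illustrative only: the residual length is decomposed by its bits,
 * largest chunk first, mirroring the bf cr7*4+1/+2/+3 tests that
 * guard the 64-, 32- and 16-byte tail blocks in the assembly above.
 */
static void copy_tail(uint8_t *dst, const uint8_t *src, size_t rem)
{
	for (size_t chunk = 64; chunk >= 16; chunk >>= 1) {
		if (rem & chunk) {               /* one CR bit per size */
			memcpy(dst, src, chunk);
			dst += chunk;
			src += chunk;
		}
	}
	/* any sub-16-byte remainder is handled by scalar code, not shown */
}

int main(void)
{
	uint8_t a[112], b[112];

	for (int i = 0; i < 112; i++)
		a[i] = (uint8_t)i;
	copy_tail(b, a, 112);                    /* 64 + 32 + 16 bytes */
	return memcmp(a, b, 112) != 0;
}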
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index c0511c2..b1faa15 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -212,19 +212,11 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
{
int err;
unsigned long x, b, c;
-#ifdef __LITTLE_ENDIAN__
- int len = nb; /* save a copy of the length for byte reversal */
-#endif
/* unaligned, do this in pieces */
x = 0;
for (; nb > 0; nb -= c) {
-#ifdef __LITTLE_ENDIAN__
- c = 1;
-#endif
-#ifdef __BIG_ENDIAN__
c = max_align(ea);
-#endif
if (c > nb)
c = max_align(nb);
err = read_mem_aligned(&b, ea, c);
@@ -233,24 +225,7 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
x = (x << (8 * c)) + b;
ea += c;
}
-#ifdef __LITTLE_ENDIAN__
- switch (len) {
- case 2:
- *dest = byterev_2(x);
- break;
- case 4:
- *dest = byterev_4(x);
- break;
-#ifdef __powerpc64__
- case 8:
- *dest = byterev_8(x);
- break;
-#endif
- }
-#endif
-#ifdef __BIG_ENDIAN__
*dest = x;
-#endif
return 0;
}
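
The deleted little-endian branch forced c = 1 so the loop reads one byte per iteration, and x = (x << 8) + b composes those bytes most-significant-first, i.e. as a big-endian value; the byterev_2/4/8 helpers (kernel functions referenced by the removed code) then swapped it into host order. A self-contained sketch of that round trip, with byterev_4 written out as an illustrative stand-in:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the kernel's byterev_4() used by the
 * removed little-endian path: swap the byte order of a 32-bit value.
 */
static uint32_t byterev_4(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) << 8)  |
	       ((x & 0x00ff0000u) >> 8)  |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	const uint8_t mem[4] = { 0x12, 0x34, 0x56, 0x78 };
	uint32_t x = 0;

	for (int i = 0; i < 4; i++)     /* the c = 1 accumulation loop */
		x = (x << 8) + mem[i];  /* big-endian composition      */
	printf("%08x %08x\n", x, byterev_4(x)); /* 12345678 78563412 */
	return 0;
}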
@@ -298,29 +273,9 @@ static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
int err;
unsigned long c;
-#ifdef __LITTLE_ENDIAN__
- switch (nb) {
- case 2:
- val = byterev_2(val);
- break;
- case 4:
- val = byterev_4(val);
- break;
-#ifdef __powerpc64__
- case 8:
- val = byterev_8(val);
- break;
-#endif
- }
-#endif
/* unaligned or little-endian, do this in pieces */
for (; nb > 0; nb -= c) {
-#ifdef __LITTLE_ENDIAN__
- c = 1;
-#endif
-#ifdef __BIG_ENDIAN__
c = max_align(ea);
-#endif
if (c > nb)
c = max_align(nb);
err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
@@ -355,36 +310,22 @@ static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
struct pt_regs *regs)
{
int err;
- union {
- double dbl;
- unsigned long ul[2];
- struct {
-#ifdef __BIG_ENDIAN__
- unsigned _pad_;
- unsigned word;
-#endif
-#ifdef __LITTLE_ENDIAN__
- unsigned word;
- unsigned _pad_;
-#endif
- } single;
- } data;
+ unsigned long val[sizeof(double) / sizeof(long)];
unsigned long ptr;
if (!address_ok(regs, ea, nb))
return -EFAULT;
if ((ea & 3) == 0)
return (*func)(rn, ea);
- ptr = (unsigned long) &data.ul;
+ ptr = (unsigned long) &val[0];
if (sizeof(unsigned long) == 8 || nb == 4) {
- err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
- if (nb == 4)
- ptr = (unsigned long)&(data.single.word);
+ err = read_mem_unaligned(&val[0], ea, nb, regs);
+ ptr += sizeof(unsigned long) - nb;
} else {
/* reading a double on 32-bit */
- err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
+ err = read_mem_unaligned(&val[0], ea, 4, regs);
if (!err)
- err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
+ err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
}
if (err)
return err;
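
The replacement code positions ptr with plain arithmetic instead of the endian-switched union: on the 64-bit big-endian targets this rewound tree supports, a 4-byte value held in the low bits of an unsigned long occupies the last nb bytes of its storage, so ptr += sizeof(unsigned long) - nb addresses exactly the word the single-precision helper should touch. A small demonstration under that big-endian assumption (it will print nonsense on an LE host):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long val = 0x3f800000ul;   /* 1.0f in the low 32 bits */
	unsigned nb = 4;
	unsigned char *ptr = (unsigned char *)&val
			     + sizeof(unsigned long) - nb;
	float f;

	memcpy(&f, ptr, sizeof(f));         /* BE host: f == 1.0f */
	printf("%f\n", (double)f);
	return 0;
}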
@@ -396,42 +337,28 @@ static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
struct pt_regs *regs)
{
int err;
- union {
- double dbl;
- unsigned long ul[2];
- struct {
-#ifdef __BIG_ENDIAN__
- unsigned _pad_;
- unsigned word;
-#endif
-#ifdef __LITTLE_ENDIAN__
- unsigned word;
- unsigned _pad_;
-#endif
- } single;
- } data;
+ unsigned long val[sizeof(double) / sizeof(long)];
unsigned long ptr;
if (!address_ok(regs, ea, nb))
return -EFAULT;
if ((ea & 3) == 0)
return (*func)(rn, ea);
- ptr = (unsigned long) &data.ul[0];
+ ptr = (unsigned long) &val[0];
if (sizeof(unsigned long) == 8 || nb == 4) {
- if (nb == 4)
- ptr = (unsigned long)&(data.single.word);
+ ptr += sizeof(unsigned long) - nb;
err = (*func)(rn, ptr);
if (err)
return err;
- err = write_mem_unaligned(data.ul[0], ea, nb, regs);
+ err = write_mem_unaligned(val[0], ea, nb, regs);
} else {
/* writing a double on 32-bit */
err = (*func)(rn, ptr);
if (err)
return err;
- err = write_mem_unaligned(data.ul[0], ea, 4, regs);
+ err = write_mem_unaligned(val[0], ea, 4, regs);
if (!err)
- err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
+ err = write_mem_unaligned(val[1], ea + 4, 4, regs);
}
return err;
}
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
deleted file mode 100644
index e905f7c..0000000
--- a/arch/powerpc/lib/xor_vmx.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2012
- *
- * Author: Anton Blanchard <anton@au.ibm.com>
- */
-#include <altivec.h>
-
-#include <linux/preempt.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <asm/switch_to.h>
-
-typedef vector signed char unative_t;
-
-#define DEFINE(V) \
- unative_t *V = (unative_t *)V##_in; \
- unative_t V##_0, V##_1, V##_2, V##_3
-
-#define LOAD(V) \
- do { \
- V##_0 = V[0]; \
- V##_1 = V[1]; \
- V##_2 = V[2]; \
- V##_3 = V[3]; \
- } while (0)
-
-#define STORE(V) \
- do { \
- V[0] = V##_0; \
- V[1] = V##_1; \
- V[2] = V##_2; \
- V[3] = V##_3; \
- } while (0)
-
-#define XOR(V1, V2) \
- do { \
- V1##_0 = vec_xor(V1##_0, V2##_0); \
- V1##_1 = vec_xor(V1##_1, V2##_1); \
- V1##_2 = vec_xor(V1##_2, V2##_2); \
- V1##_3 = vec_xor(V1##_3, V2##_3); \
- } while (0)
-
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in)
-{
- DEFINE(v1);
- DEFINE(v2);
- unsigned long lines = bytes / (sizeof(unative_t)) / 4;
-
- preempt_disable();
- enable_kernel_altivec();
-
- do {
- LOAD(v1);
- LOAD(v2);
- XOR(v1, v2);
- STORE(v1);
-
- v1 += 4;
- v2 += 4;
- } while (--lines > 0);
-
- preempt_enable();
-}
-EXPORT_SYMBOL(xor_altivec_2);
-
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in)
-{
- DEFINE(v1);
- DEFINE(v2);
- DEFINE(v3);
- unsigned long lines = bytes / (sizeof(unative_t)) / 4;
-
- preempt_disable();
- enable_kernel_altivec();
-
- do {
- LOAD(v1);
- LOAD(v2);
- LOAD(v3);
- XOR(v1, v2);
- XOR(v1, v3);
- STORE(v1);
-
- v1 += 4;
- v2 += 4;
- v3 += 4;
- } while (--lines > 0);
-
- preempt_enable();
-}
-EXPORT_SYMBOL(xor_altivec_3);
-
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in)
-{
- DEFINE(v1);
- DEFINE(v2);
- DEFINE(v3);
- DEFINE(v4);
- unsigned long lines = bytes / (sizeof(unative_t)) / 4;
-
- preempt_disable();
- enable_kernel_altivec();
-
- do {
- LOAD(v1);
- LOAD(v2);
- LOAD(v3);
- LOAD(v4);
- XOR(v1, v2);
- XOR(v3, v4);
- XOR(v1, v3);
- STORE(v1);
-
- v1 += 4;
- v2 += 4;
- v3 += 4;
- v4 += 4;
- } while (--lines > 0);
-
- preempt_enable();
-}
-EXPORT_SYMBOL(xor_altivec_4);
-
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in)
-{
- DEFINE(v1);
- DEFINE(v2);
- DEFINE(v3);
- DEFINE(v4);
- DEFINE(v5);
- unsigned long lines = bytes / (sizeof(unative_t)) / 4;
-
- preempt_disable();
- enable_kernel_altivec();
-
- do {
- LOAD(v1);
- LOAD(v2);
- LOAD(v3);
- LOAD(v4);
- LOAD(v5);
- XOR(v1, v2);
- XOR(v3, v4);
- XOR(v1, v5);
- XOR(v1, v3);
- STORE(v1);
-
- v1 += 4;
- v2 += 4;
- v3 += 4;
- v4 += 4;
- v5 += 4;
- } while (--lines > 0);
-
- preempt_enable();
-}
-EXPORT_SYMBOL(xor_altivec_5);
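
For reference, the file deleted above implemented the RAID xor API with Altivec: each xor_altivec_N disables preemption, enables kernel Altivec state, and XORs the N-1 source buffers into the first, four vector registers per loop trip. A plain-C sketch of the two-buffer case, stripped of the vector and preemption machinery (illustrative, not a drop-in replacement):

#include <stddef.h>
#include <stdio.h>

/*
 * Scalar rendering of the deleted xor_altivec_2(): XOR v2 into v1,
 * with the length given in bytes as the xor block API expects.  The
 * real routine did this 4 x 16 bytes per iteration with vec_xor,
 * between preempt_disable() and enable_kernel_altivec().
 */
static void xor_scalar_2(unsigned long bytes, unsigned long *v1,
			 const unsigned long *v2)
{
	size_t n = bytes / sizeof(unsigned long);

	for (size_t i = 0; i < n; i++)
		v1[i] ^= v2[i];
}

int main(void)
{
	unsigned long a[4] = { 1, 2, 3, 4 };
	unsigned long b[4] = { 1, 2, 3, 4 };

	xor_scalar_2(sizeof(a), a, b);
	printf("%lu %lu %lu %lu\n", a[0], a[1], a[2], a[3]); /* 0 0 0 0 */
	return 0;
}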