author     Linus Torvalds <torvalds@linux-foundation.org>    2011-11-01 14:48:13 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-11-01 14:48:13 (GMT)
commit     c87d5d594736dd8b56df67e31846c7d7b8c41a8f (patch)
tree       a4496b74b932e55b544d040af2668e68abcb1e56 /arch/hexagon/mm/copy_from_user.S
parent     094803e0aab3fe75bbf8202a8f4b5280eaade375 (diff)
parent     4e29198e1cd7728c30c96a8483a6068c71b34e4e (diff)
Merge Qualcomm Hexagon architecture
This is the fifth version of the patchset (with one tiny whitespace fix) to the Linux kernel to support the Qualcomm Hexagon architecture.

Between now and the next pull requests, Richard Kuo should have his key signed, etc., and should be back on kernel.org. In the meantime, this got merged as an emailed patch series.

* Hexagon: (36 commits)
  Add extra arch overrides to asm-generic/checksum.h
  Hexagon: Add self to MAINTAINERS
  Hexagon: Add basic stacktrace functionality for Hexagon architecture.
  Hexagon: Add configuration and makefiles for the Hexagon architecture.
  Hexagon: Comet platform support
  Hexagon: kgdb support files
  Hexagon: Add page-fault support.
  Hexagon: Add page table header files & etc.
  Hexagon: Add ioremap support
  Hexagon: Provide DMA implementation
  Hexagon: Implement basic TLB management routines for Hexagon.
  Hexagon: Implement basic cache-flush support
  Hexagon: Provide basic implementation and/or stubs for I/O routines.
  Hexagon: Add user access functions
  Hexagon: Add locking types and functions
  Hexagon: Add SMP support
  Hexagon: Provide basic debugging and system trap support.
  Hexagon: Add ptrace support
  Hexagon: Add time and timer functions
  Hexagon: Add interrupts
  ...
Diffstat (limited to 'arch/hexagon/mm/copy_from_user.S')
-rw-r--r--   arch/hexagon/mm/copy_from_user.S   114
1 file changed, 114 insertions, 0 deletions
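
For context, __copy_from_user_hexagon follows the usual copy_from_user() contract: it returns the number of bytes it could NOT copy (0 on complete success), which is why the fixup paths in the file below hand back the remaining count in r0. A minimal, hypothetical caller sketch (the struct and function names are illustrative only, not part of this patch):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    struct example_args {
            int fd;
            unsigned long flags;
    };

    /* Copy a user-supplied argument block into kernel memory; a nonzero
     * return from copy_from_user() means some bytes faulted and were not
     * copied, so report -EFAULT. */
    static int example_get_args(struct example_args *args,
                                const void __user *uptr)
    {
            if (copy_from_user(args, uptr, sizeof(*args)))
                    return -EFAULT;
            return 0;
    }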
diff --git a/arch/hexagon/mm/copy_from_user.S b/arch/hexagon/mm/copy_from_user.S
new file mode 100644
index 0000000..8eb1d4d
--- /dev/null
+++ b/arch/hexagon/mm/copy_from_user.S
@@ -0,0 +1,114 @@
+/*
+ * User memory copy functions for kernel
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The right way to do this involves valignb.
+ * The easy way, done here, is to only speed up copies where src and dst
+ * have similar alignment.
+ */
+
+/*
+ * Copy to user and copy from user are the same, except that for packets
+ * with both a load and a store, we can't tell which kind of exception
+ * we got.  Therefore, we duplicate the function and handle faulting
+ * addresses differently for each one.
+ */
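+/*
+ * Fixup convention (see the __ex_table entries at the end of this file):
+ * when a load in the copy loops faults, the fault handler redirects
+ * execution to the matching fixup label below, which returns the number
+ * of bytes NOT copied in r0, the value copy_from_user() callers expect.
+ */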
+
+/*
+ * copy from user: loads can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME __copy_from_user_hexagon
+#include "copy_user_template.S"
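+/*
+ * copy_user_template.S supplies the actual copy loops.  The backward
+ * label references in the __ex_table section at the end of this file
+ * (1000b, 2000b, 4000b, 8080b, 4080b, 2080b, 1080b) resolve to labels
+ * defined in that template; the matching fixup labels (1009, 2009, 4009,
+ * 8089, 4089, 2089, 1089) are defined below.
+ */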
+
+ /* LOAD FAULTS from COPY_FROM_USER */
+
+ /* Alignment loop. r2 has been updated. Return it. */
+ .falign
+1009:
+2009:
+4009:
+ {
+ r0 = r2
+ jumpr r31
+ }
+ /* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
+ /* X - (A - B) == X + B - A */
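+ /* With X = r2, A = src and B = src_sav, the remaining count X - (A - B)
+    is computed as X + B - A, i.e. r2 += sub(src_sav,src) below. */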
+ .falign
+8089:
+ {
+ memd(dst) = d_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+ .falign
+4089:
+ {
+ memw(dst) = w_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+ .falign
+2089:
+ {
+ memh(dst) = w_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+ .falign
+1089:
+ {
+ memb(dst) = w_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+
+ /* COPY FROM USER: only loads can fail */
+
+ .section __ex_table,"a"
+ .long 1000b,1009b
+ .long 2000b,2009b
+ .long 4000b,4009b
+ .long 8080b,8089b
+ .long 4080b,4089b
+ .long 2080b,2089b
+ .long 1080b,1089b
+ .previous
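
The __ex_table section pairs the address of each load that may fault (the 1000/2000/4000/8080/4080/2080/1080 labels from copy_user_template.S) with the fixup address to resume at. Conceptually, the page-fault path looks up the faulting PC in this table and, on a match, redirects execution to the fixup instead of treating the fault as a kernel bug. A simplified, illustrative C sketch of that lookup (not the kernel's actual implementation, which keeps the table sorted and goes through helpers such as search_exception_tables()):

    struct exception_table_entry {
            unsigned long insn;     /* address of the faulting instruction */
            unsigned long fixup;    /* address to continue execution at */
    };

    /* Hypothetical linear scan over 'num' entries for a faulting PC. */
    static const struct exception_table_entry *
    find_fixup(const struct exception_table_entry *table, int num,
               unsigned long fault_pc)
    {
            int i;

            for (i = 0; i < num; i++)
                    if (table[i].insn == fault_pc)
                            return &table[i];
            return NULL;    /* no fixup: a genuine kernel fault */
    }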