author	Chris Metcalf <cmetcalf@tilera.com>	2012-03-29 17:39:51 (GMT)
committer	Chris Metcalf <cmetcalf@tilera.com>	2012-05-25 16:48:23 (GMT)
commit	47d632f9f8f3ed62b21f725e98b726d65769b6d7 (patch)
tree	9599e3a0106ee320b293be1dbc2d4dbb93b6e1ff /arch/tile/include/asm/atomic_32.h
parent	1efea40d4172a2a475ccb29b59d6221e9d0c174b (diff)
download	linux-47d632f9f8f3ed62b21f725e98b726d65769b6d7.tar.xz
arch/tile: optimize get_user/put_user and friends
Use direct load/store for the get_user/put_user.

Previously, we would call out to a helper routine that would do the appropriate thing and then return, handling the possible exception internally. Now we inline the load or store, along with a "we succeeded" indication in a register; if the load or store faults, we write a "we failed" indication into the same register and then return to the following instruction. This is more efficient and gives us more compact code, as well as being more in line with what other architectures do.

The special futex assembly source file for TILE-Gx also disappears in this change; we just use the same inlining idiom there as well, putting the appropriate atomic operations directly into futex_atomic_op_inuser() (and thus into the FUTEX_WAIT function).

The underlying atomic copy_from_user, copy_to_user functions were renamed using the (cryptic) x86 convention as copy_from_user_ll and copy_to_user_ll.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
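As a rough sketch of the idiom described above (hypothetical, simplified C: the real patch inlines a single load instruction and lets the exception-table fixup write the failure code into the result register, rather than testing the address as this model does), the loaded value and the success indication travel together in one small struct:

/*
 * Hypothetical model of the "value plus success flag" idiom; all names
 * here are invented for illustration. The real kernel code uses inline
 * assembly plus an exception-table fixup instead of the explicit range
 * check modeled below.
 */
#include <stddef.h>

#define MODEL_EFAULT 14			/* stand-in errno value */

struct model_get_user {
	unsigned long val;	/* loaded value; valid only if err == 0 */
	int err;		/* 0 on success, -MODEL_EFAULT on a "fault" */
};

static struct model_get_user
model_get_user_load(const unsigned long *addr,
		    const unsigned long *mapped, size_t mapped_len)
{
	struct model_get_user r = { 0, -MODEL_EFAULT };

	/* Model the hardware fault: only "mapped" addresses may be loaded. */
	if (addr >= mapped && addr < mapped + mapped_len) {
		r.val = *addr;		/* the inlined load */
		r.err = 0;		/* the "we succeeded" indication */
	}
	return r;
}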
Diffstat (limited to 'arch/tile/include/asm/atomic_32.h')
-rw-r--r--	arch/tile/include/asm/atomic_32.h	10
1 file changed, 10 insertions, 0 deletions
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 54d1da8..e7fb5cf 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -303,7 +303,14 @@ void __init_atomic_per_cpu(void);
 void __atomic_fault_unlock(int *lock_ptr);
 #endif
 
+/* Return a pointer to the lock for the given address. */
+int *__atomic_hashed_lock(volatile void *v);
+
 /* Private helper routines in lib/atomic_asm_32.S */
+struct __get_user {
+	unsigned long val;
+	int err;
+};
 extern struct __get_user __atomic_cmpxchg(volatile int *p,
 					  int *lock, int o, int n);
 extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
@@ -319,6 +326,9 @@ extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
 extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
 				      int *lock, u64 o, u64 n);
 
+/* Return failure from the atomic wrappers. */
+struct __get_user __atomic_bad_address(int __user *addr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_ATOMIC_32_H */
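For context, a caller of the helpers declared above might look roughly like this (a hypothetical sketch, not code from the patch; xchg_user_int and oldval_out are invented names). The struct __get_user return value carries both the old value and the fault indication, so a wrapper in the style of futex_atomic_op_inuser() can propagate the error directly:

/*
 * Hypothetical caller sketch: atomically exchange *uaddr with newval,
 * reporting the fault code if the user address was bad. Only the
 * helpers declared in this header are used; the wrapper itself is
 * invented for illustration.
 */
static inline int xchg_user_int(int __user *uaddr, int newval, int *oldval_out)
{
	struct __get_user gu =
		__atomic_xchg((volatile int *)uaddr,
			      __atomic_hashed_lock((volatile void *)uaddr),
			      newval);

	if (gu.err)
		return gu.err;	/* e.g. -EFAULT via __atomic_bad_address() */
	*oldval_out = (int)gu.val;
	return 0;
}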