author    Linus Torvalds <torvalds@linux-foundation.org>    2013-10-07 16:30:02 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-10-07 16:30:02 (GMT)
commit    4f97c9b206c4bf6ed5ad31091ec2eb9c2204e44c (patch)
tree      fe5e840595303db13e742f968d6ee8682cb97fc2 /arch/tile/lib/atomic_32.c
parent    162bdafa46bfbc10c564de3c1aca2708440181e8 (diff)
parent    0cc96a745059e8daf1d7d6a26672f0ad9056e989 (diff)
download  linux-fsl-qoriq-4f97c9b206c4bf6ed5ad31091ec2eb9c2204e44c.tar.xz
Merge branch 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull Tile bugfixes from Chris Metcalf:
 "This fixes some serious issues with PREEMPT support, and a couple of
  smaller corner-case issues fixed in the last couple of weeks"

* 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  arch: tile: re-use kbasename() helper
  tile: use a more conservative __my_cpu_offset in CONFIG_PREEMPT
  tile: ensure interrupts disabled for preempt_schedule_irq()
  tile: change lock initialization in hardwall
  tile: include: asm: use 'long long' instead of 'u64' for atomic64_t and its related functions
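The last item in that list is the change shown in the diff below. As a standalone
sketch of why the spelling matters (plain C, not kernel code; the u64 typedef here
merely stands in for the kernel's), 'long long' and 'unsigned long long' are distinct
types to the compiler, so a declaration and a definition that disagree on them clash:

typedef unsigned long long u64;	/* stand-in for the kernel's u64 */

/* the atomic64_t interface is declared in terms of 'long long': */
long long _atomic64_xchg(long long *v, long long n);

/*
 * Redeclaring the same helper with u64, as arch/tile/lib/atomic_32.c
 * previously did, is rejected: the compiler reports conflicting types
 * for _atomic64_xchg, since the signedness differs.
 */
u64 _atomic64_xchg(u64 *v, u64 n);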
Diffstat (limited to 'arch/tile/lib/atomic_32.c')
-rw-r--r--  arch/tile/lib/atomic_32.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 759efa3..c89b211 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
 
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
 {
 	return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
 {
 	return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
 {
 	/*
 	 * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 {
 	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
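For a sense of the semantic behind the most involved of these helpers:
_atomic64_xchg_add_unless() adds 'a' to *v unless *v already equals 'u', and
returns the old value. A standalone sketch of that behavior in plain C11 (the
function name and the compare-exchange loop are stand-ins of mine; the tile
routine instead runs under the per-address atomic lock that __atomic_setup()
selects):

#include <stdatomic.h>
#include <stdio.h>

static long long add_unless(_Atomic long long *v, long long a, long long u)
{
	long long old = atomic_load(v);

	/* retry until we either observe 'u' or our compare-exchange wins;
	 * on failure, 'old' is refreshed with the current value of *v */
	while (old != u &&
	       !atomic_compare_exchange_weak(v, &old, old + a))
		;
	return old;	/* old != u means the add actually happened */
}

int main(void)
{
	_Atomic long long counter = 5;

	add_unless(&counter, 10, 5);	/* counter == u: no-op, stays 5 */
	add_unless(&counter, 10, 0);	/* counter != u: becomes 15 */
	printf("%lld\n", atomic_load(&counter));	/* prints 15 */
	return 0;
}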