Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/atomic.h       |  190
-rw-r--r--  arch/s390/include/asm/bitops.h       | 1008
-rw-r--r--  arch/s390/include/asm/compat.h       |    5
-rw-r--r--  arch/s390/include/asm/ctl_reg.h      |  112
-rw-r--r--  arch/s390/include/asm/debug.h        |    5
-rw-r--r--  arch/s390/include/asm/dis.h          |   52
-rw-r--r--  arch/s390/include/asm/fcx.h          |   38
-rw-r--r--  arch/s390/include/asm/ipl.h          |   10
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |   10
-rw-r--r--  arch/s390/include/asm/page.h         |    7
-rw-r--r--  arch/s390/include/asm/pci_debug.h    |    5
-rw-r--r--  arch/s390/include/asm/pci_insn.h     |   15
-rw-r--r--  arch/s390/include/asm/percpu.h       |  137
-rw-r--r--  arch/s390/include/asm/processor.h    |   18
-rw-r--r--  arch/s390/include/asm/ptrace.h       |    7
-rw-r--r--  arch/s390/include/asm/setup.h        |    7
-rw-r--r--  arch/s390/include/asm/smp.h          |    1
-rw-r--r--  arch/s390/include/asm/switch_to.h    |  124
-rw-r--r--  arch/s390/include/asm/timex.h        |    6
-rw-r--r--  arch/s390/include/asm/uaccess.h      |   18
20 files changed, 771 insertions(+), 1004 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c797832..fa9aaf7 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -19,21 +19,50 @@
#define ATOMIC_INIT(i) { (i) }
-#define __CS_LOOP(ptr, op_val, op_string) ({ \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR "lao"
+#define __ATOMIC_AND "lan"
+#define __ATOMIC_ADD "laa"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+({ \
+ int old_val; \
+ \
+ typecheck(atomic_t *, ptr); \
+ asm volatile( \
+ op_string " %0,%2,%1\n" \
+ : "=d" (old_val), "+Q" ((ptr)->counter) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR "or"
+#define __ATOMIC_AND "nr"
+#define __ATOMIC_ADD "ar"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+({ \
int old_val, new_val; \
+ \
+ typecheck(atomic_t *, ptr); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), \
- "=Q" (((atomic_t *)(ptr))->counter) \
- : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+ : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+ : "d" (op_val) \
: "cc", "memory"); \
- new_val; \
+ old_val; \
})
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
static inline int atomic_read(const atomic_t *v)
{
int c;
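
Both __ATOMIC_LOOP variants behave as a fetch-and-op: they hand back the *old* counter value. A minimal sketch of the pre-z196 compare-and-swap loop in portable C, assuming GCC's __atomic builtins purely for illustration (the patch itself uses inline assembly):

    /* Sketch of the "l / lr / <op> / cs / jl" sequence. */
    static inline int atomic_loop_sketch(int *counter, int val)
    {
            int old = *counter;                     /* l  %0,%2 */
            int new;

            do {
                    new = old + val;                /* op_string %1,%3 */
                    /* cs %0,%1,%2: on failure, old is refreshed, retry */
            } while (!__atomic_compare_exchange_n(counter, &old, new, 0,
                                                  __ATOMIC_SEQ_CST,
                                                  __ATOMIC_SEQ_CST));
            return old;
    }

Returning old_val instead of new_val is what lets both variants share a single atomic_add_return() below: the z196 load-and-add instructions can only return the old value, so the generic code adds i on top.
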
@@ -53,32 +82,45 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v)
{
- return __CS_LOOP(v, i, "ar");
+ return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
}
-#define atomic_add(_i, _v) atomic_add_return(_i, _v)
-#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v) atomic_add_return(1, _v)
-#define atomic_inc_return(_v) atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
{
- return __CS_LOOP(v, i, "sr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+ asm volatile(
+ "asi %0,%1\n"
+ : "+Q" (v->counter)
+ : "i" (i)
+ : "cc", "memory");
+ } else {
+ atomic_add_return(i, v);
+ }
+#else
+ atomic_add_return(i, v);
+#endif
}
-#define atomic_sub(_i, _v) atomic_sub_return(_i, _v)
+
+#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v) atomic_add(1, _v)
+#define atomic_inc_return(_v) atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
+#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v) atomic_sub_return(1, _v)
+#define atomic_dec(_v) atomic_sub(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- __CS_LOOP(v, ~mask, "nr");
+ __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
}
-static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- __CS_LOOP(v, mask, "or");
+ __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
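
The __builtin_constant_p() guard in atomic_add() accepts exactly the signed 8-bit immediate range of the asi add-immediate instruction. Hypothetical callers (v being some atomic_t *, n a runtime value; not from the patch):

    atomic_add( 127, v);    /* constant within [-128, 127]: single "asi" */
    atomic_add(-128, v);    /* ditto */
    atomic_add( 128, v);    /* constant out of range: atomic_add_return(),
                             * i.e. a single "laa" on z196 */
    atomic_add(n, v);       /* not a compile-time constant: same fallback */
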
@@ -87,8 +129,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
asm volatile(
" cs %0,%2,%1"
- : "+d" (old), "=Q" (v->counter)
- : "d" (new), "Q" (v->counter)
+ : "+d" (old), "+Q" (v->counter)
+ : "d" (new)
: "cc", "memory");
return old;
}
@@ -109,27 +151,56 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
}
-#undef __CS_LOOP
+#undef __ATOMIC_LOOP
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_64BIT
-#define __CSG_LOOP(ptr, op_val, op_string) ({ \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR "laog"
+#define __ATOMIC64_AND "lang"
+#define __ATOMIC64_ADD "laag"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+({ \
+ long long old_val; \
+ \
+ typecheck(atomic64_t *, ptr); \
+ asm volatile( \
+ op_string " %0,%2,%1\n" \
+ : "=d" (old_val), "+Q" ((ptr)->counter) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR "ogr"
+#define __ATOMIC64_AND "ngr"
+#define __ATOMIC64_ADD "agr"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+({ \
long long old_val, new_val; \
+ \
+ typecheck(atomic64_t *, ptr); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), \
- "=Q" (((atomic_t *)(ptr))->counter) \
- : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+ : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+ : "d" (op_val) \
: "cc", "memory"); \
- new_val; \
+ old_val; \
})
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
static inline long long atomic64_read(const atomic64_t *v)
{
long long c;
@@ -149,22 +220,17 @@ static inline void atomic64_set(atomic64_t *v, long long i)
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
- return __CSG_LOOP(v, i, "agr");
-}
-
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
- return __CSG_LOOP(v, i, "sgr");
+ return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
}
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, ~mask, "ngr");
+ __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
}
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, mask, "ogr");
+ __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -174,13 +240,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
{
asm volatile(
" csg %0,%2,%1"
- : "+d" (old), "=Q" (v->counter)
- : "d" (new), "Q" (v->counter)
+ : "+d" (old), "+Q" (v->counter)
+ : "d" (new)
: "cc", "memory");
return old;
}
-#undef __CSG_LOOP
+#undef __ATOMIC64_LOOP
#else /* CONFIG_64BIT */
@@ -216,8 +282,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
" lm %0,%N0,%1\n"
"0: cds %0,%2,%1\n"
" jl 0b\n"
- : "=&d" (rp_old), "=Q" (v->counter)
- : "d" (rp_new), "Q" (v->counter)
+ : "=&d" (rp_old), "+Q" (v->counter)
+ : "d" (rp_new)
: "cc");
return rp_old.pair;
}
@@ -230,8 +296,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
asm volatile(
" cds %0,%2,%1"
- : "+&d" (rp_old), "=Q" (v->counter)
- : "d" (rp_new), "Q" (v->counter)
+ : "+&d" (rp_old), "+Q" (v->counter)
+ : "d" (rp_new)
: "cc");
return rp_old.pair;
}
@@ -248,17 +314,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
return new;
}
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old - i;
- } while (atomic64_cmpxchg(v, old, new) != old);
- return new;
-}
-
static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
long long old, new;
@@ -281,7 +336,24 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
#endif /* CONFIG_64BIT */
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+ asm volatile(
+ "agsi %0,%1\n"
+ : "+Q" (v->counter)
+ : "i" (i)
+ : "cc", "memory");
+ } else {
+ atomic64_add_return(i, v);
+ }
+#else
+ atomic64_add_return(i, v);
+#endif
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
long long c, old;
@@ -289,7 +361,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
for (;;) {
if (unlikely(c == u))
break;
- old = atomic64_cmpxchg(v, c, c + a);
+ old = atomic64_cmpxchg(v, c, c + i);
if (likely(old == c))
break;
c = old;
@@ -314,14 +386,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return dec;
}
-#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add_return(1, _v)
+#define atomic64_inc(_v) atomic64_add(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
+#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec(_v) atomic64_sub(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
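
One subtlety in the rewritten subtraction macros: the -(int)(_i) and -(long long)(_i) casts force the negation into a signed type. A hedged illustration of the case this avoids (hypothetical caller, not from the patch):

    atomic_t v = ATOMIC_INIT(2);
    unsigned int delta = 1;

    atomic_sub(delta, &v);  /* expands to atomic_add(-(int)delta, &v),
                             * i.e. atomic_add(-1, &v); without the cast
                             * the negation would be evaluated in unsigned
                             * arithmetic first */
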
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 10135a3..6e6ad06 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -1,10 +1,40 @@
/*
- * S390 version
- * Copyright IBM Corp. 1999
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp. 1999,2013
*
- * Derived from "include/asm-i386/bitops.h"
- * Copyright (C) 1992, Linus Torvalds
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *
+ * The description below was taken in large parts from the powerpc
+ * bitops header file:
+ * Within a word, bits are numbered LSB first. Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for an
+ * s390x system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........192|
+ * and on s390:
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps, these work on similar bit arrays layouts, but
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
+ * number field need to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * We also have special functions which work with an MSB0 encoding:
+ * on an s390x system the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The main difference is that bits 0-63 (64b) or 0-31 (32b) in the bit
+ * number field need to be reversed compared to the LSB0 encoded bit
+ * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
*
*/
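
A minimal sketch (editorial, 64-bit assumed) of the two address calculations behind the layouts described above; it mirrors the __bitops_word()/__bitops_byte() helpers introduced further down:

    static unsigned long *bitops_word_sketch(unsigned long nr,
                                             unsigned long *ptr)
    {
            /* clear the in-word bit index, convert remaining bits to bytes */
            return (unsigned long *)((unsigned long)ptr +
                                     ((nr ^ (nr & 63)) >> 3));
    }

    static unsigned char *bitops_byte_sketch(unsigned long nr,
                                             unsigned long *ptr)
    {
            /* byte 0 of a big-endian word holds bits 63..56, so the byte
             * index within the word is reversed: XOR with 64 - 8 */
            return (unsigned char *)ptr + ((nr ^ 56) >> 3);
    }
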
@@ -15,556 +45,348 @@
#error only <linux/bitops.h> can be included directly
#endif
+#include <linux/typecheck.h>
#include <linux/compiler.h>
-/*
- * 32 bit bitops format:
- * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
- * bit 32 is the LSB of *(addr+4). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
- * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- *
- * 64 bit bitops format:
- * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
- * bit 64 is the LSB of *(addr+8). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
- * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
- * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
- * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
- * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- */
-
-/* bitmap tables from arch/s390/kernel/bitmap.c */
-extern const char _oi_bitmap[];
-extern const char _ni_bitmap[];
-extern const char _zb_findmap[];
-extern const char _sb_findmap[];
-
#ifndef CONFIG_64BIT
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string) \
+({ \
+ unsigned long __old, __new; \
+ \
+ typecheck(unsigned long *, (__addr)); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
__op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (__old), "=&d" (__new), \
- "=Q" (*(unsigned long *) __addr) \
- : "d" (__val), "Q" (*(unsigned long *) __addr) \
- : "cc");
+ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+ : "d" (__val) \
+ : "cc"); \
+ __old; \
+})
#else /* CONFIG_64BIT */
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __BITOPS_OR "laog"
+#define __BITOPS_AND "lang"
+#define __BITOPS_XOR "laxg"
+
+#define __BITOPS_LOOP(__addr, __val, __op_string) \
+({ \
+ unsigned long __old; \
+ \
+ typecheck(unsigned long *, (__addr)); \
+ asm volatile( \
+ __op_string " %0,%2,%1\n" \
+ : "=d" (__old), "+Q" (*(__addr)) \
+ : "d" (__val) \
+ : "cc"); \
+ __old; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string) \
+({ \
+ unsigned long __old, __new; \
+ \
+ typecheck(unsigned long *, (__addr)); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
__op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (__old), "=&d" (__new), \
- "=Q" (*(unsigned long *) __addr) \
- : "d" (__val), "Q" (*(unsigned long *) __addr) \
- : "cc");
+ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+ : "d" (__val) \
+ : "cc"); \
+ __old; \
+})
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#endif /* CONFIG_64BIT */
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-#ifdef CONFIG_SMP
-/*
- * SMP safe set_bit routine based on compare and swap (CS)
- */
-static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline unsigned long *
+__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
+ return (unsigned long *)addr;
+}
+
+static inline unsigned char *
+__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+}
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make OR mask */
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
+
+ asm volatile(
+ "oi %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (1 << (nr & 7))
+ : "cc");
+ return;
+ }
+#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+ __BITOPS_LOOP(addr, mask, __BITOPS_OR);
}
-/*
- * SMP safe clear_bit routine based on compare and swap (CS)
- */
-static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make AND mask */
+ asm volatile(
+ "ni %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (~(1 << (nr & 7)))
+ : "cc");
+ return;
+ }
+#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+ __BITOPS_LOOP(addr, mask, __BITOPS_AND);
}
-/*
- * SMP safe change_bit routine based on compare and swap (CS)
- */
-static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make XOR mask */
+ asm volatile(
+ "xi %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (1 << (nr & 7))
+ : "cc");
+ return;
+ }
+#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+ __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
}
-/*
- * SMP safe test_and_set_bit routine based on compare and swap (CS)
- */
static inline int
-test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make OR/test mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
barrier();
return (old & mask) != 0;
}
-/*
- * SMP safe test_and_clear_bit routine based on compare and swap (CS)
- */
static inline int
-test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make AND/test mask */
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
barrier();
- return (old ^ new) != 0;
+ return (old & ~mask) != 0;
}
-/*
- * SMP safe test_and_change_bit routine based on compare and swap (CS)
- */
static inline int
-test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make XOR/test mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
barrier();
return (old & mask) != 0;
}
-#endif /* CONFIG_SMP */
-/*
- * fast, non-SMP set_bit routine
- */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- asm volatile(
- " oc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
- addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- *(unsigned char *) addr |= 1 << (nr & 7);
+ *addr |= 1 << (nr & 7);
}
-#define set_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_set_bit((nr),(addr)) : \
- __set_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP clear_bit routine
- */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- asm volatile(
- " nc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
- addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- *(unsigned char *) addr &= ~(1 << (nr & 7));
+ *addr &= ~(1 << (nr & 7));
}
-#define clear_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_clear_bit((nr),(addr)) : \
- __clear_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP change_bit routine
- */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- asm volatile(
- " xc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
- addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- *(unsigned char *) addr ^= 1 << (nr & 7);
+ *addr ^= 1 << (nr & 7);
}
-#define change_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_change_bit((nr),(addr)) : \
- __change_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP test_and_set_bit routine
- */
static inline int
-test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(unsigned char *) addr;
- asm volatile(
- " oc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
- : "cc", "memory");
+ ch = *addr;
+ *addr |= 1 << (nr & 7);
return (ch >> (nr & 7)) & 1;
}
-#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
-/*
- * fast, non-SMP test_and_clear_bit routine
- */
static inline int
-test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(unsigned char *) addr;
- asm volatile(
- " nc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
- : "cc", "memory");
+ ch = *addr;
+ *addr &= ~(1 << (nr & 7));
return (ch >> (nr & 7)) & 1;
}
-#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
-/*
- * fast, non-SMP test_and_change_bit routine
- */
static inline int
-test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(unsigned char *) addr;
- asm volatile(
- " xc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
- : "cc", "memory");
+ ch = *addr;
+ *addr ^= 1 << (nr & 7);
return (ch >> (nr & 7)) & 1;
}
-#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
-
-#ifdef CONFIG_SMP
-#define set_bit set_bit_cs
-#define clear_bit clear_bit_cs
-#define change_bit change_bit_cs
-#define test_and_set_bit test_and_set_bit_cs
-#define test_and_clear_bit test_and_clear_bit_cs
-#define test_and_change_bit test_and_change_bit_cs
-#else
-#define set_bit set_bit_simple
-#define clear_bit clear_bit_simple
-#define change_bit change_bit_simple
-#define test_and_set_bit test_and_set_bit_simple
-#define test_and_clear_bit test_and_clear_bit_simple
-#define test_and_change_bit test_and_change_bit_simple
-#endif
-
-
-/*
- * This routine doesn't need to be atomic.
- */
-static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
+static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
- unsigned long addr;
- unsigned char ch;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(volatile unsigned char *) addr;
- return (ch >> (nr & 7)) & 1;
-}
+ const volatile unsigned char *addr;
-static inline int
-__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
- return (((volatile char *) addr)
- [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
+ addr = ((const volatile unsigned char *)ptr);
+ addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
+ return (*addr >> (nr & 7)) & 1;
}
-#define test_bit(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)) )
-
/*
- * Optimized find bit helper functions.
- */
-
-/**
- * __ffz_word_loop - find byte offset of first long != -1UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
+ * Functions which use MSB0 bit numbering.
+ * On an s390x system the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
*/
-static inline unsigned long __ffz_word_loop(const unsigned long *addr,
- unsigned long size)
-{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long bytes = 0;
-
- asm volatile(
-#ifndef CONFIG_64BIT
- " ahi %1,-1\n"
- " sra %1,5\n"
- " jz 1f\n"
- "0: c %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,4(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#else
- " aghi %1,-1\n"
- " srag %1,%1,6\n"
- " jz 1f\n"
- "0: cg %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,8(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#endif
- : "+&a" (bytes), "+&d" (size)
- : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
- : "cc" );
- return bytes;
-}
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+ unsigned long offset);
-/**
- * __ffs_word_loop - find byte offset of first long != 0UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
- */
-static inline unsigned long __ffs_word_loop(const unsigned long *addr,
- unsigned long size)
+static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long bytes = 0;
-
- asm volatile(
-#ifndef CONFIG_64BIT
- " ahi %1,-1\n"
- " sra %1,5\n"
- " jz 1f\n"
- "0: c %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,4(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#else
- " aghi %1,-1\n"
- " srag %1,%1,6\n"
- " jz 1f\n"
- "0: cg %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,8(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#endif
- : "+&a" (bytes), "+&a" (size)
- : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
- : "cc" );
- return bytes;
+ return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/**
- * __ffz_word - add number of the first unset bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for unset bits
- */
-static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
+static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
-#ifdef CONFIG_64BIT
- if ((word & 0xffffffff) == 0xffffffff) {
- word >>= 32;
- nr += 32;
- }
-#endif
- if ((word & 0xffff) == 0xffff) {
- word >>= 16;
- nr += 16;
- }
- if ((word & 0xff) == 0xff) {
- word >>= 8;
- nr += 8;
- }
- return nr + _zb_findmap[(unsigned char) word];
+ return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/**
- * __ffs_word - add number of the first set bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for set bits
- */
-static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
+static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
-#ifdef CONFIG_64BIT
- if ((word & 0xffffffff) == 0) {
- word >>= 32;
- nr += 32;
- }
-#endif
- if ((word & 0xffff) == 0) {
- word >>= 16;
- nr += 16;
- }
- if ((word & 0xff) == 0) {
- word >>= 8;
- nr += 8;
- }
- return nr + _sb_findmap[(unsigned char) word];
+ return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-
-/**
- * __load_ulong_be - load big endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_be(const unsigned long *p,
- unsigned long offset)
+static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
- p = (unsigned long *)((unsigned long) p + offset);
- return *p;
+ return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/**
- * __load_ulong_le - load little endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_le(const unsigned long *p,
- unsigned long offset)
+static inline int test_bit_inv(unsigned long nr,
+ const volatile unsigned long *ptr)
{
- unsigned long word;
-
- p = (unsigned long *)((unsigned long) p + offset);
-#ifndef CONFIG_64BIT
- asm volatile(
- " ic %0,%O1(%R1)\n"
- " icm %0,2,%O1+1(%R1)\n"
- " icm %0,4,%O1+2(%R1)\n"
- " icm %0,8,%O1+3(%R1)"
- : "=&d" (word) : "Q" (*p) : "cc");
-#else
- asm volatile(
- " lrvg %0,%1"
- : "=d" (word) : "m" (*p) );
-#endif
- return word;
+ return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/*
- * The various find bit functions.
- */
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-/*
- * ffz - find first zero in word.
- * @word: The word to search
+/**
+ * __flogr - find leftmost one
+ * @word: the word to search
*
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
- return __ffz_word(0, word);
+ * Returns the bit number of the most significant bit set,
+ * where the most significant bit has bit number 0.
+ * If no bit is set this function returns 64.
+ */
+static inline unsigned char __flogr(unsigned long word)
+{
+ if (__builtin_constant_p(word)) {
+ unsigned long bit = 0;
+
+ if (!word)
+ return 64;
+ if (!(word & 0xffffffff00000000UL)) {
+ word <<= 32;
+ bit += 32;
+ }
+ if (!(word & 0xffff000000000000UL)) {
+ word <<= 16;
+ bit += 16;
+ }
+ if (!(word & 0xff00000000000000UL)) {
+ word <<= 8;
+ bit += 8;
+ }
+ if (!(word & 0xf000000000000000UL)) {
+ word <<= 4;
+ bit += 4;
+ }
+ if (!(word & 0xc000000000000000UL)) {
+ word <<= 2;
+ bit += 2;
+ }
+ if (!(word & 0x8000000000000000UL)) {
+ word <<= 1;
+ bit += 1;
+ }
+ return bit;
+ } else {
+ register unsigned long bit asm("4") = word;
+ register unsigned long out asm("5");
+
+ asm volatile(
+ " flogr %[bit],%[bit]\n"
+ : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+ return bit;
+ }
}
/**
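
For orientation (editorial, not part of the patch): __flogr() is a count of leading zeros with a defined result of 64 for zero input, and the even/odd register pair asm("4")/asm("5") exists because the flogr instruction writes a register pair. The __ffs() definition in the next hunk rests on two identities: -word & word isolates the lowest set bit, and x ^ 63 == 63 - x for 0 <= x <= 63, which converts the MSB0 result back to an LSB0 bit number. A portable sketch using __builtin_clzll() for illustration:

    static unsigned long ffs_sketch(unsigned long word)
    {
            /* precondition: word != 0, as for __ffs() */
            return __builtin_clzll(word & -word) ^ 63UL;
    }
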
@@ -573,337 +395,83 @@ static inline unsigned long ffz(unsigned long word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static inline unsigned long __ffs (unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
{
- return __ffs_word(0, word);
+ return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}
/**
* ffs - find first bit set
- * @x: the word to search
+ * @word: the word to search
*
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * This is defined the same way as the libc and
+ * compiler builtin ffs routines (man ffs).
*/
-static inline int ffs(int x)
+static inline int ffs(int word)
{
- if (!x)
- return 0;
- return __ffs_word(1, x);
+ unsigned long mask = 2 * BITS_PER_LONG - 1;
+ unsigned int val = (unsigned int)word;
+
+ return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}
/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
*
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
+ * Undefined if no set bit exists, so code should check against 0 first.
*/
-static inline unsigned long find_first_zero_bit(const unsigned long *addr,
- unsigned long size)
+static inline unsigned long __fls(unsigned long word)
{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffz_word_loop(addr, size);
- bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
- return (bits < size) ? bits : size;
+ return __flogr(word) ^ (BITS_PER_LONG - 1);
}
-#define find_first_zero_bit find_first_zero_bit
/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * fls64 - find last set bit in a 64-bit word
+ * @word: the word to search
*
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned long find_first_bit(const unsigned long * addr,
- unsigned long size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffs_word_loop(addr, size);
- bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
- return (bits < size) ? bits : size;
-}
-#define find_first_bit find_first_bit
-
-/*
- * Big endian variant whichs starts bit counting from left using
- * the flogr (find leftmost one) instruction.
- */
-static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
-{
- register unsigned long bit asm("2") = val;
- register unsigned long out asm("3");
-
- asm volatile (
- " .insn rre,0xb9830000,%[bit],%[bit]\n"
- : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
- return nr + bit;
-}
-
-/*
- * 64 bit special left bitops format:
- * order in memory:
- * 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
- * 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
- * 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
- * 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
- * after that follows the next long with bit numbers
- * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
- * 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
- * 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
- * 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
- * The reason for this bit ordering is the fact that
- * the hardware sets bits in a bitmap starting at bit 0
- * and we don't want to scan the bitmap from the 'wrong
- * end'.
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
*/
-static inline unsigned long find_first_bit_left(const unsigned long *addr,
- unsigned long size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffs_word_loop(addr, size);
- bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
- return (bits < size) ? bits : size;
-}
-
-static inline int find_next_bit_left(const unsigned long *addr,
- unsigned long size,
- unsigned long offset)
+static inline int fls64(unsigned long word)
{
- const unsigned long *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- set = __flo_word(0, *p & (~0UL >> bit));
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_bit_left(p, size);
-}
-
-#define for_each_set_bit_left(bit, addr, size) \
- for ((bit) = find_first_bit_left((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit_left((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_left_cont(bit, addr, size) \
- for ((bit) = find_next_bit_left((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+ unsigned long mask = 2 * BITS_PER_LONG - 1;
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline int find_next_zero_bit (const unsigned long * addr,
- unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * __ffz_word returns BITS_PER_LONG
- * if no zero bit is present in the word.
- */
- set = __ffz_word(bit, *p >> bit);
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_zero_bit(p, size);
+ return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}
-#define find_next_zero_bit find_next_zero_bit
/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+ * fls - find last (most-significant) bit set
+ * @word: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static inline int find_next_bit (const unsigned long * addr,
- unsigned long size,
- unsigned long offset)
+static inline int fls(int word)
{
- const unsigned long *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * __ffs_word returns BITS_PER_LONG
- * if no one bit is present in the word.
- */
- set = __ffs_word(0, *p & (~0UL << bit));
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_bit(p, size);
+ return fls64((unsigned int)word);
}
-#define find_next_bit find_next_bit
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- return find_first_bit(b, 140);
-}
+#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
-#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
+#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
-
-/*
- * ATTENTION: intel byte ordering convention for ext2 and minix !!
- * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
- * bit 32 is the LSB of (addr+4).
- * That combined with the little endian byte order of Intel gives the
- * following bit order in memory:
- * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
- * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
- */
-
-static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffz_word_loop(vaddr, size);
- bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
- return (bits < size) ? bits : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
- unsigned long offset)
-{
- unsigned long *addr = vaddr, *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * s390 version of ffz returns BITS_PER_LONG
- * if no zero bit is present in the word.
- */
- set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_zero_bit_le(p, size);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffs_word_loop(vaddr, size);
- bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
- return (bits < size) ? bits : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline int find_next_bit_le(void *vaddr, unsigned long size,
- unsigned long offset)
-{
- unsigned long *addr = vaddr, *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * s390 version of ffz returns BITS_PER_LONG
- * if no zero bit is present in the word.
- */
- set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_bit_le(p, size);
-}
-#define find_next_bit_le find_next_bit_le
-
+#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
-
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* _S390_BITOPS_H */
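
Worked through (editorial), the "& mask" in the new ffs() and fls64() is what yields the required 0-for-0 result without a branch. With mask == 2 * BITS_PER_LONG - 1 == 127:

    fls64(0):        __flogr(0) == 64,  64 ^ 63 == 127,  (1 + 127) & 127 ==  0
    fls64(1):        __flogr(1) == 63,  63 ^ 63 ==   0,  (1 +   0) & 127 ==  1
    fls64(1UL<<63):  __flogr    ==  0,   0 ^ 63 ==  63,  (1 +  63) & 127 == 64
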
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index c1e7c64..4bf9da0 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -22,6 +22,7 @@
#define PSW32_MASK_ASC 0x0000C000UL
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
+#define PSW32_MASK_RI 0x00000080UL
#define PSW32_MASK_USER 0x0000FF00UL
@@ -35,7 +36,9 @@
#define PSW32_ASC_SECONDARY 0x00008000UL
#define PSW32_ASC_HOME 0x0000C000UL
-extern u32 psw32_user_bits;
+#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
+ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
+ PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index debfda3..9b69c0b 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -8,69 +8,59 @@
#define __ASM_CTL_REG_H
#ifdef CONFIG_64BIT
-
-#define __ctl_load(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), \
- "i" (low), "i" (high)); \
- })
-
-#define __ctl_store(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " stctg %1,%2,%0\n" \
- : "=Q" (*(addrtype *)(&array)) \
- : "i" (low), "i" (high)); \
- })
-
-#else /* CONFIG_64BIT */
-
-#define __ctl_load(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " lctl %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), \
- "i" (low), "i" (high)); \
-})
-
-#define __ctl_store(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " stctl %1,%2,%0\n" \
- : "=Q" (*(addrtype *)(&array)) \
- : "i" (low), "i" (high)); \
- })
-
-#endif /* CONFIG_64BIT */
-
-#define __ctl_set_bit(cr, bit) ({ \
- unsigned long __dummy; \
- __ctl_store(__dummy, cr, cr); \
- __dummy |= 1UL << (bit); \
- __ctl_load(__dummy, cr, cr); \
-})
-
-#define __ctl_clear_bit(cr, bit) ({ \
- unsigned long __dummy; \
- __ctl_store(__dummy, cr, cr); \
- __dummy &= ~(1UL << (bit)); \
- __ctl_load(__dummy, cr, cr); \
-})
+# define __CTL_LOAD "lctlg"
+# define __CTL_STORE "stctg"
+#else
+# define __CTL_LOAD "lctl"
+# define __CTL_STORE "stctl"
+#endif
+
+#define __ctl_load(array, low, high) { \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+ asm volatile( \
+ __CTL_LOAD " %1,%2,%0\n" \
+ : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+}
+
+#define __ctl_store(array, low, high) { \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+ asm volatile( \
+ __CTL_STORE " %1,%2,%0\n" \
+ : "=Q" (*(addrtype *)(&array)) \
+ : "i" (low), "i" (high)); \
+}
+
+static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, cr, cr);
+ reg |= 1UL << bit;
+ __ctl_load(reg, cr, cr);
+}
+
+static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, cr, cr);
+ reg &= ~(1UL << bit);
+ __ctl_load(reg, cr, cr);
+}
+
+void smp_ctl_set_bit(int cr, int bit);
+void smp_ctl_clear_bit(int cr, int bit);
#ifdef CONFIG_SMP
-
-extern void smp_ctl_set_bit(int cr, int bit);
-extern void smp_ctl_clear_bit(int cr, int bit);
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
+# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
#else
-
-#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif /* CONFIG_SMP */
+# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+#endif
#endif /* __ASM_CTL_REG_H */
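
The new BUILD_BUG_ON() ties the array size to the register range at compile time. A hypothetical caller (not from the patch) showing what it rejects:

    unsigned long cregs[2];

    __ctl_store(cregs, 0, 1);   /* ok: two control registers, two slots */
    __ctl_store(cregs, 0, 2);   /* build error: three registers, two slots */
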
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 188c505..530c15e 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -107,6 +107,11 @@ void debug_set_level(debug_info_t* id, int new_level);
void debug_set_critical(void);
void debug_stop_all(void);
+static inline bool debug_level_enabled(debug_info_t* id, int level)
+{
+ return level <= id->level;
+}
+
static inline debug_entry_t*
debug_event(debug_info_t* id, int level, void* data, int length)
{
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
new file mode 100644
index 0000000..04a83f5
--- /dev/null
+++ b/arch/s390/include/asm/dis.h
@@ -0,0 +1,52 @@
+/*
+ * Disassemble s390 instructions.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#ifndef __ASM_S390_DIS_H__
+#define __ASM_S390_DIS_H__
+
+/* Type of operand */
+#define OPERAND_GPR 0x1 /* Operand printed as %rx */
+#define OPERAND_FPR 0x2 /* Operand printed as %fx */
+#define OPERAND_AR 0x4 /* Operand printed as %ax */
+#define OPERAND_CR 0x8 /* Operand printed as %cx */
+#define OPERAND_DISP 0x10 /* Operand printed as displacement */
+#define OPERAND_BASE 0x20 /* Operand printed as base register */
+#define OPERAND_INDEX 0x40 /* Operand printed as index register */
+#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
+
+
+struct s390_operand {
+ int bits; /* The number of bits in the operand. */
+ int shift; /* The number of bits to shift. */
+ int flags; /* One bit syntax flags. */
+};
+
+struct s390_insn {
+ const char name[5];
+ unsigned char opfrag;
+ unsigned char format;
+};
+
+
+static inline int insn_length(unsigned char code)
+{
+ return ((((int) code + 64) >> 7) + 1) << 1;
+}
+
+void show_code(struct pt_regs *regs);
+void print_fn_code(unsigned char *code, unsigned long len);
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
+struct s390_insn *find_insn(unsigned char *code);
+
+static inline int is_known_insn(unsigned char *code)
+{
+ return !!find_insn(code);
+}
+
+#endif /* __ASM_S390_DIS_H__ */
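
insn_length() encodes the s390 rule that the two most significant bits of the first opcode byte select the instruction length: 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes. Worked examples (editorial):

    insn_length(0x07): (0x07 + 64) >> 7 == 0  ->  (0 + 1) << 1 == 2  /* e.g. bcr  */
    insn_length(0x47): (0x47 + 64) >> 7 == 1  ->  (1 + 1) << 1 == 4  /* e.g. bc   */
    insn_length(0xc0): (0xc0 + 64) >> 7 == 2  ->  (2 + 1) << 1 == 6  /* e.g. larl */
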
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index ef61709..7ecb92b 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -12,9 +12,9 @@
#define TCW_FORMAT_DEFAULT 0
#define TCW_TIDAW_FORMAT_DEFAULT 0
-#define TCW_FLAGS_INPUT_TIDA 1 << (23 - 5)
-#define TCW_FLAGS_TCCB_TIDA 1 << (23 - 6)
-#define TCW_FLAGS_OUTPUT_TIDA 1 << (23 - 7)
+#define TCW_FLAGS_INPUT_TIDA (1 << (23 - 5))
+#define TCW_FLAGS_TCCB_TIDA (1 << (23 - 6))
+#define TCW_FLAGS_OUTPUT_TIDA (1 << (23 - 7))
#define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9)
#define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3)
@@ -54,11 +54,11 @@ struct tcw {
u32 intrg;
} __attribute__ ((packed, aligned(64)));
-#define TIDAW_FLAGS_LAST 1 << (7 - 0)
-#define TIDAW_FLAGS_SKIP 1 << (7 - 1)
-#define TIDAW_FLAGS_DATA_INT 1 << (7 - 2)
-#define TIDAW_FLAGS_TTIC 1 << (7 - 3)
-#define TIDAW_FLAGS_INSERT_CBC 1 << (7 - 4)
+#define TIDAW_FLAGS_LAST (1 << (7 - 0))
+#define TIDAW_FLAGS_SKIP (1 << (7 - 1))
+#define TIDAW_FLAGS_DATA_INT (1 << (7 - 2))
+#define TIDAW_FLAGS_TTIC (1 << (7 - 3))
+#define TIDAW_FLAGS_INSERT_CBC (1 << (7 - 4))
/**
* struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
@@ -106,9 +106,9 @@ struct tsa_ddpc {
u8 sense[32];
} __attribute__ ((packed));
-#define TSA_INTRG_FLAGS_CU_STATE_VALID 1 << (7 - 0)
-#define TSA_INTRG_FLAGS_DEV_STATE_VALID 1 << (7 - 1)
-#define TSA_INTRG_FLAGS_OP_STATE_VALID 1 << (7 - 2)
+#define TSA_INTRG_FLAGS_CU_STATE_VALID (1 << (7 - 0))
+#define TSA_INTRG_FLAGS_DEV_STATE_VALID (1 << (7 - 1))
+#define TSA_INTRG_FLAGS_OP_STATE_VALID (1 << (7 - 2))
/**
* struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
@@ -140,10 +140,10 @@ struct tsa_intrg {
#define TSB_FORMAT_DDPC 2
#define TSB_FORMAT_INTRG 3
-#define TSB_FLAGS_DCW_OFFSET_VALID 1 << (7 - 0)
-#define TSB_FLAGS_COUNT_VALID 1 << (7 - 1)
-#define TSB_FLAGS_CACHE_MISS 1 << (7 - 2)
-#define TSB_FLAGS_TIME_VALID 1 << (7 - 3)
+#define TSB_FLAGS_DCW_OFFSET_VALID (1 << (7 - 0))
+#define TSB_FLAGS_COUNT_VALID (1 << (7 - 1))
+#define TSB_FLAGS_CACHE_MISS (1 << (7 - 2))
+#define TSB_FLAGS_TIME_VALID (1 << (7 - 3))
#define TSB_FLAGS_FORMAT(x) ((x) & 7)
#define TSB_FORMAT(t) ((t)->flags & 7)
@@ -179,9 +179,9 @@ struct tsb {
#define DCW_INTRG_RCQ_PRIMARY 1
#define DCW_INTRG_RCQ_SECONDARY 2
-#define DCW_INTRG_FLAGS_MPM 1 < (7 - 0)
-#define DCW_INTRG_FLAGS_PPR 1 < (7 - 1)
-#define DCW_INTRG_FLAGS_CRIT 1 < (7 - 2)
+#define DCW_INTRG_FLAGS_MPM (1 << (7 - 0))
+#define DCW_INTRG_FLAGS_PPR (1 << (7 - 1))
+#define DCW_INTRG_FLAGS_CRIT (1 << (7 - 2))
/**
* struct dcw_intrg_data - Interrogate DCW data
@@ -216,7 +216,7 @@ struct dcw_intrg_data {
u8 prog_data[0];
} __attribute__ ((packed));
-#define DCW_FLAGS_CC 1 << (7 - 1)
+#define DCW_FLAGS_CC (1 << (7 - 1))
#define DCW_CMD_WRITE 0x01
#define DCW_CMD_READ 0x02
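
Two distinct fixes are folded into these fcx.h hunks. The added parentheses guard the shift macros against precedence surprises, and the DCW_INTRG_FLAGS_* change also repairs a real bug: "1 < (7 - 0)" was a less-than comparison, so all three old masks evaluated to 1. Spelled out (editorial):

    1 <  (7 - 0) == 1       /* old: always-true comparison, not a mask */
    1 << (7 - 0) == 0x80    /* new: three distinct bit masks */
    1 << (7 - 1) == 0x40
    1 << (7 - 2) == 0x20

    /* precedence hazard the parentheses close ("+" binds tighter than "<<"): */
    TIDAW_FLAGS_SKIP + 1    /* old: 1 << (7 - 1) + 1 == 1 << 7 == 0x80 */
                            /* new: (1 << (7 - 1)) + 1 == 0x41 */
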
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 2bd6cb8..2fcccc0 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -7,6 +7,7 @@
#ifndef _ASM_S390_IPL_H
#define _ASM_S390_IPL_H
+#include <asm/lowcore.h>
#include <asm/types.h>
#include <asm/cio.h>
#include <asm/setup.h>
@@ -86,7 +87,14 @@ struct ipl_parameter_block {
*/
extern u32 ipl_flags;
extern u32 dump_prefix_page;
-extern unsigned int zfcpdump_prefix_array[];
+
+struct dump_save_areas {
+ struct save_area **areas;
+ int count;
+};
+
+extern struct dump_save_areas dump_save_areas;
+struct save_area *dump_save_area_create(int cpu);
extern void do_reipl(void);
extern void do_halt(void);
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 9f973d8..5d1f950 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -40,14 +40,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
- if (s390_user_mode != HOME_SPACE_MODE) {
- /* Load primary space page table origin. */
- asm volatile(LCTL_OPCODE" 1,1,%0\n"
- : : "m" (S390_lowcore.user_asce) );
- } else
- /* Load home space page table origin. */
- asm volatile(LCTL_OPCODE" 13,13,%0"
- : : "m" (S390_lowcore.user_asce) );
+ /* Load primary space page table origin. */
+ asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
set_fs(current->thread.mm_segment);
}
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 1e51f29..316c850 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -30,7 +30,12 @@
#include <asm/setup.h>
#ifndef __ASSEMBLY__
-void storage_key_init_range(unsigned long start, unsigned long end);
+static inline void storage_key_init_range(unsigned long start, unsigned long end)
+{
+#if PAGE_DEFAULT_KEY
+ __storage_key_init_range(start, end);
+#endif
+}
static inline void clear_page(void *page)
{
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 1ca5d10..ac24b26 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -6,14 +6,9 @@
extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
-#ifdef CONFIG_PCI_DEBUG
#define zpci_dbg(imp, fmt, args...) \
debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
-#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(imp, fmt, args...) do { } while (0)
-#endif
-
#define zpci_err(text...) \
do { \
char debug_buffer[16]; \
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index df6eac9..649eb62 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -54,11 +54,9 @@
struct zpci_fib {
u32 fmt : 8; /* format */
u32 : 24;
- u32 reserved1;
+ u32 : 32;
u8 fc; /* function controls */
- u8 reserved2;
- u16 reserved3;
- u32 reserved4;
+ u64 : 56;
u64 pba; /* PCI base address */
u64 pal; /* PCI address limit */
u64 iota; /* I/O Translation Anchor */
@@ -70,14 +68,13 @@ struct zpci_fib {
u32 sum : 1; /* Adapter int summary bit enabled */
u32 : 1;
u32 aisbo : 6; /* Adapter int summary bit offset */
- u32 reserved5;
+ u32 : 32;
u64 aibv; /* Adapter int bit vector address */
u64 aisb; /* Adapter int summary bit address */
u64 fmb_addr; /* Function measurement block address and key */
- u64 reserved6;
- u64 reserved7;
-} __packed;
-
+ u32 : 32;
+ u32 gd;
+} __packed __aligned(8);
int zpci_mod_fc(u64 req, struct zpci_fib *fib);
int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 86fe0ee..fa91e00 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,16 +10,22 @@
*/
#define __my_cpu_offset S390_lowcore.percpu_offset
+#ifdef CONFIG_64BIT
+
/*
 * For 64-bit module code the module may be more than 4G above the
 * per-cpu area; use weak definitions to force the compiler to
 * generate external references.
 */
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
-#define arch_this_cpu_to_op(pcp, val, op) \
+/*
+ * We use a compare-and-swap loop because it takes fewer CPU cycles than
+ * disabling and enabling interrupts, as the generic variant would do.
+ */
+#define arch_this_cpu_to_op_simple(pcp, val, op) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
@@ -30,42 +36,101 @@
do { \
old__ = prev__; \
new__ = old__ op (val); \
- switch (sizeof(*ptr__)) { \
- case 8: \
- prev__ = cmpxchg64(ptr__, old__, new__); \
- break; \
- default: \
- prev__ = cmpxchg(ptr__, old__, new__); \
- } \
+ prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
preempt_enable(); \
new__; \
})
-#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+
+#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
+{ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = __this_cpu_ptr(&(pcp)); \
+ if (__builtin_constant_p(val__) && \
+ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
+ asm volatile( \
+ op2 " %[ptr__],%[val__]\n" \
+ : [ptr__] "+Q" (*ptr__) \
+ : [val__] "i" ((szcast)val__) \
+ : "cc"); \
+ } else { \
+ asm volatile( \
+ op1 " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ } \
+ preempt_enable(); \
+}
-#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
-#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define arch_this_cpu_add_return(pcp, val, op) \
+({ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = __this_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ preempt_enable(); \
+ old__ + val__; \
+})
-#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
-#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define arch_this_cpu_to_op(pcp, val, op) \
+{ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = __this_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ preempt_enable(); \
+}
+
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, "lang")
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
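
With the interlocked-access facility the add becomes a single instruction instead of a compare-and-swap loop. A hedged usage sketch; zx_hits and count_hit() are hypothetical:

	static DEFINE_PER_CPU(unsigned int, zx_hits);

	static void count_hit(void)
	{
		/* constant delta in [-128, 127]: a single "asi" on z196+,
		 * "laa" for other deltas, cmpxchg loop on older machines */
		this_cpu_add(zx_hits, 1);
	}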
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
@@ -74,13 +139,7 @@
pcp_op_T__ *ptr__; \
preempt_disable(); \
ptr__ = __this_cpu_ptr(&(pcp)); \
- switch (sizeof(*ptr__)) { \
- case 8: \
- ret__ = cmpxchg64(ptr__, oval, nval); \
- break; \
- default: \
- ret__ = cmpxchg(ptr__, oval, nval); \
- } \
+ ret__ = cmpxchg(ptr__, oval, nval); \
preempt_enable(); \
ret__; \
})
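
On a 64-bit-only kernel cmpxchg() handles 1-, 2-, 4- and 8-byte operands natively, so the cmpxchg64 dispatch became dead weight. A hedged usage sketch; zx_state and claim_state() are hypothetical:

	static DEFINE_PER_CPU(unsigned long, zx_state);

	static bool claim_state(void)
	{
		/* 8-byte compare-and-swap through the plain cmpxchg() path */
		return this_cpu_cmpxchg(zx_state, 0, 1) == 0;
	}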
@@ -104,9 +163,7 @@
#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#ifdef CONFIG_64BIT
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#endif
#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
@@ -124,9 +181,9 @@
})
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
-#ifdef CONFIG_64BIT
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif
+
+#endif /* CONFIG_64BIT */
#include <asm-generic/percpu.h>
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index ca7821f..0a876bc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -134,19 +134,17 @@ struct stack_frame {
* Do necessary setup to start up a new thread.
*/
#define start_thread(regs, new_psw, new_stackp) do { \
- regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \
+ regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
execve_tail(); \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
- regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
+ regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
- __tlb_flush_mm(current->mm); \
crst_table_downgrade(current->mm, 1UL << 31); \
- update_mm(current->mm, current); \
execve_tail(); \
} while (0)
@@ -169,17 +167,15 @@ extern void release_thread(struct task_struct *);
*/
extern unsigned long thread_saved_pc(struct task_struct *t);
-extern void show_code(struct pt_regs *regs);
-extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
- unsigned int len);
-
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
(task_stack_page(tsk) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
+/* Does the task have runtime instrumentation enabled? */
+#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
+
static inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -348,9 +344,9 @@ __set_psw_mask(unsigned long mask)
}
#define local_mcck_enable() \
- __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+ __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
- __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
+ __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
/*
* Basic Machine Check/Program Check Handler.
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 52b5653..9c82ceb 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -10,8 +10,11 @@
#ifndef __ASSEMBLY__
-extern long psw_kernel_bits;
-extern long psw_user_bits;
+#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
+ PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
+ PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
+ PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
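
Because the PSW templates are now compile-time constants rather than variables, they can appear in static initializers. A hedged sketch; hypothetical_wait_psw is illustrative only:

	/* a PSW template built entirely at compile time */
	static psw_t hypothetical_wait_psw = {
		.mask = PSW_KERNEL_BITS | PSW_MASK_WAIT,
		.addr = 0,
	};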
/*
* The pt_regs struct defines the way the registers are stored on
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 59880dba..df802ee 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -48,13 +48,6 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
unsigned long size);
-#define PRIMARY_SPACE_MODE 0
-#define ACCESS_REGISTER_MODE 1
-#define SECONDARY_SPACE_MODE 2
-#define HOME_SPACE_MODE 3
-
-extern unsigned int s390_user_mode;
-
/*
* Machine features detected in head.S
*/
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index b64f15c..ac9bed8 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -14,7 +14,6 @@
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
extern struct mutex smp_cpu_state_mutex;
-extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 6dbd559..29c81f8 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -13,58 +13,94 @@
extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);
-static inline void save_fp_regs(s390_fp_regs *fpregs)
+static inline int test_fp_ctl(u32 fpc)
{
+ u32 orig_fpc;
+ int rc;
+
+ if (!MACHINE_HAS_IEEE)
+ return 0;
+
asm volatile(
- " std 0,%O0+8(%R0)\n"
- " std 2,%O0+24(%R0)\n"
- " std 4,%O0+40(%R0)\n"
- " std 6,%O0+56(%R0)"
- : "=Q" (*fpregs) : "Q" (*fpregs));
+ " efpc %1\n"
+ " sfpc %2\n"
+ "0: sfpc %1\n"
+ " la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (rc), "=d" (orig_fpc)
+ : "d" (fpc), "0" (-EINVAL));
+ return rc;
+}
+
+static inline void save_fp_ctl(u32 *fpc)
+{
if (!MACHINE_HAS_IEEE)
return;
+
asm volatile(
- " stfpc %0\n"
- " std 1,%O0+16(%R0)\n"
- " std 3,%O0+32(%R0)\n"
- " std 5,%O0+48(%R0)\n"
- " std 7,%O0+64(%R0)\n"
- " std 8,%O0+72(%R0)\n"
- " std 9,%O0+80(%R0)\n"
- " std 10,%O0+88(%R0)\n"
- " std 11,%O0+96(%R0)\n"
- " std 12,%O0+104(%R0)\n"
- " std 13,%O0+112(%R0)\n"
- " std 14,%O0+120(%R0)\n"
- " std 15,%O0+128(%R0)\n"
- : "=Q" (*fpregs) : "Q" (*fpregs));
+ " stfpc %0\n"
+ : "+Q" (*fpc));
}
-static inline void restore_fp_regs(s390_fp_regs *fpregs)
+static inline int restore_fp_ctl(u32 *fpc)
{
+ int rc;
+
+ if (!MACHINE_HAS_IEEE)
+ return 0;
+
asm volatile(
- " ld 0,%O0+8(%R0)\n"
- " ld 2,%O0+24(%R0)\n"
- " ld 4,%O0+40(%R0)\n"
- " ld 6,%O0+56(%R0)"
- : : "Q" (*fpregs));
+ "0: lfpc %1\n"
+ " la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
+ return rc;
+}
+
+static inline void save_fp_regs(freg_t *fprs)
+{
+ asm volatile("std 0,%0" : "=Q" (fprs[0]));
+ asm volatile("std 2,%0" : "=Q" (fprs[2]));
+ asm volatile("std 4,%0" : "=Q" (fprs[4]));
+ asm volatile("std 6,%0" : "=Q" (fprs[6]));
if (!MACHINE_HAS_IEEE)
return;
- asm volatile(
- " lfpc %0\n"
- " ld 1,%O0+16(%R0)\n"
- " ld 3,%O0+32(%R0)\n"
- " ld 5,%O0+48(%R0)\n"
- " ld 7,%O0+64(%R0)\n"
- " ld 8,%O0+72(%R0)\n"
- " ld 9,%O0+80(%R0)\n"
- " ld 10,%O0+88(%R0)\n"
- " ld 11,%O0+96(%R0)\n"
- " ld 12,%O0+104(%R0)\n"
- " ld 13,%O0+112(%R0)\n"
- " ld 14,%O0+120(%R0)\n"
- " ld 15,%O0+128(%R0)\n"
- : : "Q" (*fpregs));
+ asm volatile("std 1,%0" : "=Q" (fprs[1]));
+ asm volatile("std 3,%0" : "=Q" (fprs[3]));
+ asm volatile("std 5,%0" : "=Q" (fprs[5]));
+ asm volatile("std 7,%0" : "=Q" (fprs[7]));
+ asm volatile("std 8,%0" : "=Q" (fprs[8]));
+ asm volatile("std 9,%0" : "=Q" (fprs[9]));
+ asm volatile("std 10,%0" : "=Q" (fprs[10]));
+ asm volatile("std 11,%0" : "=Q" (fprs[11]));
+ asm volatile("std 12,%0" : "=Q" (fprs[12]));
+ asm volatile("std 13,%0" : "=Q" (fprs[13]));
+ asm volatile("std 14,%0" : "=Q" (fprs[14]));
+ asm volatile("std 15,%0" : "=Q" (fprs[15]));
+}
+
+static inline void restore_fp_regs(freg_t *fprs)
+{
+ asm volatile("ld 0,%0" : : "Q" (fprs[0]));
+ asm volatile("ld 2,%0" : : "Q" (fprs[2]));
+ asm volatile("ld 4,%0" : : "Q" (fprs[4]));
+ asm volatile("ld 6,%0" : : "Q" (fprs[6]));
+ if (!MACHINE_HAS_IEEE)
+ return;
+ asm volatile("ld 1,%0" : : "Q" (fprs[1]));
+ asm volatile("ld 3,%0" : : "Q" (fprs[3]));
+ asm volatile("ld 5,%0" : : "Q" (fprs[5]));
+ asm volatile("ld 7,%0" : : "Q" (fprs[7]));
+ asm volatile("ld 8,%0" : : "Q" (fprs[8]));
+ asm volatile("ld 9,%0" : : "Q" (fprs[9]));
+ asm volatile("ld 10,%0" : : "Q" (fprs[10]));
+ asm volatile("ld 11,%0" : : "Q" (fprs[11]));
+ asm volatile("ld 12,%0" : : "Q" (fprs[12]));
+ asm volatile("ld 13,%0" : : "Q" (fprs[13]));
+ asm volatile("ld 14,%0" : : "Q" (fprs[14]));
+ asm volatile("ld 15,%0" : : "Q" (fprs[15]));
}
static inline void save_access_regs(unsigned int *acrs)
@@ -83,12 +119,14 @@ static inline void restore_access_regs(unsigned int *acrs)
#define switch_to(prev,next,last) do { \
if (prev->mm) { \
- save_fp_regs(&prev->thread.fp_regs); \
+ save_fp_ctl(&prev->thread.fp_regs.fpc); \
+ save_fp_regs(prev->thread.fp_regs.fprs); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
if (next->mm) { \
- restore_fp_regs(&next->thread.fp_regs); \
+ restore_fp_ctl(&next->thread.fp_regs.fpc); \
+ restore_fp_regs(next->thread.fp_regs.fprs); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
update_cr_regs(next); \
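
Splitting the FPC handling out of save/restore_fp_regs() lets callers validate a user-supplied FPC word before committing it. A hedged sketch of that pattern; set_user_fpc() is hypothetical, test_fp_ctl() and the thread fields come from this patch:

	static int set_user_fpc(struct task_struct *tsk, u32 fpc)
	{
		/* probe the value in the fpc register; a fault yields -EINVAL */
		if (test_fp_ctl(fpc))
			return -EINVAL;
		tsk->thread.fp_regs.fpc = fpc;
		return 0;
	}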
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 819b94d..8beee1c 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -71,9 +71,11 @@ static inline void local_tick_enable(unsigned long long comp)
typedef unsigned long long cycles_t;
-static inline void get_tod_clock_ext(char *clk)
+static inline void get_tod_clock_ext(char clk[16])
{
- asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
+	typedef struct { char _[16]; } addrtype;
+
+ asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
}
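
The addrtype cast tells the compiler that the constraint covers the whole 16-byte buffer rather than just its first element. A minimal usage sketch:

	char clk[16];

	/* stcke stores the full 16-byte extended TOD value */
	get_tod_clock_ext(clk);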
static inline unsigned long long get_tod_clock(void)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9c33ed4..79330af 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -94,9 +94,7 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
struct uaccess_ops {
size_t (*copy_from_user)(size_t, const void __user *, void *);
- size_t (*copy_from_user_small)(size_t, const void __user *, void *);
size_t (*copy_to_user)(size_t, void __user *, const void *);
- size_t (*copy_to_user_small)(size_t, void __user *, const void *);
size_t (*copy_in_user)(size_t, void __user *, const void __user *);
size_t (*clear_user)(size_t, void __user *);
size_t (*strnlen_user)(size_t, const char __user *);
@@ -106,22 +104,20 @@ struct uaccess_ops {
};
extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_std;
extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_mvcos_switch;
extern struct uaccess_ops uaccess_pt;
extern int __handle_fault(unsigned long, unsigned long, int);
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
- size = uaccess.copy_to_user_small(size, ptr, x);
+ size = uaccess.copy_to_user(size, ptr, x);
return size ? -EFAULT : size;
}
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size = uaccess.copy_from_user_small(size, ptr, x);
+ size = uaccess.copy_from_user(size, ptr, x);
return size ? -EFAULT : size;
}
@@ -226,10 +222,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (__builtin_constant_p(n) && (n <= 256))
- return uaccess.copy_to_user_small(n, to, from);
- else
- return uaccess.copy_to_user(n, to, from);
+ return uaccess.copy_to_user(n, to, from);
}
#define __copy_to_user_inatomic __copy_to_user
@@ -275,10 +268,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (__builtin_constant_p(n) && (n <= 256))
- return uaccess.copy_from_user_small(n, from, to);
- else
- return uaccess.copy_from_user(n, from, to);
+ return uaccess.copy_from_user(n, from, to);
}
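
With the *_small variants gone, every length funnels through the single copy_to_user/copy_from_user entry points. A hedged caller-side sketch; read_u32_from_user() is hypothetical:

	static inline int read_u32_from_user(u32 *dst, const u32 __user *src)
	{
		/* small constant sizes now take the same path as bulk copies */
		return __copy_from_user(dst, src, sizeof(*dst)) ? -EFAULT : 0;
	}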
extern void copy_from_user_overflow(void)