author    Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
commit    62b8c978ee6b8d135d9e7953221de58000dba986 (patch)
tree      683b04b2e627f6710c22c151b23c8cc9a165315e /arch/s390/include
parent    78fd82238d0e5716578c326404184a27ba67fd6e (diff)
download  linux-fsl-qoriq-62b8c978ee6b8d135d9e7953221de58000dba986.tar.xz
Rewind v3.13-rc3+ (78fd82238d0e5716) to v3.12
Diffstat (limited to 'arch/s390/include')
-rw-r--r-- arch/s390/include/asm/Kbuild | 1
-rw-r--r-- arch/s390/include/asm/atomic.h | 190
-rw-r--r-- arch/s390/include/asm/bitops.h | 1008
-rw-r--r-- arch/s390/include/asm/compat.h | 5
-rw-r--r-- arch/s390/include/asm/ctl_reg.h | 114
-rw-r--r-- arch/s390/include/asm/debug.h | 5
-rw-r--r-- arch/s390/include/asm/dis.h | 52
-rw-r--r-- arch/s390/include/asm/eadm.h | 13
-rw-r--r-- arch/s390/include/asm/fcx.h | 38
-rw-r--r-- arch/s390/include/asm/hardirq.h | 2
-rw-r--r-- arch/s390/include/asm/ipl.h | 10
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 8
-rw-r--r-- arch/s390/include/asm/mmu_context.h | 10
-rw-r--r-- arch/s390/include/asm/page.h | 45
-rw-r--r-- arch/s390/include/asm/pci.h | 6
-rw-r--r-- arch/s390/include/asm/pci_debug.h | 5
-rw-r--r-- arch/s390/include/asm/pci_insn.h | 15
-rw-r--r-- arch/s390/include/asm/percpu.h | 137
-rw-r--r-- arch/s390/include/asm/processor.h | 18
-rw-r--r-- arch/s390/include/asm/ptrace.h | 7
-rw-r--r-- arch/s390/include/asm/sclp.h | 3
-rw-r--r-- arch/s390/include/asm/setup.h | 10
-rw-r--r-- arch/s390/include/asm/smp.h | 1
-rw-r--r-- arch/s390/include/asm/switch_to.h | 124
-rw-r--r-- arch/s390/include/asm/thread_info.h | 2
-rw-r--r-- arch/s390/include/asm/timex.h | 6
-rw-r--r-- arch/s390/include/asm/uaccess.h | 18
-rw-r--r-- arch/s390/include/asm/vdso.h | 5
-rw-r--r-- arch/s390/include/uapi/asm/ptrace.h | 4
-rw-r--r-- arch/s390/include/uapi/asm/sigcontext.h | 1
-rw-r--r-- arch/s390/include/uapi/asm/socket.h | 2
31 files changed, 1064 insertions, 801 deletions
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 7a5288f..f313f9c 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -2,4 +2,3 @@
generic-y += clkdev.h
generic-y += trace_clock.h
-generic-y += preempt.h
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fa9aaf7..c797832 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -19,50 +19,21 @@
#define ATOMIC_INIT(i) { (i) }
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC_OR "lao"
-#define __ATOMIC_AND "lan"
-#define __ATOMIC_ADD "laa"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string) \
-({ \
- int old_val; \
- \
- typecheck(atomic_t *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- : "=d" (old_val), "+Q" ((ptr)->counter) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC_OR "or"
-#define __ATOMIC_AND "nr"
-#define __ATOMIC_ADD "ar"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string) \
-({ \
+#define __CS_LOOP(ptr, op_val, op_string) ({ \
int old_val, new_val; \
- \
- typecheck(atomic_t *, ptr); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
- : "d" (op_val) \
+ : "=&d" (old_val), "=&d" (new_val), \
+ "=Q" (((atomic_t *)(ptr))->counter) \
+ : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
: "cc", "memory"); \
- old_val; \
+ new_val; \
})
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
static inline int atomic_read(const atomic_t *v)
{
int c;
@@ -82,45 +53,32 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v)
{
- return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
+ return __CS_LOOP(v, i, "ar");
}
-
-static inline void atomic_add(int i, atomic_t *v)
-{
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- asm volatile(
- "asi %0,%1\n"
- : "+Q" (v->counter)
- : "i" (i)
- : "cc", "memory");
- } else {
- atomic_add_return(i, v);
- }
-#else
- atomic_add_return(i, v);
-#endif
-}
-
+#define atomic_add(_i, _v) atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v) atomic_add(1, _v)
+#define atomic_inc(_v) atomic_add_return(1, _v)
#define atomic_inc_return(_v) atomic_add_return(1, _v)
#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
-#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
-#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ return __CS_LOOP(v, i, "sr");
+}
+#define atomic_sub(_i, _v) atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v) atomic_sub(1, _v)
+#define atomic_dec(_v) atomic_sub_return(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
- __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
+ __CS_LOOP(v, ~mask, "nr");
}
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
- __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
+ __CS_LOOP(v, mask, "or");
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -129,8 +87,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
asm volatile(
" cs %0,%2,%1"
- : "+d" (old), "+Q" (v->counter)
- : "d" (new)
+ : "+d" (old), "=Q" (v->counter)
+ : "d" (new), "Q" (v->counter)
: "cc", "memory");
return old;
}
@@ -151,56 +109,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
}
-#undef __ATOMIC_LOOP
+#undef __CS_LOOP
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_64BIT
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC64_OR "laog"
-#define __ATOMIC64_AND "lang"
-#define __ATOMIC64_ADD "laag"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
-({ \
- long long old_val; \
- \
- typecheck(atomic64_t *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- : "=d" (old_val), "+Q" ((ptr)->counter) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC64_OR "ogr"
-#define __ATOMIC64_AND "ngr"
-#define __ATOMIC64_ADD "agr"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
-({ \
+#define __CSG_LOOP(ptr, op_val, op_string) ({ \
long long old_val, new_val; \
- \
- typecheck(atomic64_t *, ptr); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
- : "d" (op_val) \
+ : "=&d" (old_val), "=&d" (new_val), \
+ "=Q" (((atomic_t *)(ptr))->counter) \
+ : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
: "cc", "memory"); \
- old_val; \
+ new_val; \
})
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
static inline long long atomic64_read(const atomic64_t *v)
{
long long c;
@@ -220,17 +149,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
+ return __CSG_LOOP(v, i, "agr");
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ return __CSG_LOOP(v, i, "sgr");
}
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
- __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
+ __CSG_LOOP(v, ~mask, "ngr");
}
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
- __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
+ __CSG_LOOP(v, mask, "ogr");
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -240,13 +174,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
{
asm volatile(
" csg %0,%2,%1"
- : "+d" (old), "+Q" (v->counter)
- : "d" (new)
+ : "+d" (old), "=Q" (v->counter)
+ : "d" (new), "Q" (v->counter)
: "cc", "memory");
return old;
}
-#undef __ATOMIC64_LOOP
+#undef __CSG_LOOP
#else /* CONFIG_64BIT */
@@ -282,8 +216,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
" lm %0,%N0,%1\n"
"0: cds %0,%2,%1\n"
" jl 0b\n"
- : "=&d" (rp_old), "+Q" (v->counter)
- : "d" (rp_new)
+ : "=&d" (rp_old), "=Q" (v->counter)
+ : "d" (rp_new), "Q" (v->counter)
: "cc");
return rp_old.pair;
}
@@ -296,8 +230,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
asm volatile(
" cds %0,%2,%1"
- : "+&d" (rp_old), "+Q" (v->counter)
- : "d" (rp_new)
+ : "+&d" (rp_old), "=Q" (v->counter)
+ : "d" (rp_new), "Q" (v->counter)
: "cc");
return rp_old.pair;
}
@@ -314,6 +248,17 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
return new;
}
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ long long old, new;
+
+ do {
+ old = atomic64_read(v);
+ new = old - i;
+ } while (atomic64_cmpxchg(v, old, new) != old);
+ return new;
+}
+
static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
long long old, new;
@@ -336,24 +281,7 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
#endif /* CONFIG_64BIT */
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- asm volatile(
- "agsi %0,%1\n"
- : "+Q" (v->counter)
- : "i" (i)
- : "cc", "memory");
- } else {
- atomic64_add_return(i, v);
- }
-#else
- atomic64_add_return(i, v);
-#endif
-}
-
-static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
long long c, old;
@@ -361,7 +289,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
for (;;) {
if (unlikely(c == u))
break;
- old = atomic64_cmpxchg(v, c, c + i);
+ old = atomic64_cmpxchg(v, c, c + a);
if (likely(old == c))
break;
c = old;
@@ -386,14 +314,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return dec;
}
+#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add(1, _v)
+#define atomic64_inc(_v) atomic64_add_return(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
-#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub(1, _v)
+#define atomic64_dec(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
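
The restored __CS_LOOP builds every atomic primitive from a plain load / modify / compare-and-swap retry loop around the CS instruction, where the z196 __ATOMIC_LOOP it replaces could use the interlocked load-and-add/and/or instructions directly. A minimal sketch of the same retry pattern in portable C11, assuming nothing beyond <stdatomic.h> (cs_loop_add is an illustrative name, not a kernel symbol):

#include <stdatomic.h>

/* Sketch of the __CS_LOOP idea: load the old value, compute the new
 * one, and retry the compare-and-swap until no other CPU modified the
 * counter in between.  Like __CS_LOOP, it returns the new value. */
static int cs_loop_add(atomic_int *counter, int op_val)
{
        int old_val = atomic_load(counter);
        int new_val;

        do {
                new_val = old_val + op_val;     /* the "ar" step */
        } while (!atomic_compare_exchange_weak(counter, &old_val, new_val));
        /* on failure, old_val is reloaded with the current value */
        return new_val;
}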
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 6e6ad06..10135a3 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -1,40 +1,10 @@
/*
- * Copyright IBM Corp. 1999,2013
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
- *
- * The description below was taken in large parts from the powerpc
- * bitops header file:
- * Within a word, bits are numbered LSB first. Lot's of places make
- * this assumption by directly testing bits with (val & (1<<nr)).
- * This can cause confusion for large (> 1 word) bitmaps on a
- * big-endian system because, unlike little endian, the number of each
- * bit depends on the word size.
- *
- * The bitop functions are defined to work on unsigned longs, so for an
- * s390x system the bits end up numbered:
- * |63..............0|127............64|191...........128|255...........196|
- * and on s390:
- * |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224|
- *
- * There are a few little-endian macros used mostly for filesystem
- * bitmaps, these work on similar bit arrays layouts, but
- * byte-oriented:
- * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
- *
- * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
- * number field needs to be reversed compared to the big-endian bit
- * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
- *
- * We also have special functions which work with an MSB0 encoding:
- * on an s390x system the bits are numbered:
- * |0..............63|64............127|128...........191|192...........255|
- * and on s390:
- * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
- *
- * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
- * number field needs to be reversed compared to the LSB0 encoded bit
- * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
+ * Derived from "include/asm-i386/bitops.h"
+ * Copyright (C) 1992, Linus Torvalds
*
*/
@@ -45,348 +15,556 @@
#error only <linux/bitops.h> can be included directly
#endif
-#include <linux/typecheck.h>
#include <linux/compiler.h>
+/*
+ * 32 bit bitops format:
+ * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
+ * bit 32 is the LSB of *(addr+4). That combined with the
+ * big endian byte order on S390 give the following bit
+ * order in memory:
+ * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
+ * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ * after that follows the next long with bit numbers
+ * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
+ * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
+ * The reason for this bit ordering is the fact that
+ * in the architecture independent code bits operations
+ * of the form "flags |= (1 << bitnr)" are used INTERMIXED
+ * with operation of the form "set_bit(bitnr, flags)".
+ *
+ * 64 bit bitops format:
+ * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
+ * bit 64 is the LSB of *(addr+8). That combined with the
+ * big endian byte order on S390 give the following bit
+ * order in memory:
+ * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
+ * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
+ * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
+ * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ * after that follows the next long with bit numbers
+ * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
+ * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
+ * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
+ * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
+ * The reason for this bit ordering is the fact that
+ * in the architecture independent code bits operations
+ * of the form "flags |= (1 << bitnr)" are used INTERMIXED
+ * with operation of the form "set_bit(bitnr, flags)".
+ */
+
+/* bitmap tables from arch/s390/kernel/bitmap.c */
+extern const char _oi_bitmap[];
+extern const char _ni_bitmap[];
+extern const char _zb_findmap[];
+extern const char _sb_findmap[];
+
#ifndef CONFIG_64BIT
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
-#define __BITOPS_LOOP(__addr, __val, __op_string) \
-({ \
- unsigned long __old, __new; \
- \
- typecheck(unsigned long *, (__addr)); \
+#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
__op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
- : "d" (__val) \
- : "cc"); \
- __old; \
-})
+ : "=&d" (__old), "=&d" (__new), \
+ "=Q" (*(unsigned long *) __addr) \
+ : "d" (__val), "Q" (*(unsigned long *) __addr) \
+ : "cc");
#else /* CONFIG_64BIT */
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __BITOPS_OR "laog"
-#define __BITOPS_AND "lang"
-#define __BITOPS_XOR "laxg"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string) \
-({ \
- unsigned long __old; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- __op_string " %0,%2,%1\n" \
- : "=d" (__old), "+Q" (*(__addr)) \
- : "d" (__val) \
- : "cc"); \
- __old; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
-#define __BITOPS_LOOP(__addr, __val, __op_string) \
-({ \
- unsigned long __old, __new; \
- \
- typecheck(unsigned long *, (__addr)); \
+#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
__op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
- : "d" (__val) \
- : "cc"); \
- __old; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+ : "=&d" (__old), "=&d" (__new), \
+ "=Q" (*(unsigned long *) __addr) \
+ : "d" (__val), "Q" (*(unsigned long *) __addr) \
+ : "cc");
#endif /* CONFIG_64BIT */
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-static inline unsigned long *
-__bitops_word(unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
-
- addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
- return (unsigned long *)addr;
-}
-
-static inline unsigned char *
-__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
-{
- return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-}
-
-static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
+#ifdef CONFIG_SMP
+/*
+ * SMP safe set_bit routine based on compare and swap (CS)
+ */
+static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask;
+ unsigned long addr, old, new, mask;
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- if (__builtin_constant_p(nr)) {
- unsigned char *caddr = __bitops_byte(nr, ptr);
-
- asm volatile(
- "oi %0,%b1\n"
- : "+Q" (*caddr)
- : "i" (1 << (nr & 7))
- : "cc");
- return;
- }
-#endif
+ addr = (unsigned long) ptr;
+ /* calculate address for CS */
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
+ /* make OR mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_OR);
+ /* Do the atomic update. */
+ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
-static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
+/*
+ * SMP safe clear_bit routine based on compare and swap (CS)
+ */
+static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask;
-
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- if (__builtin_constant_p(nr)) {
- unsigned char *caddr = __bitops_byte(nr, ptr);
+ unsigned long addr, old, new, mask;
- asm volatile(
- "ni %0,%b1\n"
- : "+Q" (*caddr)
- : "i" (~(1 << (nr & 7)))
- : "cc");
- return;
- }
-#endif
+ addr = (unsigned long) ptr;
+ /* calculate address for CS */
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
+ /* make AND mask */
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- __BITOPS_LOOP(addr, mask, __BITOPS_AND);
+ /* Do the atomic update. */
+ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}
-static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
+/*
+ * SMP safe change_bit routine based on compare and swap (CS)
+ */
+static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask;
-
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- if (__builtin_constant_p(nr)) {
- unsigned char *caddr = __bitops_byte(nr, ptr);
+ unsigned long addr, old, new, mask;
- asm volatile(
- "xi %0,%b1\n"
- : "+Q" (*caddr)
- : "i" (1 << (nr & 7))
- : "cc");
- return;
- }
-#endif
+ addr = (unsigned long) ptr;
+ /* calculate address for CS */
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
+ /* make XOR mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
+ /* Do the atomic update. */
+ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}
+/*
+ * SMP safe test_and_set_bit routine based on compare and swap (CS)
+ */
static inline int
-test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
+test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long old, mask;
+ unsigned long addr, old, new, mask;
+ addr = (unsigned long) ptr;
+ /* calculate address for CS */
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
+ /* make OR/test mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
+ /* Do the atomic update. */
+ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
barrier();
return (old & mask) != 0;
}
+/*
+ * SMP safe test_and_clear_bit routine based on compare and swap (CS)
+ */
static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long old, mask;
+ unsigned long addr, old, new, mask;
+ addr = (unsigned long) ptr;
+ /* calculate address for CS */
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
+ /* make AND/test mask */
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
+ /* Do the atomic update. */
+ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
barrier();
- return (old & ~mask) != 0;
+ return (old ^ new) != 0;
}
+/*
+ * SMP safe test_and_change_bit routine based on compare and swap (CS)
+ */
static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
+test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long old, mask;
+ unsigned long addr, old, new, mask;
+ addr = (unsigned long) ptr;
+ /* calculate address for CS */
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
+ /* make XOR/test mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
+ /* Do the atomic update. */
+ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
barrier();
return (old & mask) != 0;
}
+#endif /* CONFIG_SMP */
+/*
+ * fast, non-SMP set_bit routine
+ */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long addr;
+
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ asm volatile(
+ " oc %O0(1,%R0),%1"
+ : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
+}
+
+static inline void
+__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long addr;
- *addr |= 1 << (nr & 7);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ *(unsigned char *) addr |= 1 << (nr & 7);
}
+#define set_bit_simple(nr,addr) \
+(__builtin_constant_p((nr)) ? \
+ __constant_set_bit((nr),(addr)) : \
+ __set_bit((nr),(addr)) )
+
+/*
+ * fast, non-SMP clear_bit routine
+ */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long addr;
+
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ asm volatile(
+ " nc %O0(1,%R0),%1"
+ : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
+}
+
+static inline void
+__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long addr;
- *addr &= ~(1 << (nr & 7));
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ *(unsigned char *) addr &= ~(1 << (nr & 7));
}
+#define clear_bit_simple(nr,addr) \
+(__builtin_constant_p((nr)) ? \
+ __constant_clear_bit((nr),(addr)) : \
+ __clear_bit((nr),(addr)) )
+
+/*
+ * fast, non-SMP change_bit routine
+ */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long addr;
+
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ asm volatile(
+ " xc %O0(1,%R0),%1"
+ : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
+}
+
+static inline void
+__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long addr;
- *addr ^= 1 << (nr & 7);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ *(unsigned char *) addr ^= 1 << (nr & 7);
}
+#define change_bit_simple(nr,addr) \
+(__builtin_constant_p((nr)) ? \
+ __constant_change_bit((nr),(addr)) : \
+ __change_bit((nr),(addr)) )
+
+/*
+ * fast, non-SMP test_and_set_bit routine
+ */
static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
+test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long addr;
unsigned char ch;
- ch = *addr;
- *addr |= 1 << (nr & 7);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ ch = *(unsigned char *) addr;
+ asm volatile(
+ " oc %O0(1,%R0),%1"
+ : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+ : "cc", "memory");
return (ch >> (nr & 7)) & 1;
}
+#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
+/*
+ * fast, non-SMP test_and_clear_bit routine
+ */
static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long addr;
unsigned char ch;
- ch = *addr;
- *addr &= ~(1 << (nr & 7));
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ ch = *(unsigned char *) addr;
+ asm volatile(
+ " nc %O0(1,%R0),%1"
+ : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
+ : "cc", "memory");
return (ch >> (nr & 7)) & 1;
}
+#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
+/*
+ * fast, non-SMP test_and_change_bit routine
+ */
static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
+test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long addr;
unsigned char ch;
- ch = *addr;
- *addr ^= 1 << (nr & 7);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ ch = *(unsigned char *) addr;
+ asm volatile(
+ " xc %O0(1,%R0),%1"
+ : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+ : "cc", "memory");
return (ch >> (nr & 7)) & 1;
}
+#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
+
+#ifdef CONFIG_SMP
+#define set_bit set_bit_cs
+#define clear_bit clear_bit_cs
+#define change_bit change_bit_cs
+#define test_and_set_bit test_and_set_bit_cs
+#define test_and_clear_bit test_and_clear_bit_cs
+#define test_and_change_bit test_and_change_bit_cs
+#else
+#define set_bit set_bit_simple
+#define clear_bit clear_bit_simple
+#define change_bit change_bit_simple
+#define test_and_set_bit test_and_set_bit_simple
+#define test_and_clear_bit test_and_clear_bit_simple
+#define test_and_change_bit test_and_change_bit_simple
+#endif
+
+
+/*
+ * This routine doesn't need to be atomic.
+ */
-static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
+static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
- const volatile unsigned char *addr;
+ unsigned long addr;
+ unsigned char ch;
+
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ ch = *(volatile unsigned char *) addr;
+ return (ch >> (nr & 7)) & 1;
+}
- addr = ((const volatile unsigned char *)ptr);
- addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
- return (*addr >> (nr & 7)) & 1;
+static inline int
+__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
+ return (((volatile char *) addr)
+ [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
}
+#define test_bit(nr,addr) \
+(__builtin_constant_p((nr)) ? \
+ __constant_test_bit((nr),(addr)) : \
+ __test_bit((nr),(addr)) )
+
/*
- * Functions which use MSB0 bit numbering.
- * On an s390x system the bits are numbered:
- * |0..............63|64............127|128...........191|192...........255|
- * and on s390:
- * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ * Optimized find bit helper functions.
+ */
+
+/**
+ * __ffz_word_loop - find byte offset of first long != -1UL
+ * @addr: pointer to array of unsigned long
+ * @size: size of the array in bits
*/
-unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
-unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
- unsigned long offset);
+static inline unsigned long __ffz_word_loop(const unsigned long *addr,
+ unsigned long size)
+{
+ typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
+ unsigned long bytes = 0;
+
+ asm volatile(
+#ifndef CONFIG_64BIT
+ " ahi %1,-1\n"
+ " sra %1,5\n"
+ " jz 1f\n"
+ "0: c %2,0(%0,%3)\n"
+ " jne 1f\n"
+ " la %0,4(%0)\n"
+ " brct %1,0b\n"
+ "1:\n"
+#else
+ " aghi %1,-1\n"
+ " srag %1,%1,6\n"
+ " jz 1f\n"
+ "0: cg %2,0(%0,%3)\n"
+ " jne 1f\n"
+ " la %0,8(%0)\n"
+ " brct %1,0b\n"
+ "1:\n"
+#endif
+ : "+&a" (bytes), "+&d" (size)
+ : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
+ : "cc" );
+ return bytes;
+}
-static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+/**
+ * __ffs_word_loop - find byte offset of first long != 0UL
+ * @addr: pointer to array of unsigned long
+ * @size: size of the array in bits
+ */
+static inline unsigned long __ffs_word_loop(const unsigned long *addr,
+ unsigned long size)
{
- return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+ typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
+ unsigned long bytes = 0;
+
+ asm volatile(
+#ifndef CONFIG_64BIT
+ " ahi %1,-1\n"
+ " sra %1,5\n"
+ " jz 1f\n"
+ "0: c %2,0(%0,%3)\n"
+ " jne 1f\n"
+ " la %0,4(%0)\n"
+ " brct %1,0b\n"
+ "1:\n"
+#else
+ " aghi %1,-1\n"
+ " srag %1,%1,6\n"
+ " jz 1f\n"
+ "0: cg %2,0(%0,%3)\n"
+ " jne 1f\n"
+ " la %0,8(%0)\n"
+ " brct %1,0b\n"
+ "1:\n"
+#endif
+ : "+&a" (bytes), "+&a" (size)
+ : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
+ : "cc" );
+ return bytes;
}
-static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+/**
+ * __ffz_word - add number of the first unset bit
+ * @nr: base value the bit number is added to
+ * @word: the word that is searched for unset bits
+ */
+static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
- return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+#ifdef CONFIG_64BIT
+ if ((word & 0xffffffff) == 0xffffffff) {
+ word >>= 32;
+ nr += 32;
+ }
+#endif
+ if ((word & 0xffff) == 0xffff) {
+ word >>= 16;
+ nr += 16;
+ }
+ if ((word & 0xff) == 0xff) {
+ word >>= 8;
+ nr += 8;
+ }
+ return nr + _zb_findmap[(unsigned char) word];
}
-static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+/**
+ * __ffs_word - add number of the first set bit
+ * @nr: base value the bit number is added to
+ * @word: the word that is searched for set bits
+ */
+static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
- return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+#ifdef CONFIG_64BIT
+ if ((word & 0xffffffff) == 0) {
+ word >>= 32;
+ nr += 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ word >>= 16;
+ nr += 16;
+ }
+ if ((word & 0xff) == 0) {
+ word >>= 8;
+ nr += 8;
+ }
+ return nr + _sb_findmap[(unsigned char) word];
}
-static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+
+/**
+ * __load_ulong_be - load big endian unsigned long
+ * @p: pointer to array of unsigned long
+ * @offset: byte offset of source value in the array
+ */
+static inline unsigned long __load_ulong_be(const unsigned long *p,
+ unsigned long offset)
{
- return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+ p = (unsigned long *)((unsigned long) p + offset);
+ return *p;
}
-static inline int test_bit_inv(unsigned long nr,
- const volatile unsigned long *ptr)
+/**
+ * __load_ulong_le - load little endian unsigned long
+ * @p: pointer to array of unsigned long
+ * @offset: byte offset of source value in the array
+ */
+static inline unsigned long __load_ulong_le(const unsigned long *p,
+ unsigned long offset)
{
- return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+ unsigned long word;
+
+ p = (unsigned long *)((unsigned long) p + offset);
+#ifndef CONFIG_64BIT
+ asm volatile(
+ " ic %0,%O1(%R1)\n"
+ " icm %0,2,%O1+1(%R1)\n"
+ " icm %0,4,%O1+2(%R1)\n"
+ " icm %0,8,%O1+3(%R1)"
+ : "=&d" (word) : "Q" (*p) : "cc");
+#else
+ asm volatile(
+ " lrvg %0,%1"
+ : "=d" (word) : "m" (*p) );
+#endif
+ return word;
}
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+/*
+ * The various find bit functions.
+ */
-/**
- * __flogr - find leftmost one
- * @word - The word to search
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
*
- * Returns the bit number of the most significant bit set,
- * where the most significant bit has bit number 0.
- * If no bit is set this function returns 64.
- */
-static inline unsigned char __flogr(unsigned long word)
-{
- if (__builtin_constant_p(word)) {
- unsigned long bit = 0;
-
- if (!word)
- return 64;
- if (!(word & 0xffffffff00000000UL)) {
- word <<= 32;
- bit += 32;
- }
- if (!(word & 0xffff000000000000UL)) {
- word <<= 16;
- bit += 16;
- }
- if (!(word & 0xff00000000000000UL)) {
- word <<= 8;
- bit += 8;
- }
- if (!(word & 0xf000000000000000UL)) {
- word <<= 4;
- bit += 4;
- }
- if (!(word & 0xc000000000000000UL)) {
- word <<= 2;
- bit += 2;
- }
- if (!(word & 0x8000000000000000UL)) {
- word <<= 1;
- bit += 1;
- }
- return bit;
- } else {
- register unsigned long bit asm("4") = word;
- register unsigned long out asm("5");
-
- asm volatile(
- " flogr %[bit],%[bit]\n"
- : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
- return bit;
- }
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+ return __ffz_word(0, word);
}
/**
@@ -395,83 +573,337 @@ static inline unsigned char __flogr(unsigned long word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs (unsigned long word)
{
- return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
+ return __ffs_word(0, word);
}
/**
* ffs - find first bit set
- * @word: the word to search
+ * @x: the word to search
*
- * This is defined the same way as the libc and
- * compiler builtin ffs routines (man ffs).
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
*/
-static inline int ffs(int word)
+static inline int ffs(int x)
{
- unsigned long mask = 2 * BITS_PER_LONG - 1;
- unsigned int val = (unsigned int)word;
-
- return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
+ if (!x)
+ return 0;
+ return __ffs_word(1, x);
}
/**
- * __fls - find last (most-significant) set bit in a long word
- * @word: the word to search
+ * find_first_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
*
- * Undefined if no set bit exists, so code should check against 0 first.
+ * Returns the bit-number of the first zero bit, not the number of the byte
+ * containing a bit.
*/
-static inline unsigned long __fls(unsigned long word)
+static inline unsigned long find_first_zero_bit(const unsigned long *addr,
+ unsigned long size)
{
- return __flogr(word) ^ (BITS_PER_LONG - 1);
+ unsigned long bytes, bits;
+
+ if (!size)
+ return 0;
+ bytes = __ffz_word_loop(addr, size);
+ bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
+ return (bits < size) ? bits : size;
}
+#define find_first_zero_bit find_first_zero_bit
/**
- * fls64 - find last set bit in a 64-bit word
- * @word: the word to search
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
*
- * This is defined in a similar way as the libc and compiler builtin
- * ffsll, but returns the position of the most significant set bit.
- *
- * fls64(value) returns 0 if value is 0 or the position of the last
- * set bit if value is nonzero. The last (most significant) bit is
- * at position 64.
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+static inline unsigned long find_first_bit(const unsigned long * addr,
+ unsigned long size)
+{
+ unsigned long bytes, bits;
+
+ if (!size)
+ return 0;
+ bytes = __ffs_word_loop(addr, size);
+ bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
+ return (bits < size) ? bits : size;
+}
+#define find_first_bit find_first_bit
+
+/*
+ * Big endian variant whichs starts bit counting from left using
+ * the flogr (find leftmost one) instruction.
+ */
+static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
+{
+ register unsigned long bit asm("2") = val;
+ register unsigned long out asm("3");
+
+ asm volatile (
+ " .insn rre,0xb9830000,%[bit],%[bit]\n"
+ : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+ return nr + bit;
+}
+
+/*
+ * 64 bit special left bitops format:
+ * order in memory:
+ * 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
+ * 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
+ * 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
+ * 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
+ * after that follows the next long with bit numbers
+ * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
+ * 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
+ * 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
+ * 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
+ * The reason for this bit ordering is the fact that
+ * the hardware sets bits in a bitmap starting at bit 0
+ * and we don't want to scan the bitmap from the 'wrong
+ * end'.
*/
-static inline int fls64(unsigned long word)
+static inline unsigned long find_first_bit_left(const unsigned long *addr,
+ unsigned long size)
{
- unsigned long mask = 2 * BITS_PER_LONG - 1;
+ unsigned long bytes, bits;
+
+ if (!size)
+ return 0;
+ bytes = __ffs_word_loop(addr, size);
+ bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
+ return (bits < size) ? bits : size;
+}
- return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
+static inline int find_next_bit_left(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p;
+ unsigned long bit, set;
+
+ if (offset >= size)
+ return size;
+ bit = offset & (BITS_PER_LONG - 1);
+ offset -= bit;
+ size -= offset;
+ p = addr + offset / BITS_PER_LONG;
+ if (bit) {
+ set = __flo_word(0, *p & (~0UL >> bit));
+ if (set >= size)
+ return size + offset;
+ if (set < BITS_PER_LONG)
+ return set + offset;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ p++;
+ }
+ return offset + find_first_bit_left(p, size);
}
+#define for_each_set_bit_left(bit, addr, size) \
+ for ((bit) = find_first_bit_left((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_left_cont(bit, addr, size) \
+ for ((bit) = find_next_bit_left((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+
/**
- * fls - find last (most-significant) bit set
- * @word: the word to search
- *
- * This is defined the same way as ffs.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ * find_next_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
*/
-static inline int fls(int word)
+static inline int find_next_zero_bit (const unsigned long * addr,
+ unsigned long size,
+ unsigned long offset)
{
- return fls64((unsigned int)word);
+ const unsigned long *p;
+ unsigned long bit, set;
+
+ if (offset >= size)
+ return size;
+ bit = offset & (BITS_PER_LONG - 1);
+ offset -= bit;
+ size -= offset;
+ p = addr + offset / BITS_PER_LONG;
+ if (bit) {
+ /*
+ * __ffz_word returns BITS_PER_LONG
+ * if no zero bit is present in the word.
+ */
+ set = __ffz_word(bit, *p >> bit);
+ if (set >= size)
+ return size + offset;
+ if (set < BITS_PER_LONG)
+ return set + offset;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ p++;
+ }
+ return offset + find_first_zero_bit(p, size);
}
+#define find_next_zero_bit find_next_zero_bit
-#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
+/**
+ * find_next_bit - find the first set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+static inline int find_next_bit (const unsigned long * addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p;
+ unsigned long bit, set;
+
+ if (offset >= size)
+ return size;
+ bit = offset & (BITS_PER_LONG - 1);
+ offset -= bit;
+ size -= offset;
+ p = addr + offset / BITS_PER_LONG;
+ if (bit) {
+ /*
+ * __ffs_word returns BITS_PER_LONG
+ * if no one bit is present in the word.
+ */
+ set = __ffs_word(0, *p & (~0UL << bit));
+ if (set >= size)
+ return size + offset;
+ if (set < BITS_PER_LONG)
+ return set + offset;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ p++;
+ }
+ return offset + find_first_bit(p, size);
+}
+#define find_next_bit find_next_bit
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(unsigned long *b)
+{
+ return find_first_bit(b, 140);
+}
-#include <asm-generic/bitops/__ffs.h>
-#include <asm-generic/bitops/ffs.h>
-#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
-
-#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/sched.h>
+
+/*
+ * ATTENTION: intel byte ordering convention for ext2 and minix !!
+ * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
+ * bit 32 is the LSB of (addr+4).
+ * That combined with the little endian byte order of Intel gives the
+ * following bit order in memory:
+ * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
+ * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
+ */
+
+static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
+{
+ unsigned long bytes, bits;
+
+ if (!size)
+ return 0;
+ bytes = __ffz_word_loop(vaddr, size);
+ bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
+ return (bits < size) ? bits : size;
+}
+#define find_first_zero_bit_le find_first_zero_bit_le
+
+static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
+ unsigned long offset)
+{
+ unsigned long *addr = vaddr, *p;
+ unsigned long bit, set;
+
+ if (offset >= size)
+ return size;
+ bit = offset & (BITS_PER_LONG - 1);
+ offset -= bit;
+ size -= offset;
+ p = addr + offset / BITS_PER_LONG;
+ if (bit) {
+ /*
+ * s390 version of ffz returns BITS_PER_LONG
+ * if no zero bit is present in the word.
+ */
+ set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
+ if (set >= size)
+ return size + offset;
+ if (set < BITS_PER_LONG)
+ return set + offset;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ p++;
+ }
+ return offset + find_first_zero_bit_le(p, size);
+}
+#define find_next_zero_bit_le find_next_zero_bit_le
+
+static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
+{
+ unsigned long bytes, bits;
+
+ if (!size)
+ return 0;
+ bytes = __ffs_word_loop(vaddr, size);
+ bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
+ return (bits < size) ? bits : size;
+}
+#define find_first_bit_le find_first_bit_le
+
+static inline int find_next_bit_le(void *vaddr, unsigned long size,
+ unsigned long offset)
+{
+ unsigned long *addr = vaddr, *p;
+ unsigned long bit, set;
+
+ if (offset >= size)
+ return size;
+ bit = offset & (BITS_PER_LONG - 1);
+ offset -= bit;
+ size -= offset;
+ p = addr + offset / BITS_PER_LONG;
+ if (bit) {
+ /*
+ * s390 version of ffz returns BITS_PER_LONG
+ * if no zero bit is present in the word.
+ */
+ set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
+ if (set >= size)
+ return size + offset;
+ if (set < BITS_PER_LONG)
+ return set + offset;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ p++;
+ }
+ return offset + find_first_bit_le(p, size);
+}
+#define find_next_bit_le find_next_bit_le
+
#include <asm-generic/bitops/le.h>
+
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* _S390_BITOPS_H */
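
Throughout both versions of this header, the byte that holds bit nr is located with (nr ^ (BITS_PER_LONG - 8)) >> 3, which converts an LSB0 bit number into a byte offset under big-endian storage. A small standalone check of that index arithmetic, assuming the 64-bit layout (BITS_PER_LONG = 64):

#include <stdio.h>

#define BITS_PER_LONG 64        /* assumption: s390x, 64-bit longs */

/* XOR with (BITS_PER_LONG - 8) flips the byte index within the word,
 * because byte 0 of a big-endian long holds bits 56..63 and byte 7
 * holds bits 0..7. */
static unsigned long bit_to_byte_offset(unsigned long nr)
{
        return (nr ^ (BITS_PER_LONG - 8)) >> 3;
}

int main(void)
{
        printf("bit 0  -> byte %lu\n", bit_to_byte_offset(0));   /* 7 */
        printf("bit 63 -> byte %lu\n", bit_to_byte_offset(63));  /* 0 */
        printf("bit 64 -> byte %lu\n", bit_to_byte_offset(64));  /* 15 */
        return 0;
}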
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 4bf9da0..c1e7c64 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -22,7 +22,6 @@
#define PSW32_MASK_ASC 0x0000C000UL
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
-#define PSW32_MASK_RI 0x00000080UL
#define PSW32_MASK_USER 0x0000FF00UL
@@ -36,9 +35,7 @@
#define PSW32_ASC_SECONDARY 0x00008000UL
#define PSW32_ASC_HOME 0x0000C000UL
-#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
- PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
- PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
+extern u32 psw32_user_bits;
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 4e63f1a..debfda3 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -7,62 +7,70 @@
#ifndef __ASM_CTL_REG_H
#define __ASM_CTL_REG_H
-#include <linux/bug.h>
-
#ifdef CONFIG_64BIT
-# define __CTL_LOAD "lctlg"
-# define __CTL_STORE "stctg"
-#else
-# define __CTL_LOAD "lctl"
-# define __CTL_STORE "stctl"
-#endif
-
-#define __ctl_load(array, low, high) { \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- \
- BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
- asm volatile( \
- __CTL_LOAD " %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
-}
-
-#define __ctl_store(array, low, high) { \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- \
- BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
- asm volatile( \
- __CTL_STORE " %1,%2,%0\n" \
- : "=Q" (*(addrtype *)(&array)) \
- : "i" (low), "i" (high)); \
-}
-
-static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
-{
- unsigned long reg;
-
- __ctl_store(reg, cr, cr);
- reg |= 1UL << bit;
- __ctl_load(reg, cr, cr);
-}
-
-static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
-{
- unsigned long reg;
-
- __ctl_store(reg, cr, cr);
- reg &= ~(1UL << bit);
- __ctl_load(reg, cr, cr);
-}
-
-void smp_ctl_set_bit(int cr, int bit);
-void smp_ctl_clear_bit(int cr, int bit);
+
+#define __ctl_load(array, low, high) ({ \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ asm volatile( \
+ " lctlg %1,%2,%0\n" \
+ : : "Q" (*(addrtype *)(&array)), \
+ "i" (low), "i" (high)); \
+ })
+
+#define __ctl_store(array, low, high) ({ \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ asm volatile( \
+ " stctg %1,%2,%0\n" \
+ : "=Q" (*(addrtype *)(&array)) \
+ : "i" (low), "i" (high)); \
+ })
+
+#else /* CONFIG_64BIT */
+
+#define __ctl_load(array, low, high) ({ \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ asm volatile( \
+ " lctl %1,%2,%0\n" \
+ : : "Q" (*(addrtype *)(&array)), \
+ "i" (low), "i" (high)); \
+})
+
+#define __ctl_store(array, low, high) ({ \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ asm volatile( \
+ " stctl %1,%2,%0\n" \
+ : "=Q" (*(addrtype *)(&array)) \
+ : "i" (low), "i" (high)); \
+ })
+
+#endif /* CONFIG_64BIT */
+
+#define __ctl_set_bit(cr, bit) ({ \
+ unsigned long __dummy; \
+ __ctl_store(__dummy, cr, cr); \
+ __dummy |= 1UL << (bit); \
+ __ctl_load(__dummy, cr, cr); \
+})
+
+#define __ctl_clear_bit(cr, bit) ({ \
+ unsigned long __dummy; \
+ __ctl_store(__dummy, cr, cr); \
+ __dummy &= ~(1UL << (bit)); \
+ __ctl_load(__dummy, cr, cr); \
+})
#ifdef CONFIG_SMP
-# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
+
+extern void smp_ctl_set_bit(int cr, int bit);
+extern void smp_ctl_clear_bit(int cr, int bit);
+#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
+
#else
-# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-#endif
+
+#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+
+#endif /* CONFIG_SMP */
#endif /* __ASM_CTL_REG_H */
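
Both the old and new __ctl_load/__ctl_store wrap the target array in typedef struct { char _[sizeof(array)]; } addrtype; so the inline asm gets a single memory operand covering the whole range of control registers, not just the first element. A hedged illustration of the same idiom, with an ordinary struct copy standing in for the lctlg/stctg instruction (store_all is illustrative, not a kernel macro):

/* Casting through a struct of exactly sizeof(array) bytes tells the
 * compiler that every byte of the array is written, so none of it may
 * be cached in registers across the operation. */
#define store_all(array, src) do {                              \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        *(addrtype *)(&array) = *(const addrtype *)(src);       \
} while (0)

void demo(void)
{
        unsigned long regs[4];
        static const unsigned long snapshot[4] = { 1, 2, 3, 4 };

        store_all(regs, snapshot);      /* copies all four longs at once */
}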
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 530c15e..188c505 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -107,11 +107,6 @@ void debug_set_level(debug_info_t* id, int new_level);
void debug_set_critical(void);
void debug_stop_all(void);
-static inline bool debug_level_enabled(debug_info_t* id, int level)
-{
- return level <= id->level;
-}
-
static inline debug_entry_t*
debug_event(debug_info_t* id, int level, void* data, int length)
{
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
deleted file mode 100644
index 04a83f5..0000000
--- a/arch/s390/include/asm/dis.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Disassemble s390 instructions.
- *
- * Copyright IBM Corp. 2007
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- */
-
-#ifndef __ASM_S390_DIS_H__
-#define __ASM_S390_DIS_H__
-
-/* Type of operand */
-#define OPERAND_GPR 0x1 /* Operand printed as %rx */
-#define OPERAND_FPR 0x2 /* Operand printed as %fx */
-#define OPERAND_AR 0x4 /* Operand printed as %ax */
-#define OPERAND_CR 0x8 /* Operand printed as %cx */
-#define OPERAND_DISP 0x10 /* Operand printed as displacement */
-#define OPERAND_BASE 0x20 /* Operand printed as base register */
-#define OPERAND_INDEX 0x40 /* Operand printed as index register */
-#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
-#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
-
-
-struct s390_operand {
- int bits; /* The number of bits in the operand. */
- int shift; /* The number of bits to shift. */
- int flags; /* One bit syntax flags. */
-};
-
-struct s390_insn {
- const char name[5];
- unsigned char opfrag;
- unsigned char format;
-};
-
-
-static inline int insn_length(unsigned char code)
-{
- return ((((int) code + 64) >> 7) + 1) << 1;
-}
-
-void show_code(struct pt_regs *regs);
-void print_fn_code(unsigned char *code, unsigned long len);
-int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
-struct s390_insn *find_insn(unsigned char *code);
-
-static inline int is_known_insn(unsigned char *code)
-{
- return !!find_insn(code);
-}
-
-#endif /* __ASM_S390_DIS_H__ */
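
The deleted dis.h computed an s390 instruction's length from its first opcode byte: the top two bits encode 2, 4 or 6 bytes (00 -> 2, 01/10 -> 4, 11 -> 6), and ((((int) code + 64) >> 7) + 1) << 1 evaluates that without a branch. A quick standalone check of the formula:

#include <assert.h>

/* Same formula as the removed insn_length(): adding 64 and shifting
 * right by 7 maps the top two opcode bits 00/01/10/11 to 0/1/1/2. */
static int insn_length(unsigned char code)
{
        return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
        assert(insn_length(0x00) == 2);  /* top bits 00 -> 2-byte insn */
        assert(insn_length(0x40) == 4);  /* top bits 01 -> 4-byte insn */
        assert(insn_length(0xb2) == 4);  /* top bits 10 -> 4-byte insn */
        assert(insn_length(0xc0) == 6);  /* top bits 11 -> 6-byte insn */
        return 0;
}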
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 6702630..dc9200c 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -111,7 +111,18 @@ struct scm_driver {
int scm_driver_register(struct scm_driver *scmdrv);
void scm_driver_unregister(struct scm_driver *scmdrv);
-int eadm_start_aob(struct aob *aob);
+int scm_start_aob(struct aob *aob);
void scm_irq_handler(struct aob *aob, int error);
+struct eadm_ops {
+ int (*eadm_start) (struct aob *aob);
+ struct module *owner;
+};
+
+int scm_get_ref(void);
+void scm_put_ref(void);
+
+void register_eadm_ops(struct eadm_ops *ops);
+void unregister_eadm_ops(struct eadm_ops *ops);
+
#endif /* _ASM_S390_EADM_H */
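
The rewound eadm.h reinstates an indirection layer: callers reach the eadm facility through a registered struct eadm_ops rather than a direct eadm_start_aob() call, with scm_get_ref()/scm_put_ref() pinning the provider module. A minimal hedged sketch of that register-and-dispatch pattern (everything beyond the names shown in the header is illustrative):

#include <stddef.h>

struct aob;                              /* opaque request block */

struct eadm_ops {
        int (*eadm_start)(struct aob *aob);
};

static struct eadm_ops *registered_ops;  /* illustrative backing store */

void register_eadm_ops(struct eadm_ops *ops)
{
        registered_ops = ops;
}

void unregister_eadm_ops(struct eadm_ops *ops)
{
        (void)ops;                       /* single provider in this sketch */
        registered_ops = NULL;
}

/* Dispatch through whichever provider registered, as scm_start_aob()
 * does in this header's scheme. */
int scm_start_aob(struct aob *aob)
{
        return registered_ops ? registered_ops->eadm_start(aob) : -1;
}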
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index 7ecb92b..ef61709 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -12,9 +12,9 @@
#define TCW_FORMAT_DEFAULT 0
#define TCW_TIDAW_FORMAT_DEFAULT 0
-#define TCW_FLAGS_INPUT_TIDA (1 << (23 - 5))
-#define TCW_FLAGS_TCCB_TIDA (1 << (23 - 6))
-#define TCW_FLAGS_OUTPUT_TIDA (1 << (23 - 7))
+#define TCW_FLAGS_INPUT_TIDA 1 << (23 - 5)
+#define TCW_FLAGS_TCCB_TIDA 1 << (23 - 6)
+#define TCW_FLAGS_OUTPUT_TIDA 1 << (23 - 7)
#define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9)
#define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3)
@@ -54,11 +54,11 @@ struct tcw {
u32 intrg;
} __attribute__ ((packed, aligned(64)));
-#define TIDAW_FLAGS_LAST (1 << (7 - 0))
-#define TIDAW_FLAGS_SKIP (1 << (7 - 1))
-#define TIDAW_FLAGS_DATA_INT (1 << (7 - 2))
-#define TIDAW_FLAGS_TTIC (1 << (7 - 3))
-#define TIDAW_FLAGS_INSERT_CBC (1 << (7 - 4))
+#define TIDAW_FLAGS_LAST 1 << (7 - 0)
+#define TIDAW_FLAGS_SKIP 1 << (7 - 1)
+#define TIDAW_FLAGS_DATA_INT 1 << (7 - 2)
+#define TIDAW_FLAGS_TTIC 1 << (7 - 3)
+#define TIDAW_FLAGS_INSERT_CBC 1 << (7 - 4)
/**
* struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
@@ -106,9 +106,9 @@ struct tsa_ddpc {
u8 sense[32];
} __attribute__ ((packed));
-#define TSA_INTRG_FLAGS_CU_STATE_VALID (1 << (7 - 0))
-#define TSA_INTRG_FLAGS_DEV_STATE_VALID (1 << (7 - 1))
-#define TSA_INTRG_FLAGS_OP_STATE_VALID (1 << (7 - 2))
+#define TSA_INTRG_FLAGS_CU_STATE_VALID 1 << (7 - 0)
+#define TSA_INTRG_FLAGS_DEV_STATE_VALID 1 << (7 - 1)
+#define TSA_INTRG_FLAGS_OP_STATE_VALID 1 << (7 - 2)
/**
* struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
@@ -140,10 +140,10 @@ struct tsa_intrg {
#define TSB_FORMAT_DDPC 2
#define TSB_FORMAT_INTRG 3
-#define TSB_FLAGS_DCW_OFFSET_VALID (1 << (7 - 0))
-#define TSB_FLAGS_COUNT_VALID (1 << (7 - 1))
-#define TSB_FLAGS_CACHE_MISS (1 << (7 - 2))
-#define TSB_FLAGS_TIME_VALID (1 << (7 - 3))
+#define TSB_FLAGS_DCW_OFFSET_VALID 1 << (7 - 0)
+#define TSB_FLAGS_COUNT_VALID 1 << (7 - 1)
+#define TSB_FLAGS_CACHE_MISS 1 << (7 - 2)
+#define TSB_FLAGS_TIME_VALID 1 << (7 - 3)
#define TSB_FLAGS_FORMAT(x) ((x) & 7)
#define TSB_FORMAT(t) ((t)->flags & 7)
@@ -179,9 +179,9 @@ struct tsb {
#define DCW_INTRG_RCQ_PRIMARY 1
#define DCW_INTRG_RCQ_SECONDARY 2
-#define DCW_INTRG_FLAGS_MPM (1 << (7 - 0))
-#define DCW_INTRG_FLAGS_PPR (1 << (7 - 1))
-#define DCW_INTRG_FLAGS_CRIT (1 << (7 - 2))
+#define DCW_INTRG_FLAGS_MPM 1 < (7 - 0)
+#define DCW_INTRG_FLAGS_PPR 1 < (7 - 1)
+#define DCW_INTRG_FLAGS_CRIT 1 < (7 - 2)
/**
* struct dcw_intrg_data - Interrogate DCW data
@@ -216,7 +216,7 @@ struct dcw_intrg_data {
u8 prog_data[0];
} __attribute__ ((packed));
-#define DCW_FLAGS_CC (1 << (7 - 1))
+#define DCW_FLAGS_CC 1 << (7 - 1)
#define DCW_CMD_WRITE 0x01
#define DCW_CMD_READ 0x02
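
The rewind strips the parentheses from these flag macros again (and reverts the DCW_INTRG_FLAGS_* ones to a stray < instead of <<), which is exactly the hazard the parenthesized v3.13 definitions guard against: a bare 1 << (7 - 0) mis-expands inside larger expressions because arithmetic binds tighter than shift. A short demonstration (the _OLD/_NEW suffixes are illustrative):

#include <stdio.h>

#define TIDAW_FLAGS_LAST_OLD   1 << (7 - 0)     /* bare, as restored here */
#define TIDAW_FLAGS_LAST_NEW  (1 << (7 - 0))    /* parenthesized, as in v3.13 */

int main(void)
{
        /* '+' binds tighter than '<<', so the bare macro expands to
         * 1 << ((7 - 0) + 1) == 256 rather than the intended 128 + 1. */
        printf("bare:          %d\n", TIDAW_FLAGS_LAST_OLD + 1);   /* 256 */
        printf("parenthesized: %d\n", TIDAW_FLAGS_LAST_NEW + 1);   /* 129 */
        return 0;
}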
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index b7eabaa..a908d29 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -18,6 +18,8 @@
#define __ARCH_HAS_DO_SOFTIRQ
#define __ARCH_IRQ_EXIT_IRQS_DISABLED
+#define HARDIRQ_BITS 8
+
static inline void ack_bad_irq(unsigned int irq)
{
printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 2fcccc0..2bd6cb8 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -7,7 +7,6 @@
#ifndef _ASM_S390_IPL_H
#define _ASM_S390_IPL_H
-#include <asm/lowcore.h>
#include <asm/types.h>
#include <asm/cio.h>
#include <asm/setup.h>
@@ -87,14 +86,7 @@ struct ipl_parameter_block {
*/
extern u32 ipl_flags;
extern u32 dump_prefix_page;
-
-struct dump_save_areas {
- struct save_area **areas;
- int count;
-};
-
-extern struct dump_save_areas dump_save_areas;
-struct save_area *dump_save_area_create(int cpu);
+extern unsigned int zfcpdump_prefix_array[];
extern void do_reipl(void);
extern void do_halt(void);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d5bc375..e87ecaa 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -38,6 +38,13 @@ struct sca_block {
struct sca_entry cpu[64];
} __attribute__((packed));
+#define KVM_NR_PAGE_SIZES 2
+#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
+#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
+
#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
@@ -213,6 +220,7 @@ struct kvm_s390_interrupt_info {
/* for local_interrupt.action_flags */
#define ACTION_STORE_ON_STOP (1<<0)
#define ACTION_STOP_ON_STOP (1<<1)
+#define ACTION_RELOADVCPU_ON_STOP (1<<2)
struct kvm_s390_local_interrupt {
spinlock_t lock;
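Editor's note: the KVM_HPAGE_* macros restored above derive each huge-page level from the base page size. A quick standalone check, assuming s390's PAGE_SHIFT of 12 (4 KiB base pages); the macro names mirror the kernel's, but this is a sketch, not kernel code:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 8)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

int main(void)
{
	/* level 1: 4096 bytes (base page), level 2: 1 MiB (256 pages) */
	printf("level 1: %lu bytes, %lu base pages\n",
	       KVM_HPAGE_SIZE(1), KVM_PAGES_PER_HPAGE(1));
	printf("level 2: %lu bytes, %lu base pages\n",
	       KVM_HPAGE_SIZE(2), KVM_PAGES_PER_HPAGE(2));
	return 0;
}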
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5d1f950..9f973d8 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -40,8 +40,14 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
- /* Load primary space page table origin. */
- asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+ if (s390_user_mode != HOME_SPACE_MODE) {
+ /* Load primary space page table origin. */
+ asm volatile(LCTL_OPCODE" 1,1,%0\n"
+ : : "m" (S390_lowcore.user_asce) );
+ } else
+ /* Load home space page table origin. */
+ asm volatile(LCTL_OPCODE" 13,13,%0"
+ : : "m" (S390_lowcore.user_asce) );
set_fs(current->thread.mm_segment);
}
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 114258e..1e51f29 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -30,12 +30,7 @@
#include <asm/setup.h>
#ifndef __ASSEMBLY__
-static inline void storage_key_init_range(unsigned long start, unsigned long end)
-{
-#if PAGE_DEFAULT_KEY
- __storage_key_init_range(start, end);
-#endif
-}
+void storage_key_init_range(unsigned long start, unsigned long end);
static inline void clear_page(void *page)
{
@@ -48,21 +43,33 @@ static inline void clear_page(void *page)
: "memory", "cc");
}
-/*
- * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
- * bypass caches when copying a page. Especially when copying huge pages
- * this keeps L1 and L2 data caches alive.
- */
static inline void copy_page(void *to, void *from)
{
- register void *reg2 asm ("2") = to;
- register unsigned long reg3 asm ("3") = 0x1000;
- register void *reg4 asm ("4") = from;
- register unsigned long reg5 asm ("5") = 0xb0001000;
- asm volatile(
- " mvcl 2,4"
- : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
- : : "memory", "cc");
+ if (MACHINE_HAS_MVPG) {
+ register unsigned long reg0 asm ("0") = 0;
+ asm volatile(
+ " mvpg %0,%1"
+ : : "a" (to), "a" (from), "d" (reg0)
+ : "memory", "cc");
+ } else
+ asm volatile(
+ " mvc 0(256,%0),0(%1)\n"
+ " mvc 256(256,%0),256(%1)\n"
+ " mvc 512(256,%0),512(%1)\n"
+ " mvc 768(256,%0),768(%1)\n"
+ " mvc 1024(256,%0),1024(%1)\n"
+ " mvc 1280(256,%0),1280(%1)\n"
+ " mvc 1536(256,%0),1536(%1)\n"
+ " mvc 1792(256,%0),1792(%1)\n"
+ " mvc 2048(256,%0),2048(%1)\n"
+ " mvc 2304(256,%0),2304(%1)\n"
+ " mvc 2560(256,%0),2560(%1)\n"
+ " mvc 2816(256,%0),2816(%1)\n"
+ " mvc 3072(256,%0),3072(%1)\n"
+ " mvc 3328(256,%0),3328(%1)\n"
+ " mvc 3584(256,%0),3584(%1)\n"
+ " mvc 3840(256,%0),3840(%1)\n"
+ : : "a" (to), "a" (from) : "memory");
}
#define clear_user_page(page, vaddr, pg) clear_page(page)
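Editor's note: the MVC instruction moves at most 256 bytes, which is why the restored copy_page() fallback above issues sixteen MVC operations to cover one 4 KiB page. A portable C sketch of the same chunking (copy_page_sketch is a hypothetical stand-in, not the kernel function):

#include <string.h>

#define PAGE_SIZE	4096UL
#define MVC_CHUNK	256UL	/* maximum MVC length */

static void copy_page_sketch(void *to, const void *from)
{
	unsigned long off;

	/* sixteen 256-byte moves, matching the unrolled mvc sequence */
	for (off = 0; off < PAGE_SIZE; off += MVC_CHUNK)
		memcpy((char *)to + off, (const char *)from + off, MVC_CHUNK);
}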
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c129ab2..1cc185d 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -63,10 +63,9 @@ enum zpci_state {
};
struct zpci_bar_struct {
- struct resource *res; /* bus resource */
u32 val; /* bar start & 3 flag bits */
- u16 map_idx; /* index into bar mapping array */
u8 size; /* order 2 exponent */
+ u16 map_idx; /* index into bar mapping array */
};
/* Private data per function */
@@ -98,7 +97,6 @@ struct zpci_dev {
unsigned long iommu_pages;
unsigned int next_bit;
- char res_name[16];
struct zpci_bar_struct bars[PCI_BAR_COUNT];
u64 start_dma; /* Start of available DMA addresses */
@@ -124,10 +122,12 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
Prototypes
----------------------------------------------------------------------------- */
/* Base stuff */
+struct zpci_dev *zpci_alloc_device(void);
int zpci_create_device(struct zpci_dev *);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
void zpci_stop_device(struct zpci_dev *);
+void zpci_free_device(struct zpci_dev *);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
int zpci_unregister_ioat(struct zpci_dev *, u8);
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index ac24b26..1ca5d10 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -6,9 +6,14 @@
extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
+#ifdef CONFIG_PCI_DEBUG
#define zpci_dbg(imp, fmt, args...) \
debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
+#else /* !CONFIG_PCI_DEBUG */
+#define zpci_dbg(imp, fmt, args...) do { } while (0)
+#endif
+
#define zpci_err(text...) \
do { \
char debug_buffer[16]; \
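Editor's note: when CONFIG_PCI_DEBUG is off, zpci_dbg() above is stubbed out as `do { } while (0)`, the usual idiom for a macro that generates no code yet still parses as a single statement. A hedged sketch of the pattern (my_dbg and MY_DEBUG are hypothetical, not kernel names; `##__VA_ARGS__` is a GNU extension, as in the kernel):

#include <stdio.h>

#ifdef MY_DEBUG
#define my_dbg(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define my_dbg(fmt, ...)	do { } while (0)
#endif

void probe(int found)
{
	if (found)
		my_dbg("device found\n");	/* still one statement */
	else
		my_dbg("no device\n");		/* unbraced else stays valid */
}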
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 649eb62..df6eac9 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -54,9 +54,11 @@
struct zpci_fib {
u32 fmt : 8; /* format */
u32 : 24;
- u32 : 32;
+ u32 reserved1;
u8 fc; /* function controls */
- u64 : 56;
+ u8 reserved2;
+ u16 reserved3;
+ u32 reserved4;
u64 pba; /* PCI base address */
u64 pal; /* PCI address limit */
u64 iota; /* I/O Translation Anchor */
@@ -68,13 +70,14 @@ struct zpci_fib {
u32 sum : 1; /* Adapter int summary bit enabled */
u32 : 1;
u32 aisbo : 6; /* Adapter int summary bit offset */
- u32 : 32;
+ u32 reserved5;
u64 aibv; /* Adapter int bit vector address */
u64 aisb; /* Adapter int summary bit address */
u64 fmb_addr; /* Function measurement block address and key */
- u32 : 32;
- u32 gd;
-} __packed __aligned(8);
+ u64 reserved6;
+ u64 reserved7;
+} __packed;
+
int zpci_mod_fc(u64 req, struct zpci_fib *fib);
int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index fa91e00..86fe0ee 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,22 +10,16 @@
*/
#define __my_cpu_offset S390_lowcore.percpu_offset
-#ifdef CONFIG_64BIT
-
/*
* For 64 bit module code, the module may be more than 4G above the
* per cpu area, use weak definitions to force the compiler to
* generate external references.
*/
-#if defined(CONFIG_SMP) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
-/*
- * We use a compare-and-swap loop since that uses less cpu cycles than
- * disabling and enabling interrupts like the generic variant would do.
- */
-#define arch_this_cpu_to_op_simple(pcp, val, op) \
+#define arch_this_cpu_to_op(pcp, val, op) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
@@ -36,101 +30,42 @@
do { \
old__ = prev__; \
new__ = old__ op (val); \
- prev__ = cmpxchg(ptr__, old__, new__); \
+ switch (sizeof(*ptr__)) { \
+ case 8: \
+ prev__ = cmpxchg64(ptr__, old__, new__); \
+ break; \
+ default: \
+ prev__ = cmpxchg(ptr__, old__, new__); \
+ } \
} while (prev__ != old__); \
preempt_enable(); \
new__; \
})
-#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
-#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
-#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
-#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
-#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
-#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
-#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-
-#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
-#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
-#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
-#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
-#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
-#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
-#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
-{ \
- typedef typeof(pcp) pcp_op_T__; \
- pcp_op_T__ val__ = (val); \
- pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
- ptr__ = __this_cpu_ptr(&(pcp)); \
- if (__builtin_constant_p(val__) && \
- ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
- asm volatile( \
- op2 " %[ptr__],%[val__]\n" \
- : [ptr__] "+Q" (*ptr__) \
- : [val__] "i" ((szcast)val__) \
- : "cc"); \
- } else { \
- asm volatile( \
- op1 " %[old__],%[val__],%[ptr__]\n" \
- : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
- : [val__] "d" (val__) \
- : "cc"); \
- } \
- preempt_enable(); \
-}
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
-#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
+#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define arch_this_cpu_add_return(pcp, val, op) \
-({ \
- typedef typeof(pcp) pcp_op_T__; \
- pcp_op_T__ val__ = (val); \
- pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
- ptr__ = __this_cpu_ptr(&(pcp)); \
- asm volatile( \
- op " %[old__],%[val__],%[ptr__]\n" \
- : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
- : [val__] "d" (val__) \
- : "cc"); \
- preempt_enable(); \
- old__ + val__; \
-})
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
-#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define arch_this_cpu_to_op(pcp, val, op) \
-{ \
- typedef typeof(pcp) pcp_op_T__; \
- pcp_op_T__ val__ = (val); \
- pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
- ptr__ = __this_cpu_ptr(&(pcp)); \
- asm volatile( \
- op " %[old__],%[val__],%[ptr__]\n" \
- : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
- : [val__] "d" (val__) \
- : "cc"); \
- preempt_enable(); \
-}
-
-#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
-#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, "lang")
-#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
-#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
@@ -139,7 +74,13 @@
pcp_op_T__ *ptr__; \
preempt_disable(); \
ptr__ = __this_cpu_ptr(&(pcp)); \
- ret__ = cmpxchg(ptr__, oval, nval); \
+ switch (sizeof(*ptr__)) { \
+ case 8: \
+ ret__ = cmpxchg64(ptr__, oval, nval); \
+ break; \
+ default: \
+ ret__ = cmpxchg(ptr__, oval, nval); \
+ } \
preempt_enable(); \
ret__; \
})
@@ -163,7 +104,9 @@
#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+#ifdef CONFIG_64BIT
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+#endif
#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
@@ -181,9 +124,9 @@
})
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
+#ifdef CONFIG_64BIT
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-
-#endif /* CONFIG_64BIT */
+#endif
#include <asm-generic/percpu.h>
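Editor's note: the restored arch_this_cpu_to_op() above is a classic compare-and-swap retry loop: read the old value, compute `old op val`, and retry if another update slipped in between. A standalone sketch of the same shape, using GCC's __sync_val_compare_and_swap in place of the kernel's cmpxchg() and omitting the preempt_disable()/preempt_enable() pair:

static inline int atomic_add_sketch(int *ptr, int val)
{
	int old, new;

	do {
		old = *ptr;		/* snapshot current value */
		new = old + val;	/* compute desired value */
		/* retry until no concurrent update intervened */
	} while (__sync_val_compare_and_swap(ptr, old, new) != old);
	return new;
}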
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 0a876bc..ca7821f 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -134,17 +134,19 @@ struct stack_frame {
* Do necessary setup to start up a new thread.
*/
#define start_thread(regs, new_psw, new_stackp) do { \
- regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
+ regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
execve_tail(); \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
- regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
+ regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
+ __tlb_flush_mm(current->mm); \
crst_table_downgrade(current->mm, 1UL << 31); \
+ update_mm(current->mm, current); \
execve_tail(); \
} while (0)
@@ -167,15 +169,17 @@ extern void release_thread(struct task_struct *);
*/
extern unsigned long thread_saved_pc(struct task_struct *t);
+extern void show_code(struct pt_regs *regs);
+extern void print_fn_code(unsigned char *code, unsigned long len);
+extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
+ unsigned int len);
+
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
(task_stack_page(tsk) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
-/* Has task runtime instrumentation enabled ? */
-#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
-
static inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -344,9 +348,9 @@ __set_psw_mask(unsigned long mask)
}
#define local_mcck_enable() \
- __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
+ __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
- __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
+ __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
/*
* Basic Machine Check/Program Check Handler.
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 9c82ceb..52b5653 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -10,11 +10,8 @@
#ifndef __ASSEMBLY__
-#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
- PSW_MASK_EA | PSW_MASK_BA)
-#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
- PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
- PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
+extern long psw_kernel_bits;
+extern long psw_user_bits;
/*
* The pt_regs struct defines the way the registers are stored on
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 30ef748..7dc7f9c 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -43,6 +43,7 @@ struct sclp_cpu_info {
int sclp_get_cpu_info(struct sclp_cpu_info *info);
int sclp_cpu_configure(u8 cpu);
int sclp_cpu_deconfigure(u8 cpu);
+void sclp_facilities_detect(void);
unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void);
int sclp_sdias_blk_count(void);
@@ -56,7 +57,5 @@ bool sclp_has_vt220(void);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
-unsigned long sclp_get_hsa_size(void);
-void sclp_early_detect(void);
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 94cfbe4..59880dba 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -48,6 +48,13 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
unsigned long size);
+#define PRIMARY_SPACE_MODE 0
+#define ACCESS_REGISTER_MODE 1
+#define SECONDARY_SPACE_MODE 2
+#define HOME_SPACE_MODE 3
+
+extern unsigned int s390_user_mode;
+
/*
* Machine features detected in head.S
*/
@@ -107,6 +114,9 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
#endif /* CONFIG_64BIT */
+#define ZFCPDUMP_HSA_SIZE (32UL<<20)
+#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20)
+
/*
* Console mode. Override with conmode=
*/
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ac9bed8..b64f15c 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -14,6 +14,7 @@
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
extern struct mutex smp_cpu_state_mutex;
+extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f8..6dbd559 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -13,94 +13,58 @@
extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);
-static inline int test_fp_ctl(u32 fpc)
+static inline void save_fp_regs(s390_fp_regs *fpregs)
{
- u32 orig_fpc;
- int rc;
-
- if (!MACHINE_HAS_IEEE)
- return 0;
-
asm volatile(
- " efpc %1\n"
- " sfpc %2\n"
- "0: sfpc %1\n"
- " la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc), "=d" (orig_fpc)
- : "d" (fpc), "0" (-EINVAL));
- return rc;
-}
-
-static inline void save_fp_ctl(u32 *fpc)
-{
+ " std 0,%O0+8(%R0)\n"
+ " std 2,%O0+24(%R0)\n"
+ " std 4,%O0+40(%R0)\n"
+ " std 6,%O0+56(%R0)"
+ : "=Q" (*fpregs) : "Q" (*fpregs));
if (!MACHINE_HAS_IEEE)
return;
-
asm volatile(
- " stfpc %0\n"
- : "+Q" (*fpc));
+ " stfpc %0\n"
+ " std 1,%O0+16(%R0)\n"
+ " std 3,%O0+32(%R0)\n"
+ " std 5,%O0+48(%R0)\n"
+ " std 7,%O0+64(%R0)\n"
+ " std 8,%O0+72(%R0)\n"
+ " std 9,%O0+80(%R0)\n"
+ " std 10,%O0+88(%R0)\n"
+ " std 11,%O0+96(%R0)\n"
+ " std 12,%O0+104(%R0)\n"
+ " std 13,%O0+112(%R0)\n"
+ " std 14,%O0+120(%R0)\n"
+ " std 15,%O0+128(%R0)\n"
+ : "=Q" (*fpregs) : "Q" (*fpregs));
}
-static inline int restore_fp_ctl(u32 *fpc)
+static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
- int rc;
-
- if (!MACHINE_HAS_IEEE)
- return 0;
-
asm volatile(
- "0: lfpc %1\n"
- " la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
- return rc;
-}
-
-static inline void save_fp_regs(freg_t *fprs)
-{
- asm volatile("std 0,%0" : "=Q" (fprs[0]));
- asm volatile("std 2,%0" : "=Q" (fprs[2]));
- asm volatile("std 4,%0" : "=Q" (fprs[4]));
- asm volatile("std 6,%0" : "=Q" (fprs[6]));
+ " ld 0,%O0+8(%R0)\n"
+ " ld 2,%O0+24(%R0)\n"
+ " ld 4,%O0+40(%R0)\n"
+ " ld 6,%O0+56(%R0)"
+ : : "Q" (*fpregs));
if (!MACHINE_HAS_IEEE)
return;
- asm volatile("std 1,%0" : "=Q" (fprs[1]));
- asm volatile("std 3,%0" : "=Q" (fprs[3]));
- asm volatile("std 5,%0" : "=Q" (fprs[5]));
- asm volatile("std 7,%0" : "=Q" (fprs[7]));
- asm volatile("std 8,%0" : "=Q" (fprs[8]));
- asm volatile("std 9,%0" : "=Q" (fprs[9]));
- asm volatile("std 10,%0" : "=Q" (fprs[10]));
- asm volatile("std 11,%0" : "=Q" (fprs[11]));
- asm volatile("std 12,%0" : "=Q" (fprs[12]));
- asm volatile("std 13,%0" : "=Q" (fprs[13]));
- asm volatile("std 14,%0" : "=Q" (fprs[14]));
- asm volatile("std 15,%0" : "=Q" (fprs[15]));
-}
-
-static inline void restore_fp_regs(freg_t *fprs)
-{
- asm volatile("ld 0,%0" : : "Q" (fprs[0]));
- asm volatile("ld 2,%0" : : "Q" (fprs[2]));
- asm volatile("ld 4,%0" : : "Q" (fprs[4]));
- asm volatile("ld 6,%0" : : "Q" (fprs[6]));
- if (!MACHINE_HAS_IEEE)
- return;
- asm volatile("ld 1,%0" : : "Q" (fprs[1]));
- asm volatile("ld 3,%0" : : "Q" (fprs[3]));
- asm volatile("ld 5,%0" : : "Q" (fprs[5]));
- asm volatile("ld 7,%0" : : "Q" (fprs[7]));
- asm volatile("ld 8,%0" : : "Q" (fprs[8]));
- asm volatile("ld 9,%0" : : "Q" (fprs[9]));
- asm volatile("ld 10,%0" : : "Q" (fprs[10]));
- asm volatile("ld 11,%0" : : "Q" (fprs[11]));
- asm volatile("ld 12,%0" : : "Q" (fprs[12]));
- asm volatile("ld 13,%0" : : "Q" (fprs[13]));
- asm volatile("ld 14,%0" : : "Q" (fprs[14]));
- asm volatile("ld 15,%0" : : "Q" (fprs[15]));
+ asm volatile(
+ " lfpc %0\n"
+ " ld 1,%O0+16(%R0)\n"
+ " ld 3,%O0+32(%R0)\n"
+ " ld 5,%O0+48(%R0)\n"
+ " ld 7,%O0+64(%R0)\n"
+ " ld 8,%O0+72(%R0)\n"
+ " ld 9,%O0+80(%R0)\n"
+ " ld 10,%O0+88(%R0)\n"
+ " ld 11,%O0+96(%R0)\n"
+ " ld 12,%O0+104(%R0)\n"
+ " ld 13,%O0+112(%R0)\n"
+ " ld 14,%O0+120(%R0)\n"
+ " ld 15,%O0+128(%R0)\n"
+ : : "Q" (*fpregs));
}
static inline void save_access_regs(unsigned int *acrs)
@@ -119,14 +83,12 @@ static inline void restore_access_regs(unsigned int *acrs)
#define switch_to(prev,next,last) do { \
if (prev->mm) { \
- save_fp_ctl(&prev->thread.fp_regs.fpc); \
- save_fp_regs(prev->thread.fp_regs.fprs); \
+ save_fp_regs(&prev->thread.fp_regs); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
if (next->mm) { \
- restore_fp_ctl(&next->thread.fp_regs.fpc); \
- restore_fp_regs(next->thread.fp_regs.fprs); \
+ restore_fp_regs(&next->thread.fp_regs); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
update_cr_regs(next); \
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 10e0fcd..eb5f64d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -111,4 +111,6 @@ static inline struct thread_info *current_thread_info(void)
#define is_32bit_task() (1)
#endif
+#define PREEMPT_ACTIVE 0x4000000
+
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8beee1c..819b94d 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -71,11 +71,9 @@ static inline void local_tick_enable(unsigned long long comp)
typedef unsigned long long cycles_t;
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
{
- typedef struct { char _[sizeof(clk)]; } addrtype;
-
- asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
+ asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
}
static inline unsigned long long get_tod_clock(void)
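Editor's note on the removed variant of get_tod_clock_ext(): in C an array parameter such as `char clk[16]` decays to `char *`, so `sizeof(clk)` inside the function is the size of a pointer, not 16. A small demonstration of that general point:

#include <stdio.h>

static void f(char clk[16])
{
	printf("sizeof(clk) in f: %zu\n", sizeof(clk));	/* 8 on 64-bit */
}

int main(void)
{
	char clk[16];

	printf("sizeof(clk) in main: %zu\n", sizeof clk);	/* 16 */
	f(clk);
	return 0;
}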
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 79330af..9c33ed4 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -94,7 +94,9 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
struct uaccess_ops {
size_t (*copy_from_user)(size_t, const void __user *, void *);
+ size_t (*copy_from_user_small)(size_t, const void __user *, void *);
size_t (*copy_to_user)(size_t, void __user *, const void *);
+ size_t (*copy_to_user_small)(size_t, void __user *, const void *);
size_t (*copy_in_user)(size_t, void __user *, const void __user *);
size_t (*clear_user)(size_t, void __user *);
size_t (*strnlen_user)(size_t, const char __user *);
@@ -104,20 +106,22 @@ struct uaccess_ops {
};
extern struct uaccess_ops uaccess;
+extern struct uaccess_ops uaccess_std;
extern struct uaccess_ops uaccess_mvcos;
+extern struct uaccess_ops uaccess_mvcos_switch;
extern struct uaccess_ops uaccess_pt;
extern int __handle_fault(unsigned long, unsigned long, int);
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
- size = uaccess.copy_to_user(size, ptr, x);
+ size = uaccess.copy_to_user_small(size, ptr, x);
return size ? -EFAULT : size;
}
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size = uaccess.copy_from_user(size, ptr, x);
+ size = uaccess.copy_from_user_small(size, ptr, x);
return size ? -EFAULT : size;
}
@@ -222,7 +226,10 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return uaccess.copy_to_user(n, to, from);
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_to_user_small(n, to, from);
+ else
+ return uaccess.copy_to_user(n, to, from);
}
#define __copy_to_user_inatomic __copy_to_user
@@ -268,7 +275,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- return uaccess.copy_from_user(n, from, to);
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_from_user_small(n, from, to);
+ else
+ return uaccess.copy_from_user(n, from, to);
}
extern void copy_from_user_overflow(void)
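Editor's note: the restored __copy_to_user()/__copy_from_user() above dispatch on __builtin_constant_p(), so a compile-time-constant length of at most 256 bytes picks the "small" uaccess variant and the branch folds away entirely. A hedged sketch of that dispatch shape (copy_small/copy_large are hypothetical stand-ins for the uaccess ops, here both backed by memcpy):

#include <stddef.h>
#include <string.h>

static size_t copy_small(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);	/* stands in for the short-move path */
	return 0;		/* 0 = nothing left uncopied */
}

static size_t copy_large(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);	/* stands in for the bulk-move path */
	return 0;
}

static inline size_t copy_sketch(void *to, const void *from, size_t n)
{
	/* constant n <= 256: the compiler resolves this at build time */
	if (__builtin_constant_p(n) && n <= 256)
		return copy_small(to, from, n);
	return copy_large(to, from, n);
}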
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index bc9746a..a73eb2e 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -26,9 +26,8 @@ struct vdso_data {
__u64 wtom_clock_nsec; /* 0x28 */
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */
- __u32 ectg_available; /* ECTG instruction present 0x38 */
- __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
- __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
+ __u32 ectg_available;
+ __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
};
struct vdso_per_cpu_data {
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 7e0b498..7a84619 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -199,7 +199,6 @@ typedef union
typedef struct
{
__u32 fpc;
- __u32 pad;
freg_t fprs[NUM_FPRS];
} s390_fp_regs;
@@ -207,6 +206,7 @@ typedef struct
#define FPC_FLAGS_MASK 0x00F80000
#define FPC_DXC_MASK 0x0000FF00
#define FPC_RM_MASK 0x00000003
+#define FPC_VALID_MASK 0xF8F8FF03
/* this typedef defines how a Program Status Word looks like */
typedef struct
@@ -263,7 +263,7 @@ typedef struct
#define PSW_MASK_EA 0x0000000100000000UL
#define PSW_MASK_BA 0x0000000080000000UL
-#define PSW_MASK_USER 0x0000FF0180000000UL
+#define PSW_MASK_USER 0x0000FF8180000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h
index b30de9c..584787f 100644
--- a/arch/s390/include/uapi/asm/sigcontext.h
+++ b/arch/s390/include/uapi/asm/sigcontext.h
@@ -49,7 +49,6 @@ typedef struct
typedef struct
{
unsigned int fpc;
- unsigned int pad;
double fprs[__NUM_FPRS];
} _s390_fp_regs;
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index c286c2e..9249449 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -82,6 +82,4 @@
#define SO_BUSY_POLL 46
-#define SO_MAX_PACING_RATE 47
-
#endif /* _ASM_SOCKET_H */