Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/crypto/ghash_s390.c          |  25
-rw-r--r--  arch/s390/crypto/prng.c                |   2
-rw-r--r--  arch/s390/hypfs/hypfs_sprp.c           |   4
-rw-r--r--  arch/s390/include/asm/Kbuild           |   1
-rw-r--r--  arch/s390/include/asm/barrier.h        |   2
-rw-r--r--  arch/s390/include/asm/cmpxchg.h        |   2
-rw-r--r--  arch/s390/include/asm/hugetlb.h        |   4
-rw-r--r--  arch/s390/include/asm/io.h             |   1
-rw-r--r--  arch/s390/include/asm/kvm_host.h       |   6
-rw-r--r--  arch/s390/include/asm/mm-arch-hooks.h  |  15
-rw-r--r--  arch/s390/include/asm/page.h           |   8
-rw-r--r--  arch/s390/include/asm/pgtable.h        |  36
-rw-r--r--  arch/s390/include/asm/sclp.h           |  34
-rw-r--r--  arch/s390/include/asm/timex.h          |   5
-rw-r--r--  arch/s390/include/asm/topology.h       |   3
-rw-r--r--  arch/s390/include/asm/uaccess.h        |  15
-rw-r--r--  arch/s390/kernel/compat_wrapper.c      |   2
-rw-r--r--  arch/s390/kernel/crash_dump.c          |  13
-rw-r--r--  arch/s390/kernel/debug.c               |  11
-rw-r--r--  arch/s390/kernel/entry.S               |   2
-rw-r--r--  arch/s390/kernel/setup.c               |  25
-rw-r--r--  arch/s390/kernel/smp.c                 |  13
-rw-r--r--  arch/s390/kernel/suspend.c             |   2
-rw-r--r--  arch/s390/kernel/time.c                |   6
-rw-r--r--  arch/s390/kvm/intercept.c              |  16
-rw-r--r--  arch/s390/kvm/interrupt.c              |  94
-rw-r--r--  arch/s390/kvm/kvm-s390.c               |  89
-rw-r--r--  arch/s390/kvm/kvm-s390.h               |  25
-rw-r--r--  arch/s390/kvm/priv.c                   |   8
-rw-r--r--  arch/s390/mm/fault.c                   |   2
-rw-r--r--  arch/s390/mm/hugetlbpage.c             |  70
-rw-r--r--  arch/s390/mm/init.c                    |   2
-rw-r--r--  arch/s390/mm/mem_detect.c              |   4
-rw-r--r--  arch/s390/mm/pgtable.c                 |   2
-rw-r--r--  arch/s390/net/bpf_jit.h                |  12
-rw-r--r--  arch/s390/net/bpf_jit_comp.c           | 136
-rw-r--r--  arch/s390/pci/pci_event.c              |   8
37 files changed, 429 insertions, 276 deletions
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 7940dc9..b258110 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -16,11 +16,12 @@
#define GHASH_DIGEST_SIZE 16
struct ghash_ctx {
- u8 icv[16];
- u8 key[16];
+ u8 key[GHASH_BLOCK_SIZE];
};
struct ghash_desc_ctx {
+ u8 icv[GHASH_BLOCK_SIZE];
+ u8 key[GHASH_BLOCK_SIZE];
u8 buffer[GHASH_BLOCK_SIZE];
u32 bytes;
};
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
memset(dctx, 0, sizeof(*dctx));
+ memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
return 0;
}
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
}
memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
- memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
return 0;
}
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
unsigned int n;
u8 *buf = dctx->buffer;
int ret;
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
src += n;
if (!dctx->bytes) {
- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
- ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
if (ret != n)
return -EIO;
src += n;
@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
return 0;
}
-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
int ret;
@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes);
- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
+
+ dctx->bytes = 0;
}
- dctx->bytes = 0;
return 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
int ret;
- ret = ghash_flush(ctx, dctx);
+ ret = ghash_flush(dctx);
if (!ret)
- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+ memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
return ret;
}
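
[Editor's note] The point of the ghash change above: the running digest (icv) and the key move from the tfm-wide ghash_ctx into the per-request ghash_desc_ctx, so several requests can share one keyed tfm without corrupting each other's state. A minimal sketch of the pattern this protects, using the generic crypto_shash API of this kernel era (the helper name is illustrative, not part of the patch):

#include <crypto/hash.h>
#include <linux/err.h>

/* Two interleaved GHASH streams over one keyed tfm. Before the patch
 * the running digest lived in the shared tfm context, so db's updates
 * would clobber da's state; with per-desc state both are independent. */
static int ghash_two_streams(const u8 *key,
                             const u8 *a, unsigned int alen, u8 *out_a,
                             const u8 *b, unsigned int blen, u8 *out_b)
{
        struct crypto_shash *tfm = crypto_alloc_shash("ghash", 0, 0);
        int ret;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        ret = crypto_shash_setkey(tfm, key, 16);
        if (!ret) {
                SHASH_DESC_ON_STACK(da, tfm);
                SHASH_DESC_ON_STACK(db, tfm);

                da->tfm = tfm;
                db->tfm = tfm;
                da->flags = db->flags = 0;      /* no CRYPTO_TFM_REQ_MAY_SLEEP */
                ret = crypto_shash_init(da) ?:
                      crypto_shash_init(db) ?:
                      crypto_shash_update(da, a, alen) ?:
                      crypto_shash_update(db, b, blen) ?:
                      crypto_shash_final(da, out_a) ?:
                      crypto_shash_final(db, out_b);
        }
        crypto_free_shash(tfm);
        return ret;
}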
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 1f374b3..9d5192c 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -125,7 +125,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
/* fill page with urandom bytes */
get_random_bytes(pg, PAGE_SIZE);
/* exor page with stckf values */
- for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) {
+ for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
u64 *p = ((u64 *)pg) + n;
*p ^= get_tod_clock_fast();
}
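
[Editor's note] The prng fix is worth spelling out: sizeof applied to the expression PAGE_SIZE/sizeof(u64) yields the size of the result's type (8 bytes on s390), not the quotient (512), so only the first eight u64 words of the page were ever XORed with clock values. A self-contained userspace demonstration:

#include <stdio.h>

#define PAGE_SIZE 4096UL
typedef unsigned long long u64;

int main(void)
{
        /* sizeof(expression) is the size of the expression's type */
        printf("old bound: %zu\n", sizeof(PAGE_SIZE / sizeof(u64))); /* 8   */
        /* the intended word count: 4096 / 8 */
        printf("new bound: %zu\n", PAGE_SIZE / sizeof(u64));         /* 512 */
        return 0;
}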
diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c
index f043c3c..dd42a26 100644
--- a/arch/s390/hypfs/hypfs_sprp.c
+++ b/arch/s390/hypfs/hypfs_sprp.c
@@ -128,14 +128,14 @@ static struct hypfs_dbfs_file hypfs_sprp_file = {
int hypfs_sprp_init(void)
{
- if (!sclp_has_sprp())
+ if (!sclp.has_sprp)
return 0;
return hypfs_dbfs_create_file(&hypfs_sprp_file);
}
void hypfs_sprp_exit(void)
{
- if (!sclp_has_sprp())
+ if (!sclp.has_sprp)
return;
hypfs_dbfs_remove_file(&hypfs_sprp_file);
}
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index c631f98..dc5385e 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -4,5 +4,4 @@ generic-y += clkdev.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
-generic-y += scatterlist.h
generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 8d72471..e6f8615 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#define smp_store_release(p, v) \
do { \
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 4eadec4..411464f 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -32,8 +32,6 @@
__old; \
})
-#define __HAVE_ARCH_CMPXCHG
-
#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
({ \
register __typeof__(*(p1)) __old1 asm("2") = (o1); \
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 11eae5f..0130d03 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -35,12 +35,8 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-#define hugetlb_prefault_arch_hook(mm) do { } while (0)
#define arch_clear_hugepage_flags(page) do { } while (0)
-int arch_prepare_hugepage(struct page *page);
-void arch_release_hugepage(struct page *page);
-
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 30fd5c8..cb5fdf3 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -29,6 +29,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define ioremap_nocache(addr, size) ioremap(addr, size)
#define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d01fc58..3024acb 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -80,6 +80,7 @@ struct sca_block {
#define CPUSTAT_MCDS 0x00000100
#define CPUSTAT_SM 0x00000080
#define CPUSTAT_IBS 0x00000040
+#define CPUSTAT_GED2 0x00000010
#define CPUSTAT_G 0x00000008
#define CPUSTAT_GED 0x00000004
#define CPUSTAT_J 0x00000002
@@ -95,7 +96,8 @@ struct kvm_s390_sie_block {
#define PROG_IN_SIE (1<<0)
__u32 prog0c; /* 0x000c */
__u8 reserved10[16]; /* 0x0010 */
-#define PROG_BLOCK_SIE 0x00000001
+#define PROG_BLOCK_SIE (1<<0)
+#define PROG_REQUEST (1<<1)
atomic_t prog20; /* 0x0020 */
__u8 reserved24[4]; /* 0x0024 */
__u64 cputm; /* 0x0028 */
@@ -634,7 +636,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
diff --git a/arch/s390/include/asm/mm-arch-hooks.h b/arch/s390/include/asm/mm-arch-hooks.h
new file mode 100644
index 0000000..07680b2
--- /dev/null
+++ b/arch/s390/include/asm/mm-arch-hooks.h
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_S390_MM_ARCH_HOOKS_H
+#define _ASM_S390_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_S390_MM_ARCH_HOOKS_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 53eacbd..dd34523 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -17,7 +17,10 @@
#define PAGE_DEFAULT_ACC 0
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
-#define HPAGE_SHIFT 20
+#include <asm/setup.h>
+#ifndef __ASSEMBLY__
+
+extern int HPAGE_SHIFT;
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
@@ -27,9 +30,6 @@
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
-#include <asm/setup.h>
-#ifndef __ASSEMBLY__
-
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
#if PAGE_DEFAULT_KEY
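
[Editor's note] HPAGE_SHIFT thus turns from the constant 20 into an int that boot code fills in (defined in arch/s390/mm/pgtable.c and set in setup_arch(), both later in this diff), and the derived macros become runtime values. The resulting numbers, as a sketch:

/* On MACHINE_HAS_HPAGE (EDAT1) systems, setup_arch() sets HPAGE_SHIFT = 20:
 *   HPAGE_SIZE         = 1UL << 20      (1 MiB segment-backed huge pages)
 *   HUGETLB_PAGE_ORDER = 20 - 12 = 8
 * Without EDAT1 it stays 0, huge pages are then reported as unsupported,
 * and the software emulation removed from mm/hugetlbpage.c below is no
 * longer needed.
 */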
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fc64239..f66d827 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -494,7 +494,7 @@ static inline int pmd_large(pmd_t pmd)
return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}
-static inline int pmd_pfn(pmd_t pmd)
+static inline unsigned long pmd_pfn(pmd_t pmd)
{
unsigned long origin_mask;
@@ -1498,9 +1498,9 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
return pmd_young(pmd);
}
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
@@ -1509,10 +1509,10 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
return pmd;
}
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
-static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmdp, int full)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp, int full)
{
pmd_t pmd = *pmdp;
@@ -1522,11 +1522,11 @@ static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
return pmd;
}
-#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
-static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
{
- return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+ return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define __HAVE_ARCH_PMDP_INVALIDATE
@@ -1548,6 +1548,14 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
}
}
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+
#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
@@ -1565,9 +1573,9 @@ static inline int has_transparent_hugepage(void)
/*
* 64 bit swap entry format:
* A page-table entry has some bits we have to treat in a special way.
- * Bits 52 and bit 55 have to be zero, otherwise an specification
+ * Bits 52 and bit 55 have to be zero, otherwise a specification
* exception will occur instead of a page translation exception. The
- * specifiation exception has the bad habit not to store necessary
+ * specification exception has the bad habit not to store necessary
* information in the lowcore.
* Bits 54 and 63 are used to indicate the page type.
* A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index f1096ba..c891f41 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -46,33 +46,39 @@ struct sclp_cpu_info {
struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
};
+struct sclp_info {
+ unsigned char has_linemode : 1;
+ unsigned char has_vt220 : 1;
+ unsigned char has_siif : 1;
+ unsigned char has_sigpif : 1;
+ unsigned char has_cpu_type : 1;
+ unsigned char has_sprp : 1;
+ unsigned int ibc;
+ unsigned int mtid;
+ unsigned int mtid_cp;
+ unsigned int mtid_prev;
+ unsigned long long rzm;
+ unsigned long long rnmax;
+ unsigned long long hamax;
+ unsigned int max_cpu;
+ unsigned long hsa_size;
+ unsigned long long facilities;
+};
+extern struct sclp_info sclp;
+
int sclp_get_cpu_info(struct sclp_cpu_info *info);
int sclp_cpu_configure(u8 cpu);
int sclp_cpu_deconfigure(u8 cpu);
-unsigned long long sclp_get_rnmax(void);
-unsigned long long sclp_get_rzm(void);
-unsigned int sclp_get_max_cpu(void);
-unsigned int sclp_get_mtid(u8 cpu_type);
-unsigned int sclp_get_mtid_max(void);
-unsigned int sclp_get_mtid_prev(void);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
int sclp_chp_deconfigure(struct chp_id chpid);
int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
-bool __init sclp_has_linemode(void);
-bool __init sclp_has_vt220(void);
-bool sclp_has_sprp(void);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
-unsigned long sclp_get_hsa_size(void);
void sclp_early_detect(void);
-int sclp_has_siif(void);
-int sclp_has_sigpif(void);
-unsigned int sclp_get_ibc(void);
-
long _sclp_print_early(const char *);
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 98eb2a5..dcb6312 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -10,6 +10,7 @@
#define _ASM_S390_TIMEX_H
#include <asm/lowcore.h>
+#include <linux/time64.h>
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -108,10 +109,10 @@ int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
-void tod_to_timeval(__u64, struct timespec *);
+void tod_to_timeval(__u64 todval, struct timespec64 *xt);
static inline
-void stck_to_timespec(unsigned long long stck, struct timespec *ts)
+void stck_to_timespec64(unsigned long long stck, struct timespec64 *ts)
{
tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
}
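
[Editor's note] tod_to_timeval() keeps its name but now fills a timespec64, making the debug and boot clock paths y2038-clean. For orientation: the s390 TOD format ticks one microsecond at bit 51, so todval >> 12 is microseconds since the TOD epoch. A sketch of the arithmetic (not the exact kernel implementation):

static void tod_to_timespec64_sketch(u64 todval, struct timespec64 *xt)
{
        u64 usecs = todval >> 12;       /* TOD units -> microseconds */

        xt->tv_sec  = usecs / USEC_PER_SEC;
        xt->tv_nsec = (usecs % USEC_PER_SEC) * NSEC_PER_USEC;
}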
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index b1453a2..4990f6c 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,7 +22,8 @@ DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_sibling_cpumask(cpu) \
+ (&per_cpu(cpu_topology, cpu).thread_mask)
#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index d64a7a6..9dd4cc4 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -98,7 +98,8 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
@@ -118,7 +119,8 @@ unsigned long __must_check __copy_from_user(void *to, const void __user *from,
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
@@ -264,7 +266,8 @@ int __get_user_bad(void) __attribute__((noreturn));
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
*
* Copy data from kernel space to user space.
*
@@ -290,7 +293,8 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
*
* Copy data from user space to kernel space.
*
@@ -348,7 +352,8 @@ static inline unsigned long strnlen_user(const char __user *src, unsigned long n
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
*
* Get the size of a NUL-terminated string in user space.
*
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index d7fa2f0..f8498dd 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -202,7 +202,7 @@ COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags);
COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig);
COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig);
COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags);
-COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val);
+COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls);
COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags);
COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim);
COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag);
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 9f73c80..7a75ad4 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -33,11 +33,12 @@ static struct memblock_type oldmem_type = {
};
#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid) \
- for (i = 0, __next_mem_range(&i, nid, &memblock.physmem, \
+ for (i = 0, __next_mem_range(&i, nid, MEMBLOCK_NONE, \
+ &memblock.physmem, \
&oldmem_type, p_start, \
p_end, p_nid); \
i != (u64)ULLONG_MAX; \
- __next_mem_range(&i, nid, &memblock.physmem, \
+ __next_mem_range(&i, nid, MEMBLOCK_NONE, &memblock.physmem,\
&oldmem_type, \
p_start, p_end, p_nid))
@@ -122,7 +123,7 @@ static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
{
int rc;
- if (src < sclp_get_hsa_size()) {
+ if (src < sclp.hsa_size) {
rc = memcpy_hsa(buf, src, csize, userbuf);
} else {
if (userbuf)
@@ -215,7 +216,7 @@ static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
unsigned long pfn,
unsigned long size, pgprot_t prot)
{
- unsigned long hsa_end = sclp_get_hsa_size();
+ unsigned long hsa_end = sclp.hsa_size;
unsigned long size_hsa;
if (pfn < hsa_end >> PAGE_SHIFT) {
@@ -258,7 +259,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
return rc;
}
} else {
- unsigned long hsa_end = sclp_get_hsa_size();
+ unsigned long hsa_end = sclp.hsa_size;
if ((unsigned long) src < hsa_end) {
copied = min(count, hsa_end - (unsigned long) src);
rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
@@ -609,7 +610,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
return 0;
/* If we cannot get HSA size for zfcpdump return error */
- if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
+ if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp.hsa_size)
return -ENODEV;
/* For kdump, exclude previous crashkernel memory */
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c1f21ac..6fca0e4 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1457,23 +1457,24 @@ int
debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
int area, debug_entry_t * entry, char *out_buf)
{
- struct timespec time_spec;
+ struct timespec64 time_spec;
char *except_str;
unsigned long caller;
int rc = 0;
unsigned int level;
level = entry->id.fields.level;
- stck_to_timespec(entry->id.stck, &time_spec);
+ stck_to_timespec64(entry->id.stck, &time_spec);
if (entry->id.fields.exception)
except_str = "*";
else
except_str = "-";
caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
- rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i %p ",
- area, time_spec.tv_sec, time_spec.tv_nsec / 1000, level,
- except_str, entry->id.fields.cpuid, (void *) caller);
+ rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p ",
+ area, (long long)time_spec.tv_sec,
+ time_spec.tv_nsec / 1000, level, except_str,
+ entry->id.fields.cpuid, (void *)caller);
return rc;
}
EXPORT_SYMBOL(debug_dflt_header_fn);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 99b44ac..3238893 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1005,7 +1005,7 @@ ENTRY(sie64a)
.Lsie_gmap:
lg %r14,__SF_EMPTY(%r15) # get control block pointer
oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
- tm __SIE_PROG20+3(%r14),1 # last exit...
+ tm __SIE_PROG20+3(%r14),3 # last exit...
jnz .Lsie_done
LPP __SF_EMPTY(%r15) # set guest id
sie 0(%r14)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7262fe4..73941bf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -128,9 +128,9 @@ __setup("condev=", condev_setup);
static void __init set_preferred_console(void)
{
if (MACHINE_IS_KVM) {
- if (sclp_has_vt220())
+ if (sclp.has_vt220)
add_preferred_console("ttyS", 1, NULL);
- else if (sclp_has_linemode())
+ else if (sclp.has_linemode)
add_preferred_console("ttyS", 0, NULL);
else
add_preferred_console("hvc", 0, NULL);
@@ -510,8 +510,8 @@ static void reserve_memory_end(void)
{
#ifdef CONFIG_CRASH_DUMP
if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
- !OLDMEM_BASE && sclp_get_hsa_size()) {
- memory_end = sclp_get_hsa_size();
+ !OLDMEM_BASE && sclp.hsa_size) {
+ memory_end = sclp.hsa_size;
memory_end &= PAGE_MASK;
memory_end_set = 1;
}
@@ -576,7 +576,7 @@ static void __init reserve_crashkernel(void)
crash_base = low;
} else {
/* Find suitable area in free memory */
- low = max_t(unsigned long, crash_size, sclp_get_hsa_size());
+ low = max_t(unsigned long, crash_size, sclp.hsa_size);
high = crash_base ? crash_base + crash_size : ULONG_MAX;
if (crash_base && crash_base < low) {
@@ -640,19 +640,24 @@ static void __init check_initrd(void)
}
/*
- * Reserve all kernel text
+ * Reserve memory used for lowcore/command line/kernel image.
*/
static void __init reserve_kernel(void)
{
- unsigned long start_pfn;
- start_pfn = PFN_UP(__pa(&_end));
+ unsigned long start_pfn = PFN_UP(__pa(&_end));
+#ifdef CONFIG_DMA_API_DEBUG
/*
- * Reserve memory used for lowcore/command line/kernel image.
+ * DMA_API_DEBUG code stumbles over addresses from the
+ * range [_ehead, _stext]. Mark the memory as reserved
+ * so it is not used for CONFIG_DMA_API_DEBUG=y.
*/
+ memblock_reserve(0, PFN_PHYS(start_pfn));
+#else
memblock_reserve(0, (unsigned long)_ehead);
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- (unsigned long)_stext);
+#endif
}
static void __init reserve_elfcorehdr(void)
@@ -875,6 +880,8 @@ void __init setup_arch(char **cmdline_p)
*/
setup_hwcaps();
+ HPAGE_SHIFT = MACHINE_HAS_HPAGE ? 20 : 0;
+
/*
* Create kernel page tables and switch to virtual addressing.
*/
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index efd2c19..0d9d59d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -601,7 +601,7 @@ static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
/* No previous system present, normal boot. */
return;
/* Set multi-threading state to the previous system. */
- pcpu_set_smt(sclp_get_mtid_prev());
+ pcpu_set_smt(sclp.mtid_prev);
/* Collect CPU states. */
cpu = 0;
for (i = 0; i < info->configured; i++) {
@@ -740,7 +740,7 @@ static void __init smp_detect_cpus(void)
#endif
/* Set multi-threading state for the current system */
- mtid = sclp_get_mtid(boot_cpu_type);
+ mtid = boot_cpu_type ? sclp.mtid : sclp.mtid_cp;
mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
pcpu_set_smt(mtid);
@@ -880,12 +880,13 @@ void __noreturn cpu_die(void)
void __init smp_fill_possible_mask(void)
{
- unsigned int possible, sclp, cpu;
+ unsigned int possible, sclp_max, cpu;
- sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
- sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
+ sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
+ sclp_max = min(smp_max_threads, sclp_max);
+ sclp_max = sclp.max_cpu * sclp_max ?: nr_cpu_ids;
possible = setup_possible_cpus ?: nr_cpu_ids;
- possible = min(possible, sclp);
+ possible = min(possible, sclp_max);
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
set_cpu_possible(cpu, true);
}
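
[Editor's note] The renamed sclp_max also changes how the thread count is obtained: instead of querying the sclp driver per CPU type, it takes the maximum of the CP and non-CP thread ids straight from the new sclp struct. Worked numbers for a hypothetical machine:

/* Hypothetical box: sclp.max_cpu = 64, sclp.mtid = 1 (SMT-2),
 * sclp.mtid_cp = 0, smp_max_threads = 2, setup_possible_cpus unset:
 *
 *   sclp_max = max(1, 0) + 1         = 2
 *   sclp_max = min(2, 2)             = 2
 *   sclp_max = 64 * 2 ?: nr_cpu_ids  = 128
 *   possible = min(nr_cpu_ids, 128)
 */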
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index d3236c9..39e2f41 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -9,10 +9,10 @@
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm.h>
+#include <linux/pci.h>
#include <asm/ctl_reg.h>
#include <asm/ipl.h>
#include <asm/cio.h>
-#include <asm/pci.h>
#include <asm/sections.h>
#include "entry.h"
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 170ddd2..9e733d9 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -76,7 +76,7 @@ unsigned long long monotonic_clock(void)
}
EXPORT_SYMBOL(monotonic_clock);
-void tod_to_timeval(__u64 todval, struct timespec *xt)
+void tod_to_timeval(__u64 todval, struct timespec64 *xt)
{
unsigned long long sec;
@@ -181,12 +181,12 @@ static void timing_alert_interrupt(struct ext_code ext_code,
static void etr_reset(void);
static void stp_reset(void);
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
{
tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
}
-void read_boot_clock(struct timespec *ts)
+void read_boot_clock64(struct timespec64 *ts)
{
tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts);
}
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 9e3779e..7365e8a 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -241,21 +241,6 @@ static int handle_prog(struct kvm_vcpu *vcpu)
return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
-static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
-{
- int rc, rc2;
-
- vcpu->stat.exit_instr_and_program++;
- rc = handle_instruction(vcpu);
- rc2 = handle_prog(vcpu);
-
- if (rc == -EOPNOTSUPP)
- vcpu->arch.sie_block->icptcode = 0x04;
- if (rc)
- return rc;
- return rc2;
-}
-
/**
* handle_external_interrupt - used for external interruption interceptions
*
@@ -355,7 +340,6 @@ static const intercept_handler_t intercept_funcs[] = {
[0x00 >> 2] = handle_noop,
[0x04 >> 2] = handle_instruction,
[0x08 >> 2] = handle_prog,
- [0x0C >> 2] = handle_instruction_and_prog,
[0x10 >> 2] = handle_noop,
[0x14 >> 2] = handle_external_interrupt,
[0x18 >> 2] = handle_noop,
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 9de4726..c98d897 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -134,6 +134,8 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
active_mask = pending_local_irqs(vcpu);
active_mask |= pending_floating_irqs(vcpu);
+ if (!active_mask)
+ return 0;
if (psw_extint_disabled(vcpu))
active_mask &= ~IRQ_PEND_EXT_MASK;
@@ -799,7 +801,7 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
- if (!sclp_has_sigpif())
+ if (!sclp.has_sigpif)
return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
return (sigp_ctrl & SIGP_CTRL_C) &&
@@ -941,12 +943,9 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
if (cpu_timer_irq_pending(vcpu))
set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
- do {
- irqs = deliverable_irqs(vcpu);
+ while ((irqs = deliverable_irqs(vcpu)) && !rc) {
/* bits are in the order of interrupt priority */
irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
- if (irq_type == IRQ_PEND_COUNT)
- break;
if (is_ioirq(irq_type)) {
rc = __deliver_io(vcpu, irq_type);
} else {
@@ -958,9 +957,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
}
rc = func(vcpu);
}
- if (rc)
- break;
- } while (!rc);
+ }
set_intercept_indicators(vcpu);
@@ -1058,10 +1055,10 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
return -EINVAL;
- if (sclp_has_sigpif())
+ if (sclp.has_sigpif)
return __inject_extcall_sigpif(vcpu, src_id);
- if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+ if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
return -EBUSY;
*extcall = irq->u.extcall;
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1340,12 +1337,54 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
return 0;
}
-static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+/*
+ * Find a destination VCPU for a floating irq and kick it.
+ */
+static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;
+ struct kvm_vcpu *dst_vcpu;
+ int sigcpu, online_vcpus, nr_tries = 0;
+
+ online_vcpus = atomic_read(&kvm->online_vcpus);
+ if (!online_vcpus)
+ return;
+
+ /* find idle VCPUs first, then round robin */
+ sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
+ if (sigcpu == online_vcpus) {
+ do {
+ sigcpu = fi->next_rr_cpu;
+ fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
+ /* avoid endless loops if all vcpus are stopped */
+ if (nr_tries++ >= online_vcpus)
+ return;
+ } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
+ }
+ dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+
+ /* make the VCPU drop out of the SIE, or wake it up if sleeping */
+ li = &dst_vcpu->arch.local_int;
+ spin_lock(&li->lock);
+ switch (type) {
+ case KVM_S390_MCHK:
+ atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+ break;
+ default:
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ break;
+ }
+ spin_unlock(&li->lock);
+ kvm_s390_vcpu_wakeup(dst_vcpu);
+}
+
+static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+{
struct kvm_s390_float_interrupt *fi;
- struct kvm_vcpu *dst_vcpu = NULL;
- int sigcpu;
u64 type = READ_ONCE(inti->type);
int rc;
@@ -1373,32 +1412,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
if (rc)
return rc;
- sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
- if (sigcpu == KVM_MAX_VCPUS) {
- do {
- sigcpu = fi->next_rr_cpu++;
- if (sigcpu == KVM_MAX_VCPUS)
- sigcpu = fi->next_rr_cpu = 0;
- } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
- }
- dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
- li = &dst_vcpu->arch.local_int;
- spin_lock(&li->lock);
- switch (type) {
- case KVM_S390_MCHK:
- atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
- break;
- default:
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
- break;
- }
- spin_unlock(&li->lock);
- kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
+ __floating_irq_kick(kvm, type);
return 0;
-
}
int kvm_s390_inject_vm(struct kvm *kvm,
@@ -1606,6 +1621,9 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
int i;
spin_lock(&fi->lock);
+ fi->pending_irqs = 0;
+ memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
+ memset(&fi->mchk, 0, sizeof(fi->mchk));
for (i = 0; i < FIRQ_LIST_COUNT; i++)
clear_irq_list(&fi->lists[i]);
for (i = 0; i < FIRQ_MAX_COUNT; i++)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8cd8e7b..2078f92 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -36,6 +36,10 @@
#include "kvm-s390.h"
#include "gaccess.h"
+#define KMSG_COMPONENT "kvm-s390"
+#undef pr_fmt
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
@@ -110,7 +114,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
0xffe6fffbfcfdfc40UL,
- 0x005c800000000000UL,
+ 0x005e800000000000UL,
};
unsigned long kvm_s390_fac_list_mask_size(void)
@@ -236,6 +240,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
{
int r;
unsigned long n;
+ struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int is_dirty = 0;
@@ -245,7 +250,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
- memslot = id_to_memslot(kvm->memslots, log->slot);
+ slots = kvm_memslots(kvm);
+ memslot = id_to_memslot(slots, log->slot);
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -454,10 +460,10 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
mutex_lock(&kvm->lock);
kvm->arch.epoch = gtod - host_tod;
- kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+ kvm_s390_vcpu_block_all(kvm);
+ kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
- exit_sie(cur_vcpu);
- }
+ kvm_s390_vcpu_unblock_all(kvm);
mutex_unlock(&kvm->lock);
return 0;
}
@@ -604,7 +610,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
goto out;
}
get_cpu_id((struct cpuid *) &mach->cpuid);
- mach->ibc = sclp_get_ibc();
+ mach->ibc = sclp.ibc;
memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
@@ -1068,7 +1074,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
S390_ARCH_FAC_LIST_SIZE_BYTE);
kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
- kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
+ kvm->arch.model.ibc = sclp.ibc & 0x0fff;
if (kvm_s390_crypto_init(kvm) < 0)
goto out_err;
@@ -1311,8 +1317,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
CPUSTAT_SM |
- CPUSTAT_STOPPED |
- CPUSTAT_GED);
+ CPUSTAT_STOPPED);
+
+ if (test_kvm_facility(vcpu->kvm, 78))
+ atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+ else if (test_kvm_facility(vcpu->kvm, 8))
+ atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+
kvm_s390_vcpu_setup_model(vcpu);
vcpu->arch.sie_block->ecb = 6;
@@ -1321,9 +1332,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->ecb2 = 8;
vcpu->arch.sie_block->eca = 0xC1002000U;
- if (sclp_has_siif())
+ if (sclp.has_siif)
vcpu->arch.sie_block->eca |= 1;
- if (sclp_has_sigpif())
+ if (sclp.has_sigpif)
vcpu->arch.sie_block->eca |= 0x10000000U;
if (test_kvm_facility(vcpu->kvm, 129)) {
vcpu->arch.sie_block->eca |= 0x00020000;
@@ -1409,16 +1420,28 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
return kvm_s390_vcpu_has_irq(vcpu, 0);
}
-void s390_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+ exit_sie(vcpu);
}
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
+static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
+{
+ atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+ exit_sie(vcpu);
+}
+
+static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
+{
+ atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
/*
* Kick a guest cpu out of SIE and wait until SIE is not running.
* If the CPU is not running (e.g. waiting as idle) the function will
@@ -1430,11 +1453,11 @@ void exit_sie(struct kvm_vcpu *vcpu)
cpu_relax();
}
-/* Kick a guest cpu out of SIE and prevent SIE-reentry */
-void exit_sie_sync(struct kvm_vcpu *vcpu)
+/* Kick a guest cpu out of SIE to process a request synchronously */
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
- s390_vcpu_block(vcpu);
- exit_sie(vcpu);
+ kvm_make_request(req, vcpu);
+ kvm_s390_vcpu_request(vcpu);
}
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
@@ -1447,8 +1470,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
/* match against both prefix pages */
if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
- kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
- exit_sie_sync(vcpu);
+ kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
}
}
}
@@ -1720,8 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
+ if (!vcpu->requests)
+ return 0;
retry:
- s390_vcpu_unblock(vcpu);
+ kvm_s390_vcpu_request_handled(vcpu);
/*
* We use MMU_RELOAD just to re-arm the ipte notifier for the
* guest prefix page. gmap_ipte_notify will wait on the ptl lock.
@@ -1993,12 +2017,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
* As PF_VCPU will be used in fault handler, between
* guest_enter and guest_exit should be no uaccess.
*/
- preempt_disable();
- kvm_guest_enter();
- preempt_enable();
+ local_irq_disable();
+ __kvm_guest_enter();
+ local_irq_enable();
exit_reason = sie64a(vcpu->arch.sie_block,
vcpu->run->s.regs.gprs);
- kvm_guest_exit();
+ local_irq_disable();
+ __kvm_guest_exit();
+ local_irq_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
rc = vcpu_post_run(vcpu, exit_reason);
@@ -2068,7 +2094,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
kvm_s390_vcpu_start(vcpu);
} else if (is_vcpu_stopped(vcpu)) {
- pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
+ pr_err_ratelimited("can't run stopped vcpu %d\n",
vcpu->vcpu_id);
return -EINVAL;
}
@@ -2206,8 +2232,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
- kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
- exit_sie_sync(vcpu);
+ kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
@@ -2223,8 +2248,7 @@ static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
- kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
- exit_sie_sync(vcpu);
+ kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
@@ -2563,7 +2587,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
/* A few sanity checks. We can have memory slots which have to be
@@ -2581,8 +2605,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int rc;
@@ -2601,7 +2626,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
mem->guest_phys_addr, mem->memory_size);
if (rc)
- printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
+ pr_warn("failed to commit memory region\n");
return;
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ca108b9..c570478 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -211,10 +211,10 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
-void s390_vcpu_block(struct kvm_vcpu *vcpu);
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
-void exit_sie_sync(struct kvm_vcpu *vcpu);
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
/* is cmma enabled */
@@ -228,6 +228,25 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
struct kvm_s390_pgm_info *pgm_info);
+static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
+ WARN_ON(!mutex_is_locked(&kvm->lock));
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_s390_vcpu_block(vcpu);
+}
+
+static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_s390_vcpu_unblock(vcpu);
+}
+
/**
* kvm_s390_inject_prog_cond - conditionally inject a program check
* @vcpu: virtual cpu
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d22d8ee..ad42422 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -698,10 +698,14 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
case 0x00001000:
end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
break;
- /* We dont support EDAT2
case 0x00002000:
+ /* only support 2G frame size if EDAT2 is available and we are
+ not in 24-bit addressing mode */
+ if (!test_kvm_facility(vcpu->kvm, 78) ||
+ psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
- break;*/
+ break;
default:
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 76515bc..4c8f5d7 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -399,7 +399,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
+ if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
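
[Editor's note] faulthandler_disabled() replaces in_atomic() as the "may we sleep on this fault?" test; it is true in irq context or anywhere pagefaults were explicitly disabled, matching the "may sleep if pagefaults are enabled" wording added to the uaccess.h comments above. The usual pattern on the access side (generic kernel API, sketch):

#include <linux/uaccess.h>

static unsigned long peek_user_word(const unsigned long __user *p)
{
        unsigned long val = 0;

        pagefault_disable();    /* fault handler must not sleep from here on */
        if (__copy_from_user_inatomic(&val, p, sizeof(val)))
                val = 0;        /* unmapped: the handler failed fast instead */
        pagefault_enable();
        return val;
}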
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e617e74..fb4bf2c 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -86,31 +86,16 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- pmd_t pmd;
+ pmd_t pmd = __pte_to_pmd(pte);
- pmd = __pte_to_pmd(pte);
- if (!MACHINE_HAS_HPAGE) {
- /* Emulated huge ptes loose the dirty and young bit */
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
- pmd_val(pmd) |= pte_page(pte)[1].index;
- } else
- pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
*(pmd_t *) ptep = pmd;
}
pte_t huge_ptep_get(pte_t *ptep)
{
- unsigned long origin;
- pmd_t pmd;
+ pmd_t pmd = *(pmd_t *) ptep;
- pmd = *(pmd_t *) ptep;
- if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
- origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
- pmd_val(pmd) |= *(unsigned long *) origin;
- /* Emulated huge ptes are young and dirty by definition */
- pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
- }
return __pmd_to_pte(pmd);
}
@@ -125,45 +110,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
-int arch_prepare_hugepage(struct page *page)
-{
- unsigned long addr = page_to_phys(page);
- pte_t pte;
- pte_t *ptep;
- int i;
-
- if (MACHINE_HAS_HPAGE)
- return 0;
-
- ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
- if (!ptep)
- return -ENOMEM;
-
- pte_val(pte) = addr;
- for (i = 0; i < PTRS_PER_PTE; i++) {
- set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
- pte_val(pte) += PAGE_SIZE;
- }
- page[1].index = (unsigned long) ptep;
- return 0;
-}
-
-void arch_release_hugepage(struct page *page)
-{
- pte_t *ptep;
-
- if (MACHINE_HAS_HPAGE)
- return;
-
- ptep = (pte_t *) page[1].index;
- if (!ptep)
- return;
- clear_table((unsigned long *) ptep, _PAGE_INVALID,
- PTRS_PER_PTE * sizeof(pte_t));
- page_table_free(&init_mm, (unsigned long *) ptep);
- page[1].index = 0;
-}
-
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
@@ -193,17 +139,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return (pte_t *) pmdp;
}
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- return 0;
-}
-
int pmd_huge(pmd_t pmd)
{
- if (!MACHINE_HAS_HPAGE)
- return 0;
-
- return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+ return pmd_large(pmd);
}
int pud_huge(pud_t pud)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 80875c4..76e8737 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -213,7 +213,7 @@ unsigned long memory_block_size_bytes(void)
* Make sure the memory block size is always greater
* or equal than the memory increment size.
*/
- return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm());
+ return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 0f36043..e00f0d5 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -31,8 +31,8 @@ void __init detect_memory_memblock(void)
unsigned long addr, size;
int type;
- rzm = sclp_get_rzm();
- rnmax = sclp_get_rnmax();
+ rzm = sclp.rzm;
+ rnmax = sclp.rnmax;
memsize = rzm * rnmax;
if (!rzm)
rzm = 1ULL << 17;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b33f661..33082d0 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -31,6 +31,8 @@
#define ALLOC_ORDER 2
#define FRAG_MASK 0x03
+int HPAGE_SHIFT;
+
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index ba8593a5..f6498ee 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -28,6 +28,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* | old backchain | |
* +---------------+ |
* | r15 - r6 | |
+ * +---------------+ |
+ * | 4 byte align | |
+ * | tail_call_cnt | |
* BFP -> +===============+ |
* | | |
* | BPF stack | |
@@ -46,12 +49,17 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* R15 -> +---------------+ + low
*
* We get 160 bytes stack space from calling function, but only use
- * 11 * 8 byte (old backchain + r15 - r6) for storing registers.
+ * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
*/
-#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8))
+#define STK_SPACE (MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_160_UNUSED (160 - 12 * 8)
+#define STK_OFF (STK_SPACE - STK_160_UNUSED)
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
+#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
+#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
+
/* Offset to skip condition code check */
#define OFF_OK 4
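
[Editor's note] Assuming MAX_BPF_STACK == 512 (its value in this kernel), the new constants work out as:

/*
 *   STK_SPACE      = 512 + 8 + 4 + 4 + 160 = 688
 *   STK_160_UNUSED = 160 - 12 * 8          = 64
 *   STK_OFF        = 688 - 64              = 624
 *   STK_OFF_R6     = 160 - 11 * 8          = 72  (r6..r15 + backchain slots)
 *   STK_OFF_TCCNT  = 160 - 12 * 8          = 64  (new slot directly below)
 *
 * i.e. the frame now accounts for every used byte of the caller's
 * 160-byte save area, with tail_call_cnt (plus its 4-byte alignment
 * pad) in the slot just below the saved registers.
 */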
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 7690dc8..d3766dd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -21,6 +21,7 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/init.h>
+#include <linux/bpf.h>
#include <asm/cacheflush.h>
#include <asm/dis.h>
#include "bpf_jit.h"
@@ -40,6 +41,8 @@ struct bpf_jit {
int base_ip; /* Base address for literal pool */
int ret0_ip; /* Address of return 0 */
int exit_ip; /* Address of exit */
+ int tail_call_start; /* Tail call start offset */
+ int labels[1]; /* Labels for local jumps */
};
#define BPF_SIZE_MAX 4096 /* Max size for program */
@@ -49,6 +52,7 @@ struct bpf_jit {
#define SEEN_RET0 4 /* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL 8 /* code uses literals */
#define SEEN_FUNC 16 /* calls C functions */
+#define SEEN_TAIL_CALL 32 /* code uses tail calls */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
/*
@@ -60,6 +64,7 @@ struct bpf_jit {
#define REG_L (__MAX_BPF_REG+3) /* Literal pool register */
#define REG_15 (__MAX_BPF_REG+4) /* Register 15 */
#define REG_0 REG_W0 /* Register 0 */
+#define REG_1 REG_W1 /* Register 1 */
#define REG_2 BPF_REG_1 /* Register 2 */
#define REG_14 BPF_REG_0 /* Register 14 */
@@ -223,6 +228,24 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
REG_SET_SEEN(b3); \
})
+#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \
+({ \
+ int rel = (jit->labels[label] - jit->prg) >> 1; \
+ _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), \
+ op2 | mask << 12); \
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
+})
+
+#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \
+({ \
+ int rel = (jit->labels[label] - jit->prg) >> 1; \
+ _EMIT6(op1 | (reg_high(b1) | mask) << 16 | \
+ (rel & 0xffff), op2 | (imm & 0xff) << 8); \
+ REG_SET_SEEN(b1); \
+ BUILD_BUG_ON(((unsigned long) imm) > 0xff); \
+})
+
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
/* Branch instruction needs 6 bytes */ \
@@ -286,7 +309,7 @@ static void jit_fill_hole(void *area, unsigned int size)
*/
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
- u32 off = 72 + (rs - 6) * 8;
+ u32 off = STK_OFF_R6 + (rs - 6) * 8;
if (rs == re)
/* stg %rs,off(%r15) */
@@ -301,7 +324,7 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
*/
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
- u32 off = 72 + (rs - 6) * 8;
+ u32 off = STK_OFF_R6 + (rs - 6) * 8;
if (jit->seen & SEEN_STACK)
off += STK_OFF;
@@ -374,6 +397,16 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
*/
static void bpf_jit_prologue(struct bpf_jit *jit)
{
+ if (jit->seen & SEEN_TAIL_CALL) {
+ /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
+ _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
+ } else {
+ /* j tail_call_start: NOP if no tail calls are used */
+ EMIT4_PCREL(0xa7f40000, 6);
+ _EMIT2(0);
+ }
+ /* Tail calls have to skip above initialization */
+ jit->tail_call_start = jit->prg;
/* Save registers */
save_restore_regs(jit, REGS_SAVE);
/* Setup literal pool */
@@ -384,13 +417,16 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
}
/* Setup stack and backchain */
if (jit->seen & SEEN_STACK) {
- /* lgr %bfp,%r15 (BPF frame pointer) */
- EMIT4(0xb9040000, BPF_REG_FP, REG_15);
+ if (jit->seen & SEEN_FUNC)
+ /* lgr %w1,%r15 (backchain) */
+ EMIT4(0xb9040000, REG_W1, REG_15);
+ /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
+ EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
/* aghi %r15,-STK_OFF */
EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
if (jit->seen & SEEN_FUNC)
- /* stg %bfp,152(%r15) (backchain) */
- EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0,
+ /* stg %w1,152(%r15) (backchain) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
REG_15, 152);
}
/*
@@ -443,8 +479,11 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
/*
* Compile one eBPF instruction into s390x code
+ *
+ * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
+ * stack space for the large switch statement.
*/
-static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
+static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
{
struct bpf_insn *insn = &fp->insnsi[i];
int jmp_off, last, insn_count = 1;
@@ -588,8 +627,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9160000, dst_reg, rc_reg);
break;
}
- case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */
- case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
@@ -602,10 +641,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4_IMM(0xa7090000, REG_W0, 0);
/* lgr %w1,%dst */
EMIT4(0xb9040000, REG_W1, dst_reg);
- /* llgfr %dst,%src (u32 cast) */
- EMIT4(0xb9160000, dst_reg, src_reg);
/* dlgr %w0,%dst */
- EMIT4(0xb9870000, REG_W0, dst_reg);
+ EMIT4(0xb9870000, REG_W0, src_reg);
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;
@@ -632,8 +669,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9160000, dst_reg, rc_reg);
break;
}
- case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */
- case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */
+ case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
+ case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
@@ -649,7 +686,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9040000, REG_W1, dst_reg);
/* dlg %w0,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
- EMIT_CONST_U64((u32) imm));
+ EMIT_CONST_U64(imm));
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;
@@ -947,6 +984,75 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9040000, BPF_REG_0, REG_2);
break;
}
+ case BPF_JMP | BPF_CALL | BPF_X:
+ /*
+ * Implicit input:
+ * B1: pointer to ctx
+ * B2: pointer to bpf_array
+ * B3: index in bpf_array
+ */
+ jit->seen |= SEEN_TAIL_CALL;
+
+ /*
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+
+ /* llgf %w1,map.max_entries(%b2) */
+ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
+ offsetof(struct bpf_array, map.max_entries));
+ /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
+ EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+ REG_W1, 0, 0xa);
+
+ /*
+ * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
+ * goto out;
+ */
+
+ if (jit->seen & SEEN_STACK)
+ off = STK_OFF_TCCNT + STK_OFF;
+ else
+ off = STK_OFF_TCCNT;
+ /* lhi %w0,1 */
+ EMIT4_IMM(0xa7080000, REG_W0, 1);
+ /* laal %w1,%w0,off(%r15) */
+ EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
+ /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
+ EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
+ MAX_TAIL_CALL_CNT, 0, 0x2);
+
+ /*
+ * prog = array->prog[index];
+ * if (prog == NULL)
+ * goto out;
+ */
+
+ /* sllg %r1,%b3,3: %r1 = index * 8 */
+ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+ /* lg %r1,prog(%b2,%r1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
+ REG_1, offsetof(struct bpf_array, prog));
+ /* clgij %r1,0,0x8,label0 */
+ EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
+
+ /*
+ * Restore registers before calling function
+ */
+ save_restore_regs(jit, REGS_RESTORE);
+
+ /*
+ * goto *(prog->bpf_func + tail_call_start);
+ */
+
+ /* lg %r1,bpf_func(%r1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
+ offsetof(struct bpf_prog, bpf_func));
+ /* bc 0xf,tail_call_start(%r1) */
+ _EMIT4(0x47f01000 + jit->tail_call_start);
+ /* out: */
+ jit->labels[0] = jit->prg;
+ break;
case BPF_JMP | BPF_EXIT: /* return b0 */
last = (i == fp->len - 1) ? 1 : 0;
if (last && !(jit->seen & SEEN_RET0))
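
[Editor's note] Putting the tail-call pieces together: the prologue either clears the on-stack counter (normal entry) or is bypassed entirely (a tail call jumps to tail_call_start), so the counter survives across chained programs. In rough pseudo-C, with field names from struct bpf_array in <linux/bpf.h> (a sketch, not the emitted code):

entry:                  /* xc: tail_call_cnt = 0 */
tail_call_start:        /* save registers, build the frame, ... */

        /* BPF_JMP | BPF_CALL | BPF_X */
        if (index >= array->map.max_entries)
                goto out;
        if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
                goto out;
        prog = array->prog[index];
        if (prog == NULL)
                goto out;
        /* restore registers, then re-enter past the counter reset */
        goto *(prog->bpf_func + tail_call_start);
out:    ;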
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 460fdb2..ed2394d 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -75,7 +75,13 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
zpci_err_hex(ccdf, sizeof(*ccdf));
switch (ccdf->pec) {
- case 0x0301: /* Standby -> Configured */
+ case 0x0301: /* Reserved|Standby -> Configured */
+ if (!zdev) {
+ ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
+ if (ret)
+ break;
+ zdev = get_zdev_by_fid(ccdf->fid);
+ }
if (!zdev || zdev->state != ZPCI_FN_STATE_STANDBY)
break;
zdev->state = ZPCI_FN_STATE_CONFIGURED;