Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                        |  9
-rw-r--r--  arch/s390/crypto/aes_s390.c              | 31
-rw-r--r--  arch/s390/include/asm/page.h             | 38
-rw-r--r--  arch/s390/include/asm/sclp.h             |  3
-rw-r--r--  arch/s390/include/asm/smp.h              |  2
-rw-r--r--  arch/s390/include/asm/vdso.h             |  5
-rw-r--r--  arch/s390/kernel/asm-offsets.c           |  4
-rw-r--r--  arch/s390/kernel/compat_signal.c         |  2
-rw-r--r--  arch/s390/kernel/pgm_check.S             |  2
-rw-r--r--  arch/s390/kernel/setup.c                 |  1
-rw-r--r--  arch/s390/kernel/signal.c                |  2
-rw-r--r--  arch/s390/kernel/smp.c                   | 25
-rw-r--r--  arch/s390/kernel/time.c                  | 46
-rw-r--r--  arch/s390/kernel/vdso.c                  |  2
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S  | 31
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S   |  9
-rw-r--r--  arch/s390/kernel/vdso64/clock_getres.S   |  4
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  | 24
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S   |  9
-rw-r--r--  arch/s390/lib/uaccess_pt.c               |  3
-rw-r--r--  arch/s390/pci/pci_event.c                |  2
21 files changed, 132 insertions(+), 122 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 314fced..e9f3125 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -101,7 +101,7 @@ config S390
select GENERIC_CPU_DEVICES if !SMP
select GENERIC_FIND_FIRST_BIT
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_TIME_VSYSCALL_OLD
+ select GENERIC_TIME_VSYSCALL
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
select HAVE_ARCH_SECCOMP_FILTER
@@ -135,7 +135,6 @@ config S390
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
- select INIT_ALL_POSSIBLE
select KTIME_SCALAR if 32BIT
select MODULES_USE_ELF_RELA
select OLD_SIGACTION
@@ -347,14 +346,14 @@ config SMP
Even if you don't know what to do here, say Y.
config NR_CPUS
- int "Maximum number of CPUs (2-64)"
- range 2 64
+ int "Maximum number of CPUs (2-256)"
+ range 2 256
depends on SMP
default "32" if !64BIT
default "64" if 64BIT
help
This allows you to specify the maximum number of CPUs which this
- kernel will support. The maximum supported value is 64 and the
+ kernel will support. The maximum supported value is 256 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 4363528..b3feabd 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -55,8 +55,7 @@ struct pcc_param {
struct s390_xts_ctx {
u8 key[32];
- u8 xts_param[16];
- struct pcc_param pcc;
+ u8 pcc_key[32];
long enc;
long dec;
int key_len;
@@ -591,7 +590,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_128_ENCRYPT;
xts_ctx->dec = KM_XTS_128_DECRYPT;
memcpy(xts_ctx->key + 16, in_key, 16);
- memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+ memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
break;
case 48:
xts_ctx->enc = 0;
@@ -602,7 +601,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_256_ENCRYPT;
xts_ctx->dec = KM_XTS_256_DECRYPT;
memcpy(xts_ctx->key, in_key, 32);
- memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+ memcpy(xts_ctx->pcc_key, in_key + 32, 32);
break;
default:
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -621,29 +620,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
unsigned int nbytes = walk->nbytes;
unsigned int n;
u8 *in, *out;
- void *param;
+ struct pcc_param pcc_param;
+ struct {
+ u8 key[32];
+ u8 init[16];
+ } xts_param;
if (!nbytes)
goto out;
- memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
- memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
- memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
- memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
- param = xts_ctx->pcc.key + offset;
- ret = crypt_s390_pcc(func, param);
+ memset(pcc_param.block, 0, sizeof(pcc_param.block));
+ memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+ memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
if (ret < 0)
return -EIO;
- memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
- param = xts_ctx->key + offset;
+ memcpy(xts_param.key, xts_ctx->key, 32);
+ memcpy(xts_param.init, pcc_param.xts, 16);
do {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
- ret = crypt_s390_km(func, param, out, in, n);
+ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
if (ret < 0 || ret != n)
return -EIO;
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 316c850..114258e 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -48,33 +48,21 @@ static inline void clear_page(void *page)
: "memory", "cc");
}
+/*
+ * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
+ * bypass caches when copying a page. Especially when copying huge pages
+ * this keeps L1 and L2 data caches alive.
+ */
static inline void copy_page(void *to, void *from)
{
- if (MACHINE_HAS_MVPG) {
- register unsigned long reg0 asm ("0") = 0;
- asm volatile(
- " mvpg %0,%1"
- : : "a" (to), "a" (from), "d" (reg0)
- : "memory", "cc");
- } else
- asm volatile(
- " mvc 0(256,%0),0(%1)\n"
- " mvc 256(256,%0),256(%1)\n"
- " mvc 512(256,%0),512(%1)\n"
- " mvc 768(256,%0),768(%1)\n"
- " mvc 1024(256,%0),1024(%1)\n"
- " mvc 1280(256,%0),1280(%1)\n"
- " mvc 1536(256,%0),1536(%1)\n"
- " mvc 1792(256,%0),1792(%1)\n"
- " mvc 2048(256,%0),2048(%1)\n"
- " mvc 2304(256,%0),2304(%1)\n"
- " mvc 2560(256,%0),2560(%1)\n"
- " mvc 2816(256,%0),2816(%1)\n"
- " mvc 3072(256,%0),3072(%1)\n"
- " mvc 3328(256,%0),3328(%1)\n"
- " mvc 3584(256,%0),3584(%1)\n"
- " mvc 3840(256,%0),3840(%1)\n"
- : : "a" (to), "a" (from) : "memory");
+ register void *reg2 asm ("2") = to;
+ register unsigned long reg3 asm ("3") = 0x1000;
+ register void *reg4 asm ("4") = from;
+ register unsigned long reg5 asm ("5") = 0xb0001000;
+ asm volatile(
+ " mvcl 2,4"
+ : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
+ : : "memory", "cc");
}
#define clear_user_page(page, vaddr, pg) clear_page(page)
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 30ef748..2f39095 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <asm/chpid.h>
+#include <asm/cpu.h>
#define SCLP_CHP_INFO_MASK_SIZE 32
@@ -37,7 +38,7 @@ struct sclp_cpu_info {
unsigned int standby;
unsigned int combined;
int has_cpu_type;
- struct sclp_cpu_entry cpu[255];
+ struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
};
int sclp_get_cpu_info(struct sclp_cpu_info *info);
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ac9bed8..1607793 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -31,6 +31,7 @@ extern void smp_yield(void);
extern void smp_stop_cpu(void);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
+extern void smp_fill_possible_mask(void);
#else /* CONFIG_SMP */
@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
static inline void smp_stop_cpu(void) { }
+static inline void smp_fill_possible_mask(void) { }
#endif /* CONFIG_SMP */
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a73eb2e..bc9746a 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -26,8 +26,9 @@ struct vdso_data {
__u64 wtom_clock_nsec; /* 0x28 */
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */
- __u32 ectg_available;
- __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
+ __u32 ectg_available; /* ECTG instruction present 0x38 */
+ __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
+ __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
};
struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2416138..e4c99a1 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -65,12 +65,14 @@ int main(void)
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
- DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
+ DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
+ DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */
DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
BLANK();
/* idle data offsets */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6e24429..95e7ba0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -194,7 +194,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
return -EINVAL;
/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
- regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+ regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 4a460c4..813ec72 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
PGM_CHECK_DEFAULT /* 35 */
PGM_CHECK_DEFAULT /* 36 */
PGM_CHECK_DEFAULT /* 37 */
-PGM_CHECK_DEFAULT /* 38 */
+PGM_CHECK_64BIT(do_dat_exception) /* 38 */
PGM_CHECK_64BIT(do_dat_exception) /* 39 */
PGM_CHECK_64BIT(do_dat_exception) /* 3a */
PGM_CHECK_64BIT(do_dat_exception) /* 3b */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4444875..0f3d44e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p)
setup_vmcoreinfo();
setup_lowcore();
+ smp_fill_possible_mask();
cpu_init();
s390_init_cpu_topology();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index fb53587..d8fd508 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -94,7 +94,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
return -EINVAL;
/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
- regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+ regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
(user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
/* Check for invalid user address space control. */
if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index dc4a534..9587047 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
return 0;
}
-static int __init setup_possible_cpus(char *s)
-{
- int max, cpu;
+static unsigned int setup_possible_cpus __initdata;
- if (kstrtoint(s, 0, &max) < 0)
- return 0;
- init_cpu_possible(cpumask_of(0));
- for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
- set_cpu_possible(cpu, true);
+static int __init _setup_possible_cpus(char *s)
+{
+ get_option(&s, &setup_possible_cpus);
return 0;
}
-early_param("possible_cpus", setup_possible_cpus);
+early_param("possible_cpus", _setup_possible_cpus);
#ifdef CONFIG_HOTPLUG_CPU
@@ -775,6 +771,17 @@ void __noreturn cpu_die(void)
#endif /* CONFIG_HOTPLUG_CPU */
+void __init smp_fill_possible_mask(void)
+{
+ unsigned int possible, cpu;
+
+ possible = setup_possible_cpus;
+ if (!possible)
+ possible = MACHINE_IS_VM ? 64 : nr_cpu_ids;
+ for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
+ set_cpu_possible(cpu, true);
+}
+
void __init smp_prepare_cpus(unsigned int max_cpus)
{
/* request the 0x1201 emergency signal external interrupt */
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 064c308..dd95f163 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
set_clock_comparator(S390_lowcore.clock_comparator);
}
-static int s390_next_ktime(ktime_t expires,
+static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- struct timespec ts;
- u64 nsecs;
-
- ts.tv_sec = ts.tv_nsec = 0;
- monotonic_to_bootbased(&ts);
- nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
- do_div(nsecs, 125);
- S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
- /* Program the maximum value if we have an overflow (== year 2042) */
- if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
- S390_lowcore.clock_comparator = -1ULL;
+ S390_lowcore.clock_comparator = get_tod_clock() + delta;
set_clock_comparator(S390_lowcore.clock_comparator);
return 0;
}
@@ -146,15 +136,14 @@ void init_cpu_timer(void)
cpu = smp_processor_id();
cd = &per_cpu(comparators, cpu);
cd->name = "comparator";
- cd->features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_KTIME;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
cd->mult = 16777;
cd->shift = 12;
cd->min_delta_ns = 1;
cd->max_delta_ns = LONG_MAX;
cd->rating = 400;
cd->cpumask = cpumask_of(cpu);
- cd->set_next_ktime = s390_next_ktime;
+ cd->set_next_event = s390_next_event;
cd->set_mode = s390_set_mode;
clockevents_register_device(cd);
@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
return &clocksource_tod;
}
-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
- struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
{
- if (clock != &clocksource_tod)
+ u64 nsecps;
+
+ if (tk->clock != &clocksource_tod)
return;
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_wmb();
- vdso_data->xtime_tod_stamp = clock->cycle_last;
- vdso_data->xtime_clock_sec = wall_time->tv_sec;
- vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
- vdso_data->wtom_clock_sec = wtm->tv_sec;
- vdso_data->wtom_clock_nsec = wtm->tv_nsec;
- vdso_data->ntp_mult = mult;
+ vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+ vdso_data->xtime_clock_sec = tk->xtime_sec;
+ vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+ vdso_data->wtom_clock_sec =
+ tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtom_clock_nsec = tk->xtime_nsec +
+ + (tk->wall_to_monotonic.tv_nsec << tk->shift);
+ nsecps = (u64) NSEC_PER_SEC << tk->shift;
+ while (vdso_data->wtom_clock_nsec >= nsecps) {
+ vdso_data->wtom_clock_nsec -= nsecps;
+ vdso_data->wtom_clock_sec++;
+ }
+ vdso_data->tk_mult = tk->mult;
+ vdso_data->tk_shift = tk->shift;
smp_wmb();
++vdso_data->tb_update_count;
}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a84476f..6136490 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -125,7 +125,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
psal[i] = 0x80000000;
lowcore->paste[4] = (u32)(addr_t) psal;
- psal[0] = 0x20000000;
+ psal[0] = 0x02000000;
psal[2] = (u32)(addr_t) aste;
*(unsigned long *) (aste + 2) = segment_table +
_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index b2224e0..65fc397 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,25 +38,21 @@ __kernel_clock_gettime:
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,2f
ahi %r0,-1
-2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
+2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
lr %r2,%r0
- l %r0,__VDSO_NTP_MULT(%r5)
+ l %r0,__VDSO_TK_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 3f
- a %r0,__VDSO_NTP_MULT(%r5)
+ a %r0,__VDSO_TK_MULT(%r5)
3: alr %r0,%r2
- srdl %r0,12
- al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
- al %r1,__VDSO_XTIME_NSEC+4(%r5)
- brc 12,4f
- ahi %r0,1
-4: l %r2,__VDSO_XTIME_SEC+4(%r5)
- al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
+ al %r0,__VDSO_WTOM_NSEC(%r5)
al %r1,__VDSO_WTOM_NSEC+4(%r5)
brc 12,5f
ahi %r0,1
-5: al %r2,__VDSO_WTOM_SEC+4(%r5)
+5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ srdl %r0,0(%r2) /* >> tk->shift */
+ l %r2,__VDSO_WTOM_SEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 1b
basr %r5,0
@@ -86,20 +82,21 @@ __kernel_clock_gettime:
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
ahi %r0,-1
-12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
+12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
lr %r2,%r0
- l %r0,__VDSO_NTP_MULT(%r5)
+ l %r0,__VDSO_TK_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 13f
- a %r0,__VDSO_NTP_MULT(%r5)
+ a %r0,__VDSO_TK_MULT(%r5)
13: alr %r0,%r2
- srdl %r0,12
- al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+ al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
al %r1,__VDSO_XTIME_NSEC+4(%r5)
brc 12,14f
ahi %r0,1
-14: l %r2,__VDSO_XTIME_SEC+4(%r5)
+14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ srdl %r0,0(%r2) /* >> tk->shift */
+ l %r2,__VDSO_XTIME_SEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 11b
basr %r5,0
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 2d36331..fd621a9 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,15 +35,14 @@ __kernel_gettimeofday:
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1
-3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
+3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
st %r0,24(%r15)
- l %r0,__VDSO_NTP_MULT(%r5)
+ l %r0,__VDSO_TK_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 4f
- a %r0,__VDSO_NTP_MULT(%r5)
+ a %r0,__VDSO_TK_MULT(%r5)
4: al %r0,24(%r15)
- srdl %r0,12
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
al %r1,__VDSO_XTIME_NSEC+4(%r5)
brc 12,5f
@@ -51,6 +50,8 @@ __kernel_gettimeofday:
5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 1b
+ l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ srdl %r0,0(%r4) /* >> tk->shift */
l %r4,24(%r15) /* get tv_sec from stack */
basr %r5,0
6: ltr %r0,%r0
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 176e1f7..34deba7 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -23,7 +23,9 @@ __kernel_clock_getres:
je 0f
cghi %r2,__CLOCK_MONOTONIC
je 0f
- cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
+ cghi %r2,__CLOCK_THREAD_CPUTIME_ID
+ je 0f
+ cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
jne 2f
larl %r5,_vdso_data
icm %r0,15,__LC_ECTG_OK(%r5)
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index d46c95e..91940ed 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -22,7 +22,9 @@ __kernel_clock_gettime:
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME
je 4f
- cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
+ cghi %r2,__CLOCK_THREAD_CPUTIME_ID
+ je 9f
+ cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
je 9f
cghi %r2,__CLOCK_MONOTONIC
jne 12f
@@ -34,14 +36,13 @@ __kernel_clock_gettime:
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
stck 48(%r15) /* Store TOD clock */
+ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ lg %r0,__VDSO_WTOM_SEC(%r5)
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
- lg %r0,__VDSO_XTIME_SEC(%r5)
- alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
- alg %r0,__VDSO_WTOM_SEC(%r5)
+ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ alg %r1,__VDSO_WTOM_NSEC(%r5)
+ srlg %r1,%r1,0(%r2) /* >> tk->shift */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
larl %r5,13f
@@ -62,12 +63,13 @@ __kernel_clock_gettime:
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stck 48(%r15) /* Store TOD clock */
+ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
- lg %r0,__VDSO_XTIME_SEC(%r5)
+ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+ srlg %r1,%r1,0(%r2) /* >> tk->shift */
+ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 5b
larl %r5,13f
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 36ee674..d0860d1 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,12 +31,13 @@ __kernel_gettimeofday:
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
- lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
+ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
+ lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ srlg %r1,%r1,0(%r5) /* >> tk->shift */
larl %r5,5f
2: clg %r1,0(%r5)
jl 3f
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 97e03ca..dbdab3e 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
* contains the (negative) exception code.
*/
#ifdef CONFIG_64BIT
+
static unsigned long follow_table(struct mm_struct *mm,
unsigned long address, int write)
{
unsigned long *table = (unsigned long *)__pa(mm->pgd);
+ if (unlikely(address > mm->context.asce_limit - 1))
+ return -0x38UL;
switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
table = table + ((address >> 53) & 0x7ff);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 800f064..0696072 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -75,6 +75,7 @@ void zpci_event_availability(void *data)
if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED)
break;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ zdev->fh = ccdf->fh;
ret = zpci_enable_device(zdev);
if (ret)
break;
@@ -101,6 +102,7 @@ void zpci_event_availability(void *data)
if (pdev)
pci_stop_and_remove_bus_device(pdev);
+ zdev->fh = ccdf->fh;
zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
break;