Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/ctl_reg.h      |  4
-rw-r--r--  arch/s390/include/asm/elf.h          | 15
-rw-r--r--  arch/s390/include/asm/mmu.h          |  2
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |  8
-rw-r--r--  arch/s390/include/asm/pgtable.h      |  6
-rw-r--r--  arch/s390/include/asm/syscall.h      |  6
-rw-r--r--  arch/s390/include/asm/tlbflush.h     | 30
7 files changed, 34 insertions(+), 37 deletions(-)
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab..8e136b8 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
" lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+ : \
+ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
+ : "memory"); \
}
#define __ctl_store(array, low, high) { \
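Note on the hunk above: the added "memory" clobber turns the lctlg asm into a compiler barrier, so the compiler neither keeps previously loaded values in registers nor reorders memory accesses across a control-register load that may change address translation. A minimal sketch of the pattern (hypothetical helper name, kernel context assumed; this is not the kernel macro itself):

/*
 * Sketch only: an inline asm that loads a control register which may
 * affect address translation carries a "memory" clobber so it acts as
 * a full compiler barrier.
 */
static inline void load_ctl_reg1(unsigned long *asce)
{
        asm volatile(
                "       lctlg   1,1,%0\n"       /* load control register 1 */
                :
                : "Q" (*asce)
                : "memory");                    /* compiler barrier */
}
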
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d..8d665f1 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -158,14 +158,13 @@ extern unsigned int vdso_enabled;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. 64-bit
- tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_compat_task() ? \
- (STACK_TOP / 3 * 2) : \
- (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
+ 0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
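Note on the hunk above: ELF_ET_DYN_BASE is now a fixed address instead of being derived from STACK_TOP: 4 MiB for 31-bit compat tasks and 4 GiB for 64-bit tasks, which leaves the entire 32-bit range below 4 GiB free. A standalone illustration of what the two constants evaluate to (plain user-space C, not kernel code):

#include <stdio.h>

int main(void)
{
        /* Constants copied from the hunk above. */
        unsigned long long compat_base = 0x000400000ULL;        /* 4 MiB, 31-bit tasks */
        unsigned long long lp64_base   = 0x100000000ULL;        /* 4 GiB, 64-bit tasks */

        printf("compat ET_DYN base: %#llx\n", compat_base);
        printf("64-bit ET_DYN base: %#llx\n", lp64_base);
        return 0;
}
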
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index bea785d..af85d6b 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,6 +5,7 @@
#include <linux/errno.h>
typedef struct {
+ spinlock_t lock;
cpumask_t cpu_attach_mask;
atomic_t flush_count;
unsigned int flush_mm;
@@ -25,6 +26,7 @@ typedef struct {
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
+ .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \
.context.pgtable_lock = \
__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \
.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
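Note on the hunk above: mm_context_t gains a spinlock (used later to serialize lazy TLB flushes), which needs both a static initializer for init_mm and a runtime initializer for every new mm (see the mmu_context.h hunk below). A sketch of that pattern with illustrative names (kernel context assumed):

#include <linux/spinlock.h>

/* Illustrative stand-in for the real mm_context_t. */
struct demo_context {
        spinlock_t lock;
};

/* Static initialization, as used for init_mm. */
#define INIT_DEMO_CONTEXT(name) \
        .lock = __SPIN_LOCK_UNLOCKED(name.lock),

/* Runtime initialization, as done in init_new_context(). */
static inline void demo_context_init(struct demo_context *ctx)
{
        spin_lock_init(&ctx->lock);
}
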
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 515fea5..f65a708 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ spin_lock_init(&mm->context.lock);
spin_lock_init(&mm->context.pgtable_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
spin_lock_init(&mm->context.gmap_lock);
@@ -93,7 +94,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (prev == next)
return;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
- cpumask_set_cpu(cpu, mm_cpumask(next));
/* Clear old ASCE by loading the kernel ASCE. */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
@@ -111,9 +111,8 @@ static inline void finish_arch_post_lock_switch(void)
preempt_disable();
while (atomic_read(&mm->context.flush_count))
cpu_relax();
-
- if (mm->context.flush_mm)
- __tlb_flush_mm(mm);
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+ __tlb_flush_mm_lazy(mm);
preempt_enable();
}
set_fs(current->thread.mm_segment);
@@ -126,6 +125,7 @@ static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
switch_mm(prev, next, current);
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
set_user_asce(next);
}
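Note on the hunks above: switch_mm() no longer adds the CPU to mm_cpumask(next); the bit is set only after pending flushes have drained (in finish_arch_post_lock_switch) or right after the switch in activate_mm(), so a CPU never advertises itself as a TLB user of an mm while a flush may still be in flight. A simplified, commented restatement of that ordering (hypothetical function name, kernel context assumed):

/*
 * Attach path sketch: wait for in-flight flushes, then set the cpumask
 * bit, then perform any flush that was deferred while this CPU was not
 * listed in mm_cpumask(mm).
 */
static inline void demo_attach_cpu_to_mm(struct mm_struct *mm)
{
        preempt_disable();
        while (atomic_read(&mm->context.flush_count))
                cpu_relax();                            /* flush in flight */
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        __tlb_flush_mm_lazy(mm);                        /* catch up if needed */
        preempt_enable();
}
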
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0cea702..db74d39 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -480,7 +480,7 @@ static inline int mm_alloc_pgste(struct mm_struct *mm)
* In the case that a guest uses storage keys
* faults should no longer be backed by zero pages
*/
-#define mm_forbids_zeropage mm_use_skey
+#define mm_forbids_zeropage mm_has_pgste
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
@@ -1359,7 +1359,9 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
static inline void pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+ pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+
+ pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
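Note on the hunks above: mm_forbids_zeropage now keys off mm_has_pgste rather than mm_use_skey, and pmdp_invalidate preserves the existing pmd bits while only setting the invalid bit, instead of overwriting the whole entry. A small sketch of the bit-preserving invalidation (hypothetical helper, kernel context assumed):

/*
 * Mark a segment entry invalid while keeping all of its other bits,
 * rather than replacing the entry with a bare _SEGMENT_ENTRY_INVALID
 * value.
 */
static inline pmd_t demo_pmd_mkinvalid(pmd_t pmd)
{
        return __pmd(pmd_val(pmd) | _SEGMENT_ENTRY_INVALID);
}
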
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 6ba0bf9..6bc941b 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -64,6 +64,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
unsigned long mask = -1UL;
+ /*
+ * No arguments for this syscall, there's nothing to do.
+ */
+ if (!n)
+ return;
+
BUG_ON(i + n > 6);
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT))
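Note on the hunk above: with n == 0 there are no arguments to fetch, and returning early keeps the code from touching args[] at all, since the caller may have passed a zero-length buffer. A generic sketch of the guard pattern (illustrative names, plain C):

/*
 * When zero arguments are requested, return before any args[] access.
 */
static void demo_get_args(unsigned int n, unsigned long *args,
                          const unsigned long *src)
{
        if (!n)
                return;         /* args[] may be a zero-length buffer */
        while (n-- > 0)
                args[n] = src[n];
}
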
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 3984610..eed927a 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -43,23 +43,6 @@ static inline void __tlb_flush_global(void)
* Flush TLB entries for a specific mm on all CPUs (in case gmap is used
* this implicates multiple ASCEs!).
*/
-static inline void __tlb_flush_full(struct mm_struct *mm)
-{
- preempt_disable();
- atomic_inc(&mm->context.flush_count);
- if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
- /* Local TLB flush */
- __tlb_flush_local();
- } else {
- /* Global TLB flush */
- __tlb_flush_global();
- /* Reset TLB flush mask */
- cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
- }
- atomic_dec(&mm->context.flush_count);
- preempt_enable();
-}
-
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
unsigned long gmap_asce;
@@ -71,16 +54,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
*/
preempt_disable();
atomic_inc(&mm->context.flush_count);
+ /* Reset TLB flush mask */
+ cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+ barrier();
gmap_asce = READ_ONCE(mm->context.gmap_asce);
if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
if (gmap_asce)
__tlb_flush_idte(gmap_asce);
__tlb_flush_idte(mm->context.asce);
} else {
- __tlb_flush_full(mm);
+ /* Global TLB flush */
+ __tlb_flush_global();
}
- /* Reset TLB flush mask */
- cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
atomic_dec(&mm->context.flush_count);
preempt_enable();
}
@@ -94,7 +79,6 @@ static inline void __tlb_flush_kernel(void)
}
#else
#define __tlb_flush_global() __tlb_flush_local()
-#define __tlb_flush_full(mm) __tlb_flush_local()
/*
* Flush TLB entries for a specific ASCE on all CPUs.
@@ -112,10 +96,12 @@ static inline void __tlb_flush_kernel(void)
static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
+ spin_lock(&mm->context.lock);
if (mm->context.flush_mm) {
- __tlb_flush_mm(mm);
mm->context.flush_mm = 0;
+ __tlb_flush_mm(mm);
}
+ spin_unlock(&mm->context.lock);
}
/*
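Note on the hunks above: __tlb_flush_full() is removed, __tlb_flush_mm() now resets the cpumask before issuing the flush (with a barrier in between), and __tlb_flush_mm_lazy() clears flush_mm and flushes under the new context.lock. A commented restatement of the lazy-flush consumer side (hypothetical name, kernel context assumed):

/*
 * Lazy-flush consumer sketch: consume the request flag and flush under
 * context.lock, so concurrent callers serialize on the same pending
 * request instead of racing on flush_mm.
 */
static inline void demo_flush_mm_lazy(struct mm_struct *mm)
{
        spin_lock(&mm->context.lock);
        if (mm->context.flush_mm) {
                mm->context.flush_mm = 0;       /* consume the request */
                __tlb_flush_mm(mm);             /* do the actual flush */
        }
        spin_unlock(&mm->context.lock);
}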