author    Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
commit    62b8c978ee6b8d135d9e7953221de58000dba986
tree      683b04b2e627f6710c22c151b23c8cc9a165315e /arch/arm64
parent    78fd82238d0e5716578c326404184a27ba67fd6e
Rewind v3.13-rc3+ (78fd82238d0e5716) to v3.12
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 23
-rw-r--r--  arch/arm64/Makefile | 6
-rw-r--r--  arch/arm64/boot/dts/apm-storm.dtsi | 75
-rw-r--r--  arch/arm64/boot/dts/foundation-v8.dts | 2
-rw-r--r--  arch/arm64/configs/defconfig | 2
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 42
-rw-r--r--  arch/arm64/include/asm/assembler.h | 31
-rw-r--r--  arch/arm64/include/asm/atomic.h | 14
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 2
-rw-r--r--  arch/arm64/include/asm/compat.h | 14
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h | 59
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 14
-rw-r--r--  arch/arm64/include/asm/elf.h | 18
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 11
-rw-r--r--  arch/arm64/include/asm/io.h | 14
-rw-r--r--  arch/arm64/include/asm/irq.h | 1
-rw-r--r--  arch/arm64/include/asm/irqflags.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 8
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 61
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 6
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 12
-rw-r--r--  arch/arm64/include/asm/memory.h | 11
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 9
-rw-r--r--  arch/arm64/include/asm/pgtable-2level-hwdef.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 35
-rw-r--r--  arch/arm64/include/asm/processor.h | 5
-rw-r--r--  arch/arm64/include/asm/prom.h | 1
-rw-r--r--  arch/arm64/include/asm/psci.h | 19
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 1
-rw-r--r--  arch/arm64/include/asm/smp.h | 15
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 83
-rw-r--r--  arch/arm64/include/asm/spinlock_types.h | 15
-rw-r--r--  arch/arm64/include/asm/syscall.h | 6
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 6
-rw-r--r--  arch/arm64/include/asm/virt.h | 3
-rw-r--r--  arch/arm64/include/asm/xen/page-coherent.h | 47
-rw-r--r--  arch/arm64/include/uapi/asm/byteorder.h | 4
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 4
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 1
-rw-r--r--  arch/arm64/kernel/cpu_ops.c | 87
-rw-r--r--  arch/arm64/kernel/cputable.c | 2
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 33
-rw-r--r--  arch/arm64/kernel/entry.S | 31
-rw-r--r--  arch/arm64/kernel/head.S | 62
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 22
-rw-r--r--  arch/arm64/kernel/irq.c | 61
-rw-r--r--  arch/arm64/kernel/kuser32.S | 57
-rw-r--r--  arch/arm64/kernel/module.c | 7
-rw-r--r--  arch/arm64/kernel/perf_event.c | 11
-rw-r--r--  arch/arm64/kernel/process.c | 7
-rw-r--r--  arch/arm64/kernel/psci.c | 87
-rw-r--r--  arch/arm64/kernel/ptrace.c | 40
-rw-r--r--  arch/arm64/kernel/setup.c | 85
-rw-r--r--  arch/arm64/kernel/signal32.c | 39
-rw-r--r--  arch/arm64/kernel/smp.c | 215
-rw-r--r--  arch/arm64/kernel/smp_psci.c | 53
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 97
-rw-r--r--  arch/arm64/kernel/sys32.S | 22
-rw-r--r--  arch/arm64/kernel/time.c | 10
-rw-r--r--  arch/arm64/kernel/vdso.c | 5
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 32
-rw-r--r--  arch/arm64/kvm/Kconfig | 1
-rw-r--r--  arch/arm64/kvm/guest.c | 20
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 18
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 5
-rw-r--r--  arch/arm64/kvm/hyp.S | 13
-rw-r--r--  arch/arm64/mm/init.c | 25
-rw-r--r--  arch/arm64/mm/ioremap.c | 20
-rw-r--r--  arch/arm64/mm/proc.S | 6
-rw-r--r--  arch/arm64/xen/Makefile | 2
73 files changed, 575 insertions(+), 1293 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6d4dd22..c044548 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,7 +1,6 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
@@ -15,7 +14,6 @@ config ARM64
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
- select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
@@ -63,6 +61,10 @@ config LOCKDEP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
+config GENERIC_LOCKBREAK
+ def_bool y
+ depends on SMP && PREEMPT
+
config RWSEM_GENERIC_SPINLOCK
def_bool y
@@ -136,13 +138,9 @@ config ARM64_64K_PAGES
look-up. AArch32 emulation is not available when this feature
is enabled.
-config CPU_BIG_ENDIAN
- bool "Build big-endian kernel"
- help
- Say Y if you plan on running a kernel in big-endian mode.
-
config SMP
bool "Symmetric Multi-Processing"
+ select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If
you say N here, the kernel will run on single and
@@ -159,14 +157,8 @@ config NR_CPUS
range 2 32
depends on SMP
# These have to remain sorted largest to smallest
- default "8"
-
-config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs"
- depends on SMP
- help
- Say Y here to experiment with turning CPUs off and on. CPUs
- can be controlled through /sys/devices/system/cpu.
+ default "8" if ARCH_XGENE
+ default "4"
source kernel/Kconfig.preempt
@@ -219,7 +211,6 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64 (EXPERIMENTAL)"
depends on ARM64 && OF
- select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2fceb71..d90cf79 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -20,15 +20,9 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
-ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
-KBUILD_CPPFLAGS += -mbig-endian
-AS += -EB
-LD += -EB
-else
KBUILD_CPPFLAGS += -mlittle-endian
AS += -EL
LD += -EL
-endif
comma = ,
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index d37d736..bfdc578 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -103,81 +103,6 @@
#size-cells = <2>;
ranges;
- clocks {
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- refclk: refclk {
- compatible = "fixed-clock";
- #clock-cells = <1>;
- clock-frequency = <100000000>;
- clock-output-names = "refclk";
- };
-
- pcppll: pcppll@17000100 {
- compatible = "apm,xgene-pcppll-clock";
- #clock-cells = <1>;
- clocks = <&refclk 0>;
- clock-names = "pcppll";
- reg = <0x0 0x17000100 0x0 0x1000>;
- clock-output-names = "pcppll";
- type = <0>;
- };
-
- socpll: socpll@17000120 {
- compatible = "apm,xgene-socpll-clock";
- #clock-cells = <1>;
- clocks = <&refclk 0>;
- clock-names = "socpll";
- reg = <0x0 0x17000120 0x0 0x1000>;
- clock-output-names = "socpll";
- type = <1>;
- };
-
- socplldiv2: socplldiv2 {
- compatible = "fixed-factor-clock";
- #clock-cells = <1>;
- clocks = <&socpll 0>;
- clock-names = "socplldiv2";
- clock-mult = <1>;
- clock-div = <2>;
- clock-output-names = "socplldiv2";
- };
-
- qmlclk: qmlclk {
- compatible = "apm,xgene-device-clock";
- #clock-cells = <1>;
- clocks = <&socplldiv2 0>;
- clock-names = "qmlclk";
- reg = <0x0 0x1703C000 0x0 0x1000>;
- reg-names = "csr-reg";
- clock-output-names = "qmlclk";
- };
-
- ethclk: ethclk {
- compatible = "apm,xgene-device-clock";
- #clock-cells = <1>;
- clocks = <&socplldiv2 0>;
- clock-names = "ethclk";
- reg = <0x0 0x17000000 0x0 0x1000>;
- reg-names = "div-reg";
- divider-offset = <0x238>;
- divider-width = <0x9>;
- divider-shift = <0x0>;
- clock-output-names = "ethclk";
- };
-
- eth8clk: eth8clk {
- compatible = "apm,xgene-device-clock";
- #clock-cells = <1>;
- clocks = <&ethclk 0>;
- clock-names = "eth8clk";
- reg = <0x0 0x1702C000 0x0 0x1000>;
- reg-names = "csr-reg";
- clock-output-names = "eth8clk";
- };
- };
-
serial0: serial@1c020000 {
device_type = "serial";
compatible = "ns16550";
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 519c4b2..84fcc50 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -6,8 +6,6 @@
/dts-v1/;
-/memreserve/ 0x80000000 0x00010000;
-
/ {
model = "Foundation-v8A";
compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 84139be..31c81e9 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -26,7 +26,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
CONFIG_SMP=y
-CONFIG_PREEMPT=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 519f89f..79a642d 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -50,4 +50,3 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
-generic-y += preempt.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 9400596..c9f1d28 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -92,49 +92,19 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline u32 arch_timer_get_cntkctl(void)
+static inline void arch_counter_set_user_access(void)
{
u32 cntkctl;
+
+ /* Disable user access to the timers and the physical counter. */
asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- return cntkctl;
-}
+ cntkctl &= ~((3 << 8) | (1 << 0));
-static inline void arch_timer_set_cntkctl(u32 cntkctl)
-{
+ /* Enable user access to the virtual counter and frequency. */
+ cntkctl |= (1 << 1);
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
-static inline void arch_counter_set_user_access(void)
-{
- u32 cntkctl = arch_timer_get_cntkctl();
-
- /* Disable user access to the timers and the physical counter */
- /* Also disable virtual event stream */
- cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
- | ARCH_TIMER_USR_VT_ACCESS_EN
- | ARCH_TIMER_VIRT_EVT_EN
- | ARCH_TIMER_USR_PCT_ACCESS_EN);
-
- /* Enable user access to the virtual counter */
- cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
-
- arch_timer_set_cntkctl(cntkctl);
-}
-
-static inline void arch_timer_evtstrm_enable(int divider)
-{
- u32 cntkctl = arch_timer_get_cntkctl();
- cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
- /* Set the divider and enable virtual event stream */
- cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
- | ARCH_TIMER_VIRT_EVT_EN;
- arch_timer_set_cntkctl(cntkctl);
- elf_hwcap |= HWCAP_EVTSTRM;
-#ifdef CONFIG_COMPAT
- compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
-#endif
-}
-
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
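
The restored arch_counter_set_user_access() open-codes a read-modify-write of CNTKCTL_EL1: it clears bits 9:8 (EL0 timer access) and bit 0 (EL0 physical counter), then sets bit 1 (EL0 virtual counter). A minimal sketch with the bit positions named for readability — the mask names are illustrative, not taken from this tree's headers:

#define CNTKCTL_EL0PCTEN	(1u << 0)	/* EL0 access to the physical counter */
#define CNTKCTL_EL0VCTEN	(1u << 1)	/* EL0 access to the virtual counter  */
#define CNTKCTL_EL0VTEN		(1u << 8)	/* EL0 access to the virtual timer    */
#define CNTKCTL_EL0PTEN		(1u << 9)	/* EL0 access to the physical timer   */

static inline void set_counter_user_access_sketch(void)
{
	unsigned long cntkctl;

	asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
	/* Forbid EL0 use of the timers and the physical counter... */
	cntkctl &= ~(CNTKCTL_EL0PTEN | CNTKCTL_EL0VTEN | CNTKCTL_EL0PCTEN);
	/* ...but let EL0 read the virtual counter (and its frequency via cntfrq). */
	cntkctl |= CNTKCTL_EL0VCTEN;
	asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}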
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index fd3e392..5aceb83 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -115,34 +115,3 @@ lr .req x30 // link register
.align 7
b \label
.endm
-
-/*
- * Select code when configured for BE.
- */
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define CPU_BE(code...) code
-#else
-#define CPU_BE(code...)
-#endif
-
-/*
- * Select code when configured for LE.
- */
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define CPU_LE(code...)
-#else
-#define CPU_LE(code...) code
-#endif
-
-/*
- * Define a macro that constructs a 64-bit value by concatenating two
- * 32-bit registers. Note that on big endian systems the order of the
- * registers is swapped.
- */
-#ifndef CONFIG_CPU_BIG_ENDIAN
- .macro regs_to_64, rd, lbits, hbits
-#else
- .macro regs_to_64, rd, hbits, lbits
-#endif
- orr \rd, \lbits, \hbits, lsl #32
- .endm
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aa..8363644 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -126,6 +126,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long tmp, tmp2;
+
+ asm volatile("// atomic_clear_mask\n"
+"1: ldxr %0, %2\n"
+" bic %0, %0, %3\n"
+" stxr %w1, %0, %2\n"
+" cbnz %w1, 1b"
+ : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
+ : "Ir" (mask)
+ : "cc");
+}
+
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
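
The atomic_clear_mask() hunk above is a standard load-exclusive/modify/store-exclusive retry loop (ldxr/bic/stxr, with cbnz restarting the loop whenever the exclusive reservation is lost). As a rough portable analog — a sketch using compiler builtins, not code from this tree:

/* Atomically *addr &= ~mask; relaxed ordering, matching the asm (no barrier). */
static inline void atomic_clear_mask_sketch(unsigned long mask, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED);
}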
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 3914c0d..8a8ce0e 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -173,6 +173,4 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
-#define cmpxchg64_relaxed(ptr,o,n) cmpxchg_local((ptr),(o),(n))
-
#endif /* __ASM_CMPXCHG_H */
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index fda2704..899af80 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -26,11 +26,7 @@
#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
-#ifdef __AARCH64EB__
-#define COMPAT_UTS_MACHINE "armv8b\0\0"
-#else
#define COMPAT_UTS_MACHINE "armv8l\0\0"
-#endif
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -77,23 +73,13 @@ struct compat_timeval {
};
struct compat_stat {
-#ifdef __AARCH64EB__
- short st_dev;
- short __pad1;
-#else
compat_dev_t st_dev;
-#endif
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_ushort_t st_nlink;
__compat_uid16_t st_uid;
__compat_gid16_t st_gid;
-#ifdef __AARCH64EB__
- short st_rdev;
- short __pad2;
-#else
compat_dev_t st_rdev;
-#endif
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
deleted file mode 100644
index c4cdb5e..0000000
--- a/arch/arm64/include/asm/cpu_ops.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_CPU_OPS_H
-#define __ASM_CPU_OPS_H
-
-#include <linux/init.h>
-#include <linux/threads.h>
-
-struct device_node;
-
-/**
- * struct cpu_operations - Callback operations for hotplugging CPUs.
- *
- * @name: Name of the property as appears in a devicetree cpu node's
- * enable-method property.
- * @cpu_init: Reads any data necessary for a specific enable-method from the
- * devicetree, for a given cpu node and proposed logical id.
- * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
- * mechanism for doing so, tests whether it is possible to boot
- * the given CPU.
- * @cpu_boot: Boots a cpu into the kernel.
- * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
- * synchronisation. Called from the cpu being booted.
- * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
- * reason, which will cause the hot unplug to be aborted. Called
- * from the cpu to be killed.
- * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
- * cpu being killed.
- */
-struct cpu_operations {
- const char *name;
- int (*cpu_init)(struct device_node *, unsigned int);
- int (*cpu_prepare)(unsigned int);
- int (*cpu_boot)(unsigned int);
- void (*cpu_postboot)(void);
-#ifdef CONFIG_HOTPLUG_CPU
- int (*cpu_disable)(unsigned int cpu);
- void (*cpu_die)(unsigned int cpu);
-#endif
-};
-
-extern const struct cpu_operations *cpu_ops[NR_CPUS];
-extern int __init cpu_read_ops(struct device_node *dn, int cpu);
-extern void __init cpu_read_bootcpu_ops(void);
-
-#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index fd0c0c0..8d18100 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,15 +23,11 @@
#include <asm-generic/dma-coherent.h>
-#include <xen/xen.h>
-#include <asm/xen/hypervisor.h>
-
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (unlikely(!dev) || !dev->archdata.dma_ops)
return dma_ops;
@@ -39,14 +35,6 @@ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
return dev->archdata.dma_ops;
}
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- if (xen_initial_domain())
- return xen_dma_ops;
- else
- return __generic_dma_ops(dev);
-}
-
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
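
After the rewind, get_dma_ops() resolves directly to the device's archdata ops or the global dma_ops fallback; the removed lines had routed a Xen initial domain through xen_dma_ops instead. A minimal caller sketch (illustrative, assuming the struct dma_map_ops layout of this era):

static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);	/* per-device ops or dma_ops */

	return ops->map_page(dev, page, 0, size, dir, NULL);
}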
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 01d3aab..e7fa87f 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -90,24 +90,11 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
-#ifdef __AARCH64EB__
-#define ELF_DATA ELFDATA2MSB
-#else
#define ELF_DATA ELFDATA2LSB
-#endif
#define ELF_ARCH EM_AARCH64
-/*
- * This yields a string that ld.so will use to load implementation
- * specific libraries for optimization. This is more specific in
- * intent than poking at uname or /proc/cpuinfo.
- */
#define ELF_PLATFORM_SIZE 16
-#ifdef __AARCH64EB__
-#define ELF_PLATFORM ("aarch64_be")
-#else
#define ELF_PLATFORM ("aarch64")
-#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -162,12 +149,7 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#ifdef CONFIG_COMPAT
-
-#ifdef __AARCH64EB__
-#define COMPAT_ELF_PLATFORM ("v8b")
-#else
#define COMPAT_ELF_PLATFORM ("v8l")
-#endif
#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 6cddbb0..e2950b0 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,7 +30,6 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
-#define COMPAT_HWCAP_EVTSTRM (1 << 21)
#ifndef __ASSEMBLY__
/*
@@ -38,11 +37,11 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-
-#ifdef CONFIG_COMPAT
-#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
-extern unsigned int compat_elf_hwcap;
-#endif
+#define COMPAT_ELF_HWCAP (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
extern unsigned long elf_hwcap;
#endif
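
COMPAT_ELF_HWCAP is what a 32-bit process sees in AT_HWCAP; the rewind replaces the runtime-probed compat_elf_hwcap variable with a fixed mask. From userspace the vector is read the usual way — a generic sketch, not tied to this tree:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* On AArch64, HWCAP_FP and HWCAP_ASIMD are bits 0 and 1 (see the uapi header below). */
	printf("fp=%lu asimd=%lu\n", hwcap & 1, (hwcap >> 1) & 1);
	return 0;
}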
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 5727697..1d12f89 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,14 +22,11 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/pgtable.h>
-#include <xen/xen.h>
-
/*
* Generic IO read/write. These perform native-endian accesses.
*/
@@ -227,9 +224,8 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
*/
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void __iounmap(volatile void __iomem *addr);
-extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
-#define PROT_DEFAULT (pgprot_default | PTE_DIRTY)
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
@@ -237,6 +233,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
+#define ioremap_cached(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL))
#define iounmap __iounmap
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
@@ -266,12 +263,5 @@ extern int devmem_is_allowed(unsigned long pfn);
*/
#define xlate_dev_kmem_ptr(p) p
-struct bio_vec;
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-
#endif /* __KERNEL__ */
#endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index e1f7ecd..0332fc0 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,7 +4,6 @@
#include <asm-generic/irq.h>
extern void (*handle_arch_irq)(struct pt_regs *);
-extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index b2fcfbc..aa11943 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -56,9 +56,6 @@ static inline void arch_local_irq_disable(void)
#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
-#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
-#define local_async_disable() asm("msr daifset, #4" : : : "memory")
-
/*
* Save the current interrupt enable state.
*/
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c98ef47..a5f28e2 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -63,7 +63,6 @@
* TAC: Trap ACTLR
* TSC: Trap SMC
* TSW: Trap cache operations by set/way
- * TWE: Trap WFE
* TWI: Trap WFI
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
@@ -73,9 +72,8 @@
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
*/
-#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
- HCR_BSU_IS | HCR_FB | HCR_TAC | \
- HCR_AMO | HCR_IMO | HCR_FMO | \
+#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+ HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
@@ -244,6 +242,4 @@
#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
-#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0)
-
#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index dd8ecfc..eec0738 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -177,65 +177,4 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
}
-static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
-{
- return vcpu_sys_reg(vcpu, MPIDR_EL1);
-}
-
-static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
-{
- if (vcpu_mode_is_32bit(vcpu))
- *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
- else
- vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
-}
-
-static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
-{
- if (vcpu_mode_is_32bit(vcpu))
- return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
-
- return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
-}
-
-static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return be16_to_cpu(data & 0xffff);
- case 4:
- return be32_to_cpu(data & 0xffffffff);
- default:
- return be64_to_cpu(data);
- }
- }
-
- return data; /* Leave LE untouched */
-}
-
-static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_be16(data & 0xffff);
- case 4:
- return cpu_to_be32(data & 0xffffffff);
- default:
- return cpu_to_be64(data);
- }
- }
-
- return data; /* Leave LE untouched */
-}
-
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5d85a02..0859a4d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -36,6 +36,11 @@
#define KVM_VCPU_MAX_FEATURES 2
+/* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+
struct kvm_vcpu;
int kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -146,7 +151,6 @@ struct kvm_vcpu_stat {
struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
struct kvm_one_reg;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 680f74e..efe609c 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -91,7 +91,6 @@ int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
-#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
static inline bool kvm_is_write_fault(unsigned long esr)
{
@@ -117,18 +116,13 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
pte_val(*pte) |= PTE_S2_RDWR;
}
-static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
-{
- pmd_val(*pmd) |= PMD_S2_RDWR;
-}
-
struct kvm;
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
- unsigned long size)
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
{
if (!icache_is_aliasing()) { /* PIPT */
- flush_icache_range(hva, hva + size);
+ unsigned long hva = gfn_to_hva(kvm, gfn);
+ flush_icache_range(hva, hva + PAGE_SIZE);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
/* any kind of VIPT cache */
__flush_icache_all();
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3776217..20925bc 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,23 +33,18 @@
#define UL(x) _AC(x, UL)
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
- * (VA_BITS - 1))
+ * PAGE_OFFSET - the virtual address of the start of the kernel image.
* VA_BITS - the maximum number of bits for virtual addresses.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
-#ifdef CONFIG_ARM64_64K_PAGES
-#define VA_BITS (42)
-#else
-#define VA_BITS (39)
-#endif
-#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
+#define PAGE_OFFSET UL(0xffffffc000000000)
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
+#define VA_BITS (39)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
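
The deleted lines computed PAGE_OFFSET by shifting all-ones up to bit VA_BITS-1; the restored v3.12 line hard-codes the identical value for VA_BITS == 39. Checking the arithmetic (illustrative names):

#define VA_BITS_EX	39
#define PAGE_OFFSET_EX	(0xffffffffffffffffUL << (VA_BITS_EX - 1))
/* PAGE_OFFSET_EX == 0xffffffc000000000UL, the constant above: the kernel
 * claims the upper half of the 2^39-byte address space, i.e. the top 256GB. */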
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 9bea6e7..f214069 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -63,12 +63,9 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
struct page *pte;
pte = alloc_pages(PGALLOC_GFP, 0);
- if (!pte)
- return NULL;
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
+ if (pte)
+ pgtable_page_ctor(pte);
+
return pte;
}
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
index 2593b49..0a8ed3f 100644
--- a/arch/arm64/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h
@@ -21,10 +21,10 @@
* 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
* used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
* entry representing 512MB. The user and kernel address spaces are limited to
- * 4TB in the 64KB page configuration.
+ * 512GB and therefore we only use 1024 entries in the PGD.
*/
#define PTRS_PER_PTE 8192
-#define PTRS_PER_PGD 8192
+#define PTRS_PER_PGD 1024
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index b1d2e26..d57e668 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -43,7 +43,7 @@
* Section
*/
#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
-#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
+#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
@@ -85,8 +85,6 @@
#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
-#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
-
/*
* Memory Attribute override for Stage-2 (MemAttr[3:0])
*/
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7f2b60a..f0bebc5 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,16 +25,15 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */
+#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
- /* bit 57 for PMD_SECT_SPLITTING */
-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*/
-#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS)
+#define VMALLOC_START UL(0xffffff8000000000)
#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
@@ -255,7 +254,7 @@ static inline int has_transparent_hugepage(void)
#define pgprot_noncached(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define __HAVE_PHYS_MEM_ACCESS_PROT
@@ -358,20 +357,18 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a swap entry:
- * bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-8: swap type
- * bits 9-57: swap offset
+ * bits 0, 2: present (must both be zero)
+ * bit 3: PTE_FILE
+ * bits 4-8: swap type
+ * bits 9-63: swap offset
*/
-#define __SWP_TYPE_SHIFT 3
+#define __SWP_TYPE_SHIFT 4
#define __SWP_TYPE_BITS 6
-#define __SWP_OFFSET_BITS 49
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
-#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
@@ -385,15 +382,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a file entry:
- * bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-57: file offset / PAGE_SIZE
+ * bits 0, 2: present (must both be zero)
+ * bit 3: PTE_FILE
+ * bits 4-63: file offset / PAGE_SIZE
*/
#define pte_file(pte) (pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 3)
-#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
+#define pte_to_pgoff(x) (pte_val(x) >> 4)
+#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
-#define PTE_FILE_MAX_BITS 55
+#define PTE_FILE_MAX_BITS 60
extern int kern_addr_valid(unsigned long addr);
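
Per the macros, the swap type is __SWP_TYPE_BITS == 6 bits wide starting at bit __SWP_TYPE_SHIFT == 4, so the offset field begins at bit 10. A hypothetical round trip of that packing (names invented for the sketch):

typedef struct { unsigned long val; } swp_ex_t;

static inline swp_ex_t mk_swp_ex(unsigned int type, unsigned long offset)
{
	return (swp_ex_t){ ((unsigned long)type << 4) | (offset << (4 + 6)) };
}

static inline unsigned int swp_type_ex(swp_ex_t e)
{
	return (e.val >> 4) & ((1 << 6) - 1);	/* 6 bits of type */
}

static inline unsigned long swp_offset_ex(swp_ex_t e)
{
	return e.val >> (4 + 6);		/* everything above the type field */
}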
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 45b20cd..ab239b2 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -107,11 +107,6 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = COMPAT_PSR_MODE_USR;
if (pc & 1)
regs->pstate |= COMPAT_PSR_T_BIT;
-
-#ifdef __AARCH64EB__
- regs->pstate |= COMPAT_PSR_E_BIT;
-#endif
-
regs->compat_sp = sp;
}
#endif
diff --git a/arch/arm64/include/asm/prom.h b/arch/arm64/include/asm/prom.h
new file mode 100644
index 0000000..68b90e6
--- /dev/null
+++ b/arch/arm64/include/asm/prom.h
@@ -0,0 +1 @@
+/* Empty for now */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index e5312ea..0604237 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,6 +14,25 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+extern struct psci_operations psci_ops;
+
int psci_init(void);
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 0e7fa49..0dacbbf 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,7 +42,6 @@
#define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
-#define COMPAT_PSR_E_BIT 0x00000200
#define COMPAT_PSR_F_BIT 0x00000040
#define COMPAT_PSR_I_BIT 0x00000080
#define COMPAT_PSR_A_BIT 0x00000100
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a498f2c..4b8023c 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,14 +60,21 @@ struct secondary_data {
void *stack;
};
extern struct secondary_data secondary_data;
-extern void secondary_entry(void);
+extern void secondary_holding_pen(void);
+extern volatile unsigned long secondary_holding_pen_release;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-extern int __cpu_disable(void);
+struct device_node;
-extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
+struct smp_enable_ops {
+ const char *name;
+ int (*init_cpu)(struct device_node *, int);
+ int (*prepare_cpu)(int);
+};
+
+extern const struct smp_enable_ops smp_spin_table_ops;
+extern const struct smp_enable_ops smp_psci_ops;
#endif /* ifndef __ASM_SMP_H */
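
The two providers declared here live in smp_spin_table.c and smp_psci.c (both restored by this commit). A provider fills in the ops table along these lines — a sketch of the expected shape, not code quoted from this tree:

static int ex_init_cpu(struct device_node *dn, int cpu)
{
	/* Parse the cpu node's enable-method specifics, e.g. a release address. */
	return 0;
}

static int ex_prepare_cpu(int cpu)
{
	/* Kick the CPU out of its holding pen. */
	return 0;
}

const struct smp_enable_ops smp_example_ops = {
	.name		= "example-method",
	.init_cpu	= ex_init_cpu,
	.prepare_cpu	= ex_prepare_cpu,
};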
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 3d5cf06..0defa07 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -22,10 +22,17 @@
/*
* Spinlock implementation.
*
+ * The old value is read exclusively and the new one, if unlocked, is written
+ * exclusively. In case of failure, the loop is restarted.
+ *
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
+ *
+ * Unlocked value: 0
+ * Locked value: 1
*/
+#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
@@ -34,51 +41,32 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
- arch_spinlock_t lockval, newval;
asm volatile(
- /* Atomically increment the next ticket. */
-" prfm pstl1strm, %3\n"
-"1: ldaxr %w0, %3\n"
-" add %w1, %w0, %w5\n"
-" stxr %w2, %w1, %3\n"
-" cbnz %w2, 1b\n"
- /* Did we get the lock? */
-" eor %w1, %w0, %w0, ror #16\n"
-" cbz %w1, 3f\n"
- /*
- * No: spin on the owner. Send a local event to avoid missing an
- * unlock before the exclusive load.
- */
-" sevl\n"
-"2: wfe\n"
-" ldaxrh %w2, %4\n"
-" eor %w1, %w2, %w0, lsr #16\n"
-" cbnz %w1, 2b\n"
- /* We got the lock. Critical section starts here. */
-"3:"
- : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
- : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
- : "memory");
+ " sevl\n"
+ "1: wfe\n"
+ "2: ldaxr %w0, %1\n"
+ " cbnz %w0, 1b\n"
+ " stxr %w0, %w2, %1\n"
+ " cbnz %w0, 2b\n"
+ : "=&r" (tmp), "+Q" (lock->lock)
+ : "r" (1)
+ : "cc", "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp;
- arch_spinlock_t lockval;
asm volatile(
-" prfm pstl1strm, %2\n"
-"1: ldaxr %w0, %2\n"
-" eor %w1, %w0, %w0, ror #16\n"
-" cbnz %w1, 2f\n"
-" add %w0, %w0, %3\n"
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 1b\n"
-"2:"
- : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- : "I" (1 << TICKET_SHIFT)
- : "memory");
+ "2: ldaxr %w0, %1\n"
+ " cbnz %w0, 1f\n"
+ " stxr %w0, %w2, %1\n"
+ " cbnz %w0, 2b\n"
+ "1:\n"
+ : "=&r" (tmp), "+Q" (lock->lock)
+ : "r" (1)
+ : "cc", "memory");
return !tmp;
}
@@ -86,28 +74,9 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
-" stlrh %w1, %0\n"
- : "=Q" (lock->owner)
- : "r" (lock->owner + 1)
- : "memory");
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return lock.owner == lock.next;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- arch_spinlock_t lockval = ACCESS_ONCE(*lock);
- return (lockval.next - lockval.owner) > 1;
+ " stlr %w1, %0\n"
+ : "=Q" (lock->lock) : "r" (0) : "memory");
}
-#define arch_spin_is_contended arch_spin_is_contended
/*
* Write lock implementation.
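
The rewind swaps the ticket lock (16-bit next/owner halves, FIFO hand-off via stlrh) for a plain test-and-set lock: 0 means unlocked, 1 means locked, and waiters spin in wfe with no fairness guarantee. A rough C model of the restored behaviour, with builtins standing in for ldaxr/stxr (a sketch, not this tree's code):

typedef struct { volatile unsigned int lock; } tas_lock_t;

static inline void tas_lock(tas_lock_t *l)
{
	/* Spin until we atomically flip 0 -> 1; acquire pairs with the release below. */
	while (__atomic_exchange_n(&l->lock, 1, __ATOMIC_ACQUIRE))
		;	/* cpu_relax()/wfe would sit here */
}

static inline void tas_unlock(tas_lock_t *l)
{
	__atomic_store_n(&l->lock, 0, __ATOMIC_RELEASE);	/* like the stlr above */
}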
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index b8d3836..9a49434 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,19 +20,14 @@
# error "please don't include this file directly"
#endif
-#define TICKET_SHIFT 16
+/* We only require natural alignment for exclusive accesses. */
+#define __lock_aligned
typedef struct {
-#ifdef __AARCH64EB__
- u16 next;
- u16 owner;
-#else
- u16 owner;
- u16 next;
-#endif
-} __aligned(4) arch_spinlock_t;
+ volatile unsigned int lock;
+} arch_spinlock_t;
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 70ba9d4..89c047f 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -59,9 +59,6 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
- if (n == 0)
- return;
-
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -85,9 +82,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
- if (n == 0)
- return;
-
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 720e70b..23a3c47 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -89,6 +89,12 @@ static inline struct thread_info *current_thread_info(void)
#endif
/*
+ * We use bit 30 of the preempt_count to indicate that kernel
+ * preemption is occurring. See <asm/hardirq.h>.
+ */
+#define PREEMPT_ACTIVE 0x40000000
+
+/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
* TIF_SIGPENDING - signal pending
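
PREEMPT_ACTIVE is a single high bit folded into preempt_count so the scheduler can tell "we are in the middle of preempting" apart from ordinary preempt-disable nesting. Schematically (illustrative helper, not kernel code):

static inline int preempting_sketch(unsigned long preempt_count)
{
	/* Bit 30 is set around preempt_schedule(); the low bits hold the nesting depth. */
	return (preempt_count & 0x40000000) != 0;
}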
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 130e2be..26e310c 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,8 +18,7 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
-#define BOOT_CPU_MODE_EL1 (0xe11)
-#define BOOT_CPU_MODE_EL2 (0xe12)
+#define BOOT_CPU_MODE_EL2 (0x0e12b007)
#ifndef __ASSEMBLY__
#include <asm/cacheflush.h>
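
With only the EL2 magic value left (0x0e12b007 — roughly "el2 boot" spelled in hex), __boot_cpu_mode degenerates to two words: the head.S hunk below zeroes word 0 from a CPU that entered in EL1 and stamps word 1 from one that entered in EL2. A check in the spirit of mainline's is_hyp_mode_available() might look like this sketch (assumed layout, not this tree's code):

extern unsigned int __boot_cpu_mode[2];	/* written from head.S, see below */

static inline int hyp_available_sketch(void)
{
	/* Word 0 still holds the magic (no CPU fell back to EL1) and word 1
	 * was stamped by a CPU that really started in EL2. */
	return __boot_cpu_mode[0] == 0x0e12b007 && __boot_cpu_mode[1] == 0x0e12b007;
}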
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
deleted file mode 100644
index 2820f1a..0000000
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
-#define _ASM_ARM64_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-attrs.h>
-#include <linux/dma-mapping.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
-{
- return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
-#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/include/uapi/asm/byteorder.h b/arch/arm64/include/uapi/asm/byteorder.h
index dc19e95..2b92046 100644
--- a/arch/arm64/include/uapi/asm/byteorder.h
+++ b/arch/arm64/include/uapi/asm/byteorder.h
@@ -16,10 +16,6 @@
#ifndef __ASM_BYTEORDER_H
#define __ASM_BYTEORDER_H
-#ifdef __AARCH64EB__
-#include <linux/byteorder/big_endian.h>
-#else
#include <linux/byteorder/little_endian.h>
-#endif
#endif /* __ASM_BYTEORDER_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 9b12476..eea4975 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -21,7 +21,6 @@
*/
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
-#define HWCAP_EVTSTRM (1 << 2)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5ba2fd4..7b4b564 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,12 +9,12 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o cpu_ops.o
+ hyp-stub.o psci.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
+arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index e7ee770..41b4f62 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -39,7 +39,6 @@ EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
deleted file mode 100644
index d62d12f..0000000
--- a/arch/arm64/kernel/cpu_ops.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * CPU kernel entry/exit control
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <asm/cpu_ops.h>
-#include <asm/smp_plat.h>
-#include <linux/errno.h>
-#include <linux/of.h>
-#include <linux/string.h>
-
-extern const struct cpu_operations smp_spin_table_ops;
-extern const struct cpu_operations cpu_psci_ops;
-
-const struct cpu_operations *cpu_ops[NR_CPUS];
-
-static const struct cpu_operations *supported_cpu_ops[] __initconst = {
-#ifdef CONFIG_SMP
- &smp_spin_table_ops,
- &cpu_psci_ops,
-#endif
- NULL,
-};
-
-static const struct cpu_operations * __init cpu_get_ops(const char *name)
-{
- const struct cpu_operations **ops = supported_cpu_ops;
-
- while (*ops) {
- if (!strcmp(name, (*ops)->name))
- return *ops;
-
- ops++;
- }
-
- return NULL;
-}
-
-/*
- * Read a cpu's enable method from the device tree and record it in cpu_ops.
- */
-int __init cpu_read_ops(struct device_node *dn, int cpu)
-{
- const char *enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method) {
- /*
- * The boot CPU may not have an enable method (e.g. when
- * spin-table is used for secondaries). Don't warn spuriously.
- */
- if (cpu != 0)
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
- return -ENOENT;
- }
-
- cpu_ops[cpu] = cpu_get_ops(enable_method);
- if (!cpu_ops[cpu]) {
- pr_warn("%s: unsupported enable-method property: %s\n",
- dn->full_name, enable_method);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-void __init cpu_read_bootcpu_ops(void)
-{
- struct device_node *dn = of_get_cpu_node(0, NULL);
- if (!dn) {
- pr_err("Failed to find device node for boot cpu\n");
- return;
- }
- cpu_read_ops(dn, 0);
-}
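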
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
index fd3993c..63cfc4a 100644
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm64/kernel/cputable.c
@@ -22,7 +22,7 @@
extern unsigned long __cpu_setup(void);
-struct cpu_info cpu_table[] = {
+struct cpu_info __initdata cpu_table[] = {
{
.cpu_id_val = 0x000f0000,
.cpu_id_mask = 0x000f0000,
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 4ae6857..cbfacf7 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <asm/debug-monitors.h>
+#include <asm/local.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
@@ -88,8 +89,8 @@ early_param("nodebugmon", early_debug_disable);
* Keep track of debug users on each core.
* The ref counts are per-cpu so we use a local_t type.
*/
-static DEFINE_PER_CPU(int, mde_ref_count);
-static DEFINE_PER_CPU(int, kde_ref_count);
+static DEFINE_PER_CPU(local_t, mde_ref_count);
+static DEFINE_PER_CPU(local_t, kde_ref_count);
void enable_debug_monitors(enum debug_el el)
{
@@ -97,11 +98,11 @@ void enable_debug_monitors(enum debug_el el)
WARN_ON(preemptible());
- if (this_cpu_inc_return(mde_ref_count) == 1)
+ if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
enable = DBG_MDSCR_MDE;
if (el == DBG_ACTIVE_EL1 &&
- this_cpu_inc_return(kde_ref_count) == 1)
+ local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
enable |= DBG_MDSCR_KDE;
if (enable && debug_enabled) {
@@ -117,11 +118,11 @@ void disable_debug_monitors(enum debug_el el)
WARN_ON(preemptible());
- if (this_cpu_dec_return(mde_ref_count) == 0)
+ if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
disable = ~DBG_MDSCR_MDE;
if (el == DBG_ACTIVE_EL1 &&
- this_cpu_dec_return(kde_ref_count) == 0)
+ local_dec_and_test(&__get_cpu_var(kde_ref_count)))
disable &= ~DBG_MDSCR_KDE;
if (disable) {
@@ -248,8 +249,7 @@ static int brk_handler(unsigned long addr, unsigned int esr,
int aarch32_break_handler(struct pt_regs *regs)
{
siginfo_t info;
- u32 arm_instr;
- u16 thumb_instr;
+ unsigned int instr;
bool bp = false;
void __user *pc = (void __user *)instruction_pointer(regs);
@@ -258,21 +258,18 @@ int aarch32_break_handler(struct pt_regs *regs)
if (compat_thumb_mode(regs)) {
/* get 16-bit Thumb instruction */
- get_user(thumb_instr, (u16 __user *)pc);
- thumb_instr = le16_to_cpu(thumb_instr);
- if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
+ get_user(instr, (u16 __user *)pc);
+ if (instr == AARCH32_BREAK_THUMB2_LO) {
/* get second half of 32-bit Thumb-2 instruction */
- get_user(thumb_instr, (u16 __user *)(pc + 2));
- thumb_instr = le16_to_cpu(thumb_instr);
- bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
+ get_user(instr, (u16 __user *)(pc + 2));
+ bp = instr == AARCH32_BREAK_THUMB2_HI;
} else {
- bp = thumb_instr == AARCH32_BREAK_THUMB;
+ bp = instr == AARCH32_BREAK_THUMB;
}
} else {
/* 32-bit ARM instruction */
- get_user(arm_instr, (u32 __user *)pc);
- arm_instr = le32_to_cpu(arm_instr);
- bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+ get_user(instr, (u32 __user *)pc);
+ bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
}
if (!bp)
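
The first hunks in this file trade the this_cpu_*_return() helpers for local_t, a counter type that is atomic with respect to the current CPU only. The per-CPU refcount pattern they restore, as a generic sketch (kernel context assumed; names invented):

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, ex_ref_count);

static void ex_ref_get(void)	/* caller has preemption disabled */
{
	if (local_inc_return(&__get_cpu_var(ex_ref_count)) == 1)
		pr_info("first user on this CPU: enable the facility\n");
}

static void ex_ref_put(void)
{
	/* local_dec_and_test() returns true when the count reaches zero. */
	if (local_dec_and_test(&__get_cpu_var(ex_ref_count)))
		pr_info("last user on this CPU: disable the facility\n");
}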
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 4d2c6f3..3881fd1 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -309,13 +309,16 @@ el1_irq:
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
-
- irq_handler
-
#ifdef CONFIG_PREEMPT
get_thread_info tsk
- ldr w24, [tsk, #TI_PREEMPT] // restore preempt count
- cbnz w24, 1f // preempt count != 0
+ ldr x24, [tsk, #TI_PREEMPT] // get preempt count
+ add x0, x24, #1 // increment it
+ str x0, [tsk, #TI_PREEMPT]
+#endif
+ irq_handler
+#ifdef CONFIG_PREEMPT
+ str x24, [tsk, #TI_PREEMPT] // restore preempt count
+ cbnz x24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
@@ -504,10 +507,22 @@ el0_irq_naked:
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
-
- irq_handler
get_thread_info tsk
-
+#ifdef CONFIG_PREEMPT
+ ldr x24, [tsk, #TI_PREEMPT] // get preempt count
+ add x23, x24, #1 // increment it
+ str x23, [tsk, #TI_PREEMPT]
+#endif
+ irq_handler
+#ifdef CONFIG_PREEMPT
+ ldr x0, [tsk, #TI_PREEMPT]
+ str x24, [tsk, #TI_PREEMPT]
+ cmp x0, x23
+ b.eq 1f
+ mov x1, #0
+ str x1, [x1] // BUG
+1:
+#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
#endif
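
In C terms, the restored el0_irq path brackets the handler with a manual preempt-count bump and then BUGs (via a deliberate store to NULL) if the handler returned with the count unbalanced. An illustrative analog, not actual kernel code:

static void el0_irq_analog(struct thread_info *ti, struct pt_regs *regs)
{
	unsigned long saved = ti->preempt_count;	/* x24 in the asm */

	ti->preempt_count = saved + 1;		/* no preemption inside the handler */
	handle_arch_irq(regs);			/* the irq_handler macro */
	if (ti->preempt_count != saved + 1)	/* unbalanced: the asm stores to NULL */
		BUG();
	ti->preempt_count = saved;
}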
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c68cca5..7090c12 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -123,9 +123,8 @@
ENTRY(stext)
mov x21, x0 // x21=FDT
- bl el2_setup // Drop to EL1, w20=cpu_boot_mode
bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
- bl set_cpu_boot_mode_flag
+ bl el2_setup // Drop to EL1
mrs x22, midr_el1 // x22=cpuid
mov x0, x22
bl lookup_processor_type
@@ -151,30 +150,21 @@ ENDPROC(stext)
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
- *
- * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
- * booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
mrs x0, CurrentEL
cmp x0, #PSR_MODE_EL2t
ccmp x0, #PSR_MODE_EL2h, #0x4, ne
- b.ne 1f
- mrs x0, sctlr_el2
-CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
-CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
- msr sctlr_el2, x0
- b 2f
-1: mrs x0, sctlr_el1
-CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
-CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
- msr sctlr_el1, x0
- mov w20, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
- isb
+ ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode
+ add x0, x0, x28
+ b.eq 1f
+ str wzr, [x0] // Remember we don't have EL2...
ret
/* Hyp configuration. */
-2: mov x0, #(1 << 31) // 64-bit EL1
+1: ldr w1, =BOOT_CPU_MODE_EL2
+ str w1, [x0, #4] // This CPU has EL2
+ mov x0, #(1 << 31) // 64-bit EL1
msr hcr_el2, x0
/* Generic timers. */
@@ -191,8 +181,7 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
/* sctlr_el1 */
mov x0, #0x0800 // Set/clear RES{1,0} bits
-CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
-CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
+ movk x0, #0x30d0, lsl #16
msr sctlr_el1, x0
/* Coprocessor traps. */
@@ -215,25 +204,10 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
PSR_MODE_EL1h)
msr spsr_el2, x0
msr elr_el2, lr
- mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
eret
ENDPROC(el2_setup)
/*
- * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
- * in x20. See arch/arm64/include/asm/virt.h for more info.
- */
-ENTRY(set_cpu_boot_mode_flag)
- ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode
- add x1, x1, x28
- cmp w20, #BOOT_CPU_MODE_EL2
- b.ne 1f
- add x1, x1, #4
-1: str w20, [x1] // This CPU has booted in EL1
- ret
-ENDPROC(set_cpu_boot_mode_flag)
-
-/*
* We need to find out the CPU boot mode long after boot, so we need to
* store it in a writable variable.
*
@@ -251,6 +225,7 @@ ENTRY(__boot_cpu_mode)
.quad PAGE_OFFSET
#ifdef CONFIG_SMP
+ .pushsection .smp.pen.text, "ax"
.align 3
1: .quad .
.quad secondary_holding_pen_release
@@ -260,9 +235,8 @@ ENTRY(__boot_cpu_mode)
* cores are held until we're ready for them to initialise.
*/
ENTRY(secondary_holding_pen)
- bl el2_setup // Drop to EL1, w20=cpu_boot_mode
- bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
- bl set_cpu_boot_mode_flag
+ bl __calc_phys_offset // x24=phys offset
+ bl el2_setup // Drop to EL1
mrs x0, mpidr_el1
ldr x1, =MPIDR_HWID_BITMASK
and x0, x0, x1
@@ -276,17 +250,7 @@ pen: ldr x4, [x3]
wfe
b pen
ENDPROC(secondary_holding_pen)
-
- /*
- * Secondary entry point that jumps straight into the kernel. Only to
- * be used where CPUs are brought online dynamically by the kernel.
- */
-ENTRY(secondary_entry)
- bl el2_setup // Drop to EL1
- bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
- bl set_cpu_boot_mode_flag
- b secondary_startup
-ENDPROC(secondary_entry)
+ .popsection
ENTRY(secondary_startup)
/*
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index ff516f6..329218c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -184,14 +184,14 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
/* Breakpoint */
ctrl_reg = AARCH64_DBG_REG_BCR;
val_reg = AARCH64_DBG_REG_BVR;
- slots = this_cpu_ptr(bp_on_reg);
+ slots = __get_cpu_var(bp_on_reg);
max_slots = core_num_brps;
reg_enable = !debug_info->bps_disabled;
} else {
/* Watchpoint */
ctrl_reg = AARCH64_DBG_REG_WCR;
val_reg = AARCH64_DBG_REG_WVR;
- slots = this_cpu_ptr(wp_on_reg);
+ slots = __get_cpu_var(wp_on_reg);
max_slots = core_num_wrps;
reg_enable = !debug_info->wps_disabled;
}
@@ -230,12 +230,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
/* Breakpoint */
base = AARCH64_DBG_REG_BCR;
- slots = this_cpu_ptr(bp_on_reg);
+ slots = __get_cpu_var(bp_on_reg);
max_slots = core_num_brps;
} else {
/* Watchpoint */
base = AARCH64_DBG_REG_WCR;
- slots = this_cpu_ptr(wp_on_reg);
+ slots = __get_cpu_var(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -505,11 +505,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable)
switch (reg) {
case AARCH64_DBG_REG_BCR:
- slots = this_cpu_ptr(bp_on_reg);
+ slots = __get_cpu_var(bp_on_reg);
max_slots = core_num_brps;
break;
case AARCH64_DBG_REG_WCR:
- slots = this_cpu_ptr(wp_on_reg);
+ slots = __get_cpu_var(wp_on_reg);
max_slots = core_num_wrps;
break;
default:
@@ -546,7 +546,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
struct debug_info *debug_info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = this_cpu_ptr(bp_on_reg);
+ slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
addr = instruction_pointer(regs);
debug_info = &current->thread.debug;
@@ -596,7 +596,7 @@ unlock:
user_enable_single_step(current);
} else {
toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
- kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+ kernel_step = &__get_cpu_var(stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
return 0;
@@ -623,7 +623,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = this_cpu_ptr(wp_on_reg);
+ slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
debug_info = &current->thread.debug;
for (i = 0; i < core_num_wrps; ++i) {
@@ -698,7 +698,7 @@ unlock:
user_enable_single_step(current);
} else {
toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
- kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+ kernel_step = &__get_cpu_var(stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
return 0;
@@ -722,7 +722,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
struct debug_info *debug_info = &current->thread.debug;
int handled_exception = 0, *kernel_step;
- kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+ kernel_step = &__get_cpu_var(stepping_kernel_bp);
/*
* Called from single-step exception handler.
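All of the hw_breakpoint.c hunks are the same mechanical swap: this_cpu_ptr() (v3.13-rc) back to __get_cpu_var() (v3.12). For a per-CPU array both resolve to the local CPU's slots; the casts in two of the hunks only paper over the differing pointer types the two macros yield. A sketch using the names from the file:

    static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

    static struct perf_event **local_bp_slots(void)
    {
            return __get_cpu_var(bp_on_reg);        /* v3.12 spelling    */
            /* return this_cpu_ptr(bp_on_reg);         v3.13-rc spelling;
             * likewise this_cpu_ptr(&v) == &__get_cpu_var(v) for scalars */
    }
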
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 473e5db..ecb3354 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -81,64 +81,3 @@ void __init init_IRQ(void)
if (!handle_arch_irq)
panic("No interrupt controller found.");
}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
- struct irq_data *d = irq_desc_get_irq_data(desc);
- const struct cpumask *affinity = d->affinity;
- struct irq_chip *c;
- bool ret = false;
-
- /*
- * If this is a per-CPU interrupt, or the affinity does not
- * include this CPU, then we have nothing to do.
- */
- if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
- return false;
-
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
- ret = true;
- }
-
- c = irq_data_get_irq_chip(d);
- if (!c->irq_set_affinity)
- pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
- cpumask_copy(d->affinity, affinity);
-
- return ret;
-}
-
-/*
- * The current CPU has been marked offline. Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
- unsigned int i;
- struct irq_desc *desc;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for_each_irq_desc(i, desc) {
- bool affinity_broken;
-
- raw_spin_lock(&desc->lock);
- affinity_broken = migrate_one_irq(desc);
- raw_spin_unlock(&desc->lock);
-
- if (affinity_broken)
- pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
- i, smp_processor_id());
- }
-
- local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
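migrate_irqs() loses its only caller in the same rewind: the CPU-hotplug teardown removed from smp.c further down this patch. Paraphrasing that removed caller (error handling elided) to show where the helper sat:

    int __cpu_disable(void)
    {
            unsigned int cpu = smp_processor_id();

            set_cpu_online(cpu, false);     /* stop new work arriving   */
            migrate_irqs();                 /* push affinity elsewhere  */
            clear_tasks_mm_cpumask(cpu);    /* drop stale mm references */
            return 0;
    }
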
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 63c48ff..8b69ecb 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -27,9 +27,6 @@
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
-
-#include <asm/unistd32.h>
-
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -38,30 +35,33 @@ __kuser_cmpxchg64: // 0xffff0f60
.inst 0xe92d00f0 // push {r4, r5, r6, r7}
.inst 0xe1c040d0 // ldrd r4, r5, [r0]
.inst 0xe1c160d0 // ldrd r6, r7, [r1]
- .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2]
+ .inst 0xf57ff05f // dmb sy
+ .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
.inst 0xe0303004 // eors r3, r0, r4
.inst 0x00313005 // eoreqs r3, r1, r5
- .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
+ .inst 0x01a23f96 // strexdeq r3, r6, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffff9 // beq 1b
+ .inst 0xf57ff05f // dmb sy
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
.inst 0xe12fff1e // bx lr
.align 5
__kuser_memory_barrier: // 0xffff0fa0
- .inst 0xf57ff05b // dmb ish
+ .inst 0xf57ff05f // dmb sy
.inst 0xe12fff1e // bx lr
.align 5
__kuser_cmpxchg: // 0xffff0fc0
- .inst 0xe1923e9f // 1: ldaex r3, [r2]
+ .inst 0xf57ff05f // dmb sy
+ .inst 0xe1923f9f // 1: ldrex r3, [r2]
.inst 0xe0533000 // subs r3, r3, r0
- .inst 0x01823e91 // stlexeq r3, r1, [r2]
+ .inst 0x01823f91 // strexeq r3, r1, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffffa // beq 1b
.inst 0xe2730000 // rsbs r0, r3, #0
- .inst 0xe12fff1e // bx lr
+ .inst 0xeaffffef // b <__kuser_memory_barrier>
.align 5
__kuser_get_tls: // 0xffff0fe0
@@ -75,42 +75,3 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
-
-/*
- * AArch32 sigreturn code
- *
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- *
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
- .globl __aarch32_sigret_code_start
-__aarch32_sigret_code_start:
-
- /*
- * ARM Code
- */
- .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn
-
- /*
- * ARM code
- */
- .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn
-
- .globl __aarch32_sigret_code_end
-__aarch32_sigret_code_end:
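The cmpxchg helpers above keep their fixed vector-page addresses; only the acquire/release forms (ldaex/stlex, new in v3.13-rc) are rewound to ldrex/strex bracketed by dmb. For reference, a hypothetical 32-bit caller of the 0xffff0fc0 helper, following the contract in Documentation/arm/kernel_user_helpers.txt (returns zero when the store succeeded):

    typedef int (*kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    #define kuser_cmpxchg ((kuser_cmpxchg_t)0xffff0fc0)

    static int atomic_increment(volatile int *counter)
    {
            int old;

            do {
                    old = *counter;
            } while (kuser_cmpxchg(old, old + 1, counter) != 0);

            return old + 1;
    }
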
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index e2ad0d8..ca0e3d5 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -29,7 +29,7 @@
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
__builtin_return_address(0));
}
@@ -111,9 +111,6 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
u32 immlo, immhi, lomask, himask, mask;
int shift;
- /* The instruction stream is always little endian. */
- insn = le32_to_cpu(insn);
-
switch (type) {
case INSN_IMM_MOVNZ:
/*
@@ -182,7 +179,7 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
insn &= ~(mask << shift);
insn |= (imm & mask) << shift;
- return cpu_to_le32(insn);
+ return insn;
}
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
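The dropped le32_to_cpu()/cpu_to_le32() pair in encode_insn_immediate() existed because the A64 instruction stream is always little-endian, so a big-endian kernel must byte-swap around the bit surgery. A minimal sketch of the removed pattern (hypothetical helper; the real function dispatches on immediate type first):

    static u32 patch_insn_imm(u32 raw, u32 mask, int shift, u64 imm)
    {
            u32 insn = le32_to_cpu(raw);    /* memory order -> CPU order */

            insn &= ~(mask << shift);
            insn |= (imm & mask) << shift;

            return cpu_to_le32(insn);       /* CPU order -> memory order */
    }
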
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 0e63c98..cea1594 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -784,8 +784,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
+#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
/*
* Event filters for PMUv3
@@ -1044,7 +1044,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
*/
regs = get_irq_regs();
- cpuc = this_cpu_ptr(&cpu_hw_events);
+ cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -1175,8 +1175,7 @@ static void armv8pmu_reset(void *info)
static int armv8_pmuv3_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv8_pmuv3_perf_map,
- &armv8_pmuv3_perf_cache_map,
- ARMV8_EVTYPE_EVENT);
+ &armv8_pmuv3_perf_cache_map, 0xFF);
}
static struct arm_pmu armv8pmu = {
@@ -1258,7 +1257,7 @@ device_initcall(register_pmu_driver);
static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
- return this_cpu_ptr(&cpu_hw_events);
+ return &__get_cpu_var(cpu_hw_events);
}
static void __init cpu_pmu_init(struct arm_pmu *armpmu)
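The EVTYPE change narrows the writable event-number field from 10 bits back to 8. Any event number above 0xff, should a PMU implement one, silently aliases under the narrow mask; a sketch (the event numbers are illustrative):

    static u32 evtype_event(u32 evtype, bool ten_bit_field)
    {
            return evtype & (ten_bit_field ? 0x3ff : 0xff);
            /* e.g. evtype 0x113 -> 0x113 with the 10-bit mask,
             * but 0x13 (a different event) with the 8-bit one */
    }
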
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index de17c89..7ae8a1f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -102,13 +102,6 @@ void arch_cpu_idle(void)
local_irq_enable();
}
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
- cpu_die();
-}
-#endif
-
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 4f97db3..14f73c4 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -17,32 +17,12 @@
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/smp.h>
#include <asm/compiler.h>
-#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
-#include <asm/smp_plat.h>
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
-
-struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
-};
-
-static struct psci_operations psci_ops;
+struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u64, u64, u64, u64);
@@ -229,68 +209,3 @@ out_put_node:
of_node_put(np);
return err;
}
-
-#ifdef CONFIG_SMP
-
-static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
-{
- return 0;
-}
-
-static int __init cpu_psci_cpu_prepare(unsigned int cpu)
-{
- if (!psci_ops.cpu_on) {
- pr_err("no cpu_on method, not booting CPU%d\n", cpu);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int cpu_psci_cpu_boot(unsigned int cpu)
-{
- int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
- if (err)
- pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
-
- return err;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int cpu_psci_cpu_disable(unsigned int cpu)
-{
- /* Fail early if we don't have CPU_OFF support */
- if (!psci_ops.cpu_off)
- return -EOPNOTSUPP;
- return 0;
-}
-
-static void cpu_psci_cpu_die(unsigned int cpu)
-{
- int ret;
- /*
- * There are no known implementations of PSCI actually using the
- * power state field, pass a sensible default for now.
- */
- struct psci_power_state state = {
- .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
- };
-
- ret = psci_ops.cpu_off(state);
-
- pr_crit("psci: unable to power off CPU%u (%d)\n", cpu, ret);
-}
-#endif
-
-const struct cpu_operations cpu_psci_ops = {
- .name = "psci",
- .cpu_init = cpu_psci_cpu_init,
- .cpu_prepare = cpu_psci_cpu_prepare,
- .cpu_boot = cpu_psci_cpu_boot,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = cpu_psci_cpu_disable,
- .cpu_die = cpu_psci_cpu_die,
-#endif
-};
-
-#endif
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6777a21..fecdbf7 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -636,27 +636,28 @@ static int compat_gpr_get(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- compat_ulong_t reg;
+ void *reg;
switch (idx) {
case 15:
- reg = task_pt_regs(target)->pc;
+ reg = (void *)&task_pt_regs(target)->pc;
break;
case 16:
- reg = task_pt_regs(target)->pstate;
+ reg = (void *)&task_pt_regs(target)->pstate;
break;
case 17:
- reg = task_pt_regs(target)->orig_x0;
+ reg = (void *)&task_pt_regs(target)->orig_x0;
break;
default:
- reg = task_pt_regs(target)->regs[idx];
+ reg = (void *)&task_pt_regs(target)->regs[idx];
}
- ret = copy_to_user(ubuf, &reg, sizeof(reg));
+ ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
+
if (ret)
break;
-
- ubuf += sizeof(reg);
+ else
+ ubuf += sizeof(compat_ulong_t);
}
return ret;
@@ -684,28 +685,28 @@ static int compat_gpr_set(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- compat_ulong_t reg;
-
- ret = copy_from_user(&reg, ubuf, sizeof(reg));
- if (ret)
- return ret;
-
- ubuf += sizeof(reg);
+ void *reg;
switch (idx) {
case 15:
- newregs.pc = reg;
+ reg = (void *)&newregs.pc;
break;
case 16:
- newregs.pstate = reg;
+ reg = (void *)&newregs.pstate;
break;
case 17:
- newregs.orig_x0 = reg;
+ reg = (void *)&newregs.orig_x0;
break;
default:
- newregs.regs[idx] = reg;
+ reg = (void *)&newregs.regs[idx];
}
+ ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
+
+ if (ret)
+ goto out;
+ else
+ ubuf += sizeof(compat_ulong_t);
}
if (valid_user_regs(&newregs.user_regs))
@@ -713,6 +714,7 @@ static int compat_gpr_set(struct task_struct *target,
else
ret = -EINVAL;
+out:
return ret;
}
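The compat_gpr_get()/compat_gpr_set() rewind swaps an endian-safe pattern for pointer arithmetic into 64-bit fields: copying sizeof(compat_ulong_t) bytes from the start of a u64 picks up the low word only on little-endian. The v3.13-rc shape being removed, as a sketch:

    static int copy_compat_reg(void __user *ubuf, u64 field)
    {
            compat_ulong_t reg = field;     /* endian-safe 64->32 truncation */

            return copy_to_user(ubuf, &reg, sizeof(reg));
    }
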
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index bd9bbd0..055cfb8 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,7 +45,6 @@
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
-#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -61,16 +60,6 @@ EXPORT_SYMBOL(processor_id);
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
-#ifdef CONFIG_COMPAT
-#define COMPAT_ELF_HWCAP_DEFAULT \
- (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
- COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
-unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
-#endif
-
static const char *cpu_name;
static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;
@@ -108,11 +97,6 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
-bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
-{
- return phys_id == cpu_logical_map(cpu);
-}
-
static void __init setup_processor(void)
{
struct cpu_info *cpu_info;
@@ -134,24 +118,76 @@ static void __init setup_processor(void)
printk("CPU: %s [%08x] revision %d\n",
cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
- sprintf(init_utsname()->machine, ELF_PLATFORM);
+ sprintf(init_utsname()->machine, "aarch64");
elf_hwcap = 0;
}
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
- if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
+ struct boot_param_header *devtree;
+ unsigned long dt_root;
+
+ /* Check we have a non-NULL DT pointer */
+ if (!dt_phys) {
early_print("\n"
- "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
+ "Error: NULL or invalid device tree blob\n"
"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
+ "\nPlease check your bootloader.\n");
+
+ while (true)
+ cpu_relax();
+
+ }
+
+ devtree = phys_to_virt(dt_phys);
+
+ /* Check device tree validity */
+ if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
+ early_print("\n"
+ "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
+ "Expected 0x%x, found 0x%x\n"
"\nPlease check your bootloader.\n",
- dt_phys, phys_to_virt(dt_phys));
+ dt_phys, devtree, OF_DT_HEADER,
+ be32_to_cpu(devtree->magic));
while (true)
cpu_relax();
}
- machine_name = of_flat_dt_get_machine_name();
+ initial_boot_params = devtree;
+ dt_root = of_get_flat_dt_root();
+
+ machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
+ if (!machine_name)
+ machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
+ if (!machine_name)
+ machine_name = "<unknown>";
+ pr_info("Machine: %s\n", machine_name);
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ base &= PAGE_MASK;
+ size &= PAGE_MASK;
+ if (base + size < PHYS_OFFSET) {
+ pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
+ return;
+ }
+ if (base < PHYS_OFFSET) {
+ pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+ base, PHYS_OFFSET);
+ size -= PHYS_OFFSET - base;
+ base = PHYS_OFFSET;
+ }
+ memblock_add(base, size);
}
/*
@@ -205,11 +241,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
{
- /*
- * Unmask asynchronous aborts early to catch possible system errors.
- */
- local_async_enable();
-
setup_processor();
setup_machine_fdt(__fdt_pointer);
@@ -233,7 +264,6 @@ void __init setup_arch(char **cmdline_p)
psci_init();
cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
- cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
smp_init_cpus();
#endif
@@ -274,7 +304,6 @@ subsys_initcall(topology_init);
static const char *hwcap_str[] = {
"fp",
"asimd",
- "evtstrm",
NULL
};
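The restored early_init_dt_add_memory_arch() clamps device-tree memory blocks to the window the kernel can address. A worked example, assuming PHYS_OFFSET is 0x80000000:

    static void example(void)
    {
            u64 base = 0x7ff00000, size = 0x200000, phys = 0x80000000;

            if (base + size < phys)
                    return;                 /* wholly below RAM: dropped */
            if (base < phys) {              /* straddles the bottom      */
                    size -= phys - base;    /* -> 0x100000               */
                    base = phys;            /* -> 0x80000000             */
            }
            memblock_add(base, size);       /* the surviving 1 MiB       */
    }
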
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index b3fc9f5..e393174 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -100,6 +100,34 @@ struct compat_rt_sigframe {
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+/*
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ */
+#define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_compat_sigreturn)
+#define SVC_SYS_SIGRETURN (0xef000000 | __NR_compat_sigreturn)
+#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_compat_rt_sigreturn)
+#define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_compat_rt_sigreturn)
+
+/*
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+#define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_compat_sigreturn) << 16) | \
+ 0x2700 | __NR_compat_sigreturn)
+#define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
+ 0x2700 | __NR_compat_rt_sigreturn)
+
+const compat_ulong_t aarch32_sigret_code[6] = {
+ /*
+ * AArch32 sigreturn code.
+ * We don't construct an OABI SWI - instead we just set the imm24 field
+ * to the EABI syscall number so that we create a sane disassembly.
+ */
+ MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN,
+ MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
+};
+
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
compat_sigset_t cset;
@@ -122,7 +150,7 @@ static inline int get_sigset_t(sigset_t *set,
return 0;
}
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err;
@@ -446,13 +474,12 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* Check if the handler is written for ARM or Thumb */
thumb = handler & 1;
- if (thumb)
+ if (thumb) {
spsr |= COMPAT_PSR_T_BIT;
- else
+ spsr &= ~COMPAT_PSR_IT_MASK;
+ } else {
spsr &= ~COMPAT_PSR_T_BIT;
-
- /* The IT state must be cleared for both ARM and Thumb-2 */
- spsr &= ~COMPAT_PSR_IT_MASK;
+ }
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
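The sigreturn trampolines move from the kuser32.S byte tables into these assembled constants. A worked encoding, assuming __NR_compat_sigreturn is 119 (0x77) as in the 32-bit ARM EABI (plain userspace C; the low Thumb halfword executes first):

    #include <stdio.h>

    int main(void)
    {
            unsigned nr = 119;                  /* __NR_compat_sigreturn */

            printf("%#x\n", 0xe3a07000 | nr);   /* mov r7, #119 -> 0xe3a07077 */
            printf("%#x\n", 0xef000000 | nr);   /* svc 0x77     -> 0xef000077 */
            printf("%#x\n", ((0xdf00 | nr) << 16) | 0x2700 | nr);
            return 0;                           /* movs r7; svc -> 0xdf772777 */
    }
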
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a0c2ca6..78db90d 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,7 +39,6 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
-#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -55,6 +54,7 @@
* where to place its SVC stack
*/
struct secondary_data secondary_data;
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
@@ -63,16 +63,61 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ __flush_dcache_area(start, size);
+}
+
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- if (cpu_ops[cpu]->cpu_boot)
- return cpu_ops[cpu]->cpu_boot(cpu);
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the pen release flag.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
+ * Send an event, causing the secondaries to read pen_release.
+ */
+ sev();
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ if (secondary_holding_pen_release == INVALID_HWID)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * Now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ raw_spin_unlock(&boot_lock);
- return -EOPNOTSUPP;
+ return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
}
static DECLARE_COMPLETION(cpu_running);
@@ -142,13 +187,17 @@ asmlinkage void secondary_start_kernel(void)
preempt_disable();
trace_hardirqs_off();
- if (cpu_ops[cpu]->cpu_postboot)
- cpu_ops[cpu]->cpu_postboot();
+ /*
+ * Let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ write_pen_release(INVALID_HWID);
/*
- * Enable GIC and timers.
+ * Synchronise with the boot thread.
*/
- notify_cpu_starting(cpu);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -158,9 +207,13 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
+ /*
+ * Enable GIC and timers.
+ */
+ notify_cpu_starting(cpu);
+
local_irq_enable();
local_fiq_enable();
- local_async_enable();
/*
* OK, it's off to the idle thread for us
@@ -168,113 +221,39 @@ asmlinkage void secondary_start_kernel(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-#ifdef CONFIG_HOTPLUG_CPU
-static int op_cpu_disable(unsigned int cpu)
-{
- /*
- * If we don't have a cpu_die method, abort before we reach the point
- * of no return. CPU0 may not have an cpu_ops, so test for it.
- */
- if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
- return -EOPNOTSUPP;
-
- /*
- * We may need to abort a hot unplug for some other mechanism-specific
- * reason.
- */
- if (cpu_ops[cpu]->cpu_disable)
- return cpu_ops[cpu]->cpu_disable(cpu);
-
- return 0;
-}
-
-/*
- * __cpu_disable runs on the processor to be shutdown.
- */
-int __cpu_disable(void)
+void __init smp_cpus_done(unsigned int max_cpus)
{
- unsigned int cpu = smp_processor_id();
- int ret;
-
- ret = op_cpu_disable(cpu);
- if (ret)
- return ret;
-
- /*
- * Take this CPU offline. Once we clear this, we can't return,
- * and we must not schedule until we're ready to give up the cpu.
- */
- set_cpu_online(cpu, false);
-
- /*
- * OK - migrate IRQs away from this CPU
- */
- migrate_irqs();
-
- /*
- * Remove this CPU from the vm mask set of all processes.
- */
- clear_tasks_mm_cpumask(cpu);
-
- return 0;
+ pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
}
-static DECLARE_COMPLETION(cpu_died);
-
-/*
- * called on the thread which is asking for a CPU to be shutdown -
- * waits until shutdown has completed, or it is timed out.
- */
-void __cpu_die(unsigned int cpu)
+void __init smp_prepare_boot_cpu(void)
{
- if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
- pr_crit("CPU%u: cpu didn't die\n", cpu);
- return;
- }
- pr_notice("CPU%u: shutdown\n", cpu);
}
-/*
- * Called from the idle thread for the CPU which has been shutdown.
- *
- * Note that we disable IRQs here, but do not re-enable them
- * before returning to the caller. This is also the behaviour
- * of the other hotplug-cpu capable cores, so presumably coming
- * out of idle fixes this.
- */
-void cpu_die(void)
-{
- unsigned int cpu = smp_processor_id();
-
- idle_task_exit();
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
- local_irq_disable();
+static const struct smp_enable_ops *enable_ops[] __initconst = {
+ &smp_spin_table_ops,
+ &smp_psci_ops,
+ NULL,
+};
- /* Tell __cpu_die() that this CPU is now safe to dispose of */
- complete(&cpu_died);
+static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
- /*
- * Actually shutdown the CPU. This must never fail. The specific hotplug
- * mechanism must perform all required cache maintenance to ensure that
- * no dirty lines are lost in the process of shutting down the CPU.
- */
- cpu_ops[cpu]->cpu_die(cpu);
+static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
+{
+ const struct smp_enable_ops **ops = enable_ops;
- BUG();
-}
-#endif
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
-void __init smp_cpus_done(unsigned int max_cpus)
-{
- pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-}
+ ops++;
+ }
-void __init smp_prepare_boot_cpu(void)
-{
+ return NULL;
}
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
@@ -282,8 +261,9 @@ static void (*smp_cross_call)(const struct cpumask *, unsigned int);
*/
void __init smp_init_cpus(void)
{
+ const char *enable_method;
struct device_node *dn = NULL;
- unsigned int i, cpu = 1;
+ int i, cpu = 1;
bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -352,10 +332,25 @@ void __init smp_init_cpus(void)
if (cpu >= NR_CPUS)
goto next;
- if (cpu_read_ops(dn, cpu) != 0)
+ /*
+ * We currently support only the "spin-table" enable-method.
+ */
+ enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method) {
+ pr_err("%s: missing enable-method property\n",
+ dn->full_name);
goto next;
+ }
+
+ smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
+
+ if (!smp_enable_ops[cpu]) {
+ pr_err("%s: invalid enable-method property: %s\n",
+ dn->full_name, enable_method);
+ goto next;
+ }
- if (cpu_ops[cpu]->cpu_init(dn, cpu))
+ if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -385,8 +380,8 @@ next:
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int err;
- unsigned int cpu, ncores = num_possible_cpus();
+ int cpu, err;
+ unsigned int ncores = num_possible_cpus();
/*
* are we trying to boot more cores than exist?
@@ -413,10 +408,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
- if (!cpu_ops[cpu])
+ if (!smp_enable_ops[cpu])
continue;
- err = cpu_ops[cpu]->cpu_prepare(cpu);
+ err = smp_enable_ops[cpu]->prepare_cpu(cpu);
if (err)
continue;
@@ -456,7 +451,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
prec >= 4 ? " " : "");
- for_each_online_cpu(cpu)
+ for_each_present_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
seq_printf(p, " %s\n", ipi_types[i]);
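The pen-release handshake restored here has two sides; the secondary's half lives in the head.S pen earlier in this patch and in secondary_start_kernel() above. Roughly, in C (a paraphrase of the assembly, not real code):

    static void secondary_side(u64 my_hwid)
    {
            while (secondary_holding_pen_release != my_hwid)
                    wfe();                          /* parked until sev()    */

            write_pen_release(INVALID_HWID);        /* ack to the boot CPU   */
            raw_spin_lock(&boot_lock);              /* wait for the booter   */
            raw_spin_unlock(&boot_lock);            /* ...to finish its half */
    }
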
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
new file mode 100644
index 0000000..0c53330
--- /dev/null
+++ b/arch/arm64/kernel/smp_psci.c
@@ -0,0 +1,53 @@
+/*
+ * PSCI SMP initialisation
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+#include <asm/psci.h>
+#include <asm/smp_plat.h>
+
+static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
+{
+ return 0;
+}
+
+static int __init smp_psci_prepare_cpu(int cpu)
+{
+ int err;
+
+ if (!psci_ops.cpu_on) {
+ pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
+ return -ENODEV;
+ }
+
+ err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
+ if (err) {
+ pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct smp_enable_ops smp_psci_ops __initconst = {
+ .name = "psci",
+ .init_cpu = smp_psci_init_cpu,
+ .prepare_cpu = smp_psci_prepare_cpu,
+};
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 44c2280..7c35fa6 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -16,39 +16,15 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
-#include <asm/cpu_ops.h>
-#include <asm/cputype.h>
-#include <asm/smp_plat.h>
-
-extern void secondary_holding_pen(void);
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void write_pen_release(u64 val)
-{
- void *start = (void *)&secondary_holding_pen_release;
- unsigned long size = sizeof(secondary_holding_pen_release);
- secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
-}
-
-
-static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
+static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
{
/*
* Determine the address from which the CPU is polling.
@@ -64,7 +40,7 @@ static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
return 0;
}
-static int smp_spin_table_cpu_prepare(unsigned int cpu)
+static int __init smp_spin_table_prepare_cpu(int cpu)
{
void **release_addr;
@@ -72,16 +48,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
-
- /*
- * We write the release address as LE regardless of the native
- * endianess of the kernel. Therefore, any boot-loaders that
- * read this address need to convert this address to the
- * boot-loader's endianess before jumping. This is mandated by
- * the boot protocol.
- */
- release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
-
+ release_addr[0] = (void *)__pa(secondary_holding_pen);
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
/*
@@ -92,60 +59,8 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
return 0;
}
-static int smp_spin_table_cpu_boot(unsigned int cpu)
-{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
- /*
- * Update the pen release flag.
- */
- write_pen_release(cpu_logical_map(cpu));
-
- /*
- * Send an event, causing the secondaries to read pen_release.
- */
- sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
-
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
-
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
-}
-
-void smp_spin_table_cpu_postboot(void)
-{
- /*
- * Let the primary processor know we're out of the pen.
- */
- write_pen_release(INVALID_HWID);
-
- /*
- * Synchronise with the boot thread.
- */
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
-}
-
-const struct cpu_operations smp_spin_table_ops = {
+const struct smp_enable_ops smp_spin_table_ops __initconst = {
.name = "spin-table",
- .cpu_init = smp_spin_table_cpu_init,
- .cpu_prepare = smp_spin_table_cpu_prepare,
- .cpu_boot = smp_spin_table_cpu_boot,
- .cpu_postboot = smp_spin_table_cpu_postboot,
+ .init_cpu = smp_spin_table_init_cpu,
+ .prepare_cpu = smp_spin_table_prepare_cpu,
};
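For the firmware side of "spin-table": smp_spin_table_prepare_cpu() writes the pen's physical address to the release location and cleans it to the point of coherency; each parked CPU is expected to poll that location and jump. A sketch of the assumed loader behaviour (not kernel code; note the rewind also drops the v3.13 rule that the written value is always little-endian):

    #include <stdint.h>

    void park_secondary(volatile uint64_t *release_addr)
    {
            uint64_t entry;

            while ((entry = *release_addr) == 0)
                    wfe();          /* kernel issues sev() after the write */

            ((void (*)(void))(uintptr_t)entry)();   /* -> secondary_holding_pen */
    }
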
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index 423a5b3..a1b19ed 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -59,48 +59,48 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* extension.
*/
compat_sys_pread64_wrapper:
- regs_to_64 x3, x4, x5
+ orr x3, x4, x5, lsl #32
b sys_pread64
ENDPROC(compat_sys_pread64_wrapper)
compat_sys_pwrite64_wrapper:
- regs_to_64 x3, x4, x5
+ orr x3, x4, x5, lsl #32
b sys_pwrite64
ENDPROC(compat_sys_pwrite64_wrapper)
compat_sys_truncate64_wrapper:
- regs_to_64 x1, x2, x3
+ orr x1, x2, x3, lsl #32
b sys_truncate
ENDPROC(compat_sys_truncate64_wrapper)
compat_sys_ftruncate64_wrapper:
- regs_to_64 x1, x2, x3
+ orr x1, x2, x3, lsl #32
b sys_ftruncate
ENDPROC(compat_sys_ftruncate64_wrapper)
compat_sys_readahead_wrapper:
- regs_to_64 x1, x2, x3
+ orr x1, x2, x3, lsl #32
mov w2, w4
b sys_readahead
ENDPROC(compat_sys_readahead_wrapper)
compat_sys_fadvise64_64_wrapper:
mov w6, w1
- regs_to_64 x1, x2, x3
- regs_to_64 x2, x4, x5
+ orr x1, x2, x3, lsl #32
+ orr x2, x4, x5, lsl #32
mov w3, w6
b sys_fadvise64_64
ENDPROC(compat_sys_fadvise64_64_wrapper)
compat_sys_sync_file_range2_wrapper:
- regs_to_64 x2, x2, x3
- regs_to_64 x3, x4, x5
+ orr x2, x2, x3, lsl #32
+ orr x3, x4, x5, lsl #32
b sys_sync_file_range2
ENDPROC(compat_sys_sync_file_range2_wrapper)
compat_sys_fallocate_wrapper:
- regs_to_64 x2, x2, x3
- regs_to_64 x3, x4, x5
+ orr x2, x2, x3, lsl #32
+ orr x3, x4, x5, lsl #32
b sys_fallocate
ENDPROC(compat_sys_fallocate_wrapper)
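Every wrapper here re-assembles a 64-bit argument that compat userspace passed as a 32-bit register pair. The regs_to_64 macro (v3.13-rc) exists so big-endian configurations can swap which register carries the high half; the restored plain orr hard-codes the little-endian pairing. The merge itself, in C:

    static u64 merge_compat_arg(u32 lo, u32 hi)
    {
            return ((u64)hi << 32) | lo;    /* what the orr computes */
    }
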
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 29c39d5..03dc371 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -61,6 +61,13 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
+static u64 sched_clock_mult __read_mostly;
+
+unsigned long long notrace sched_clock(void)
+{
+ return arch_timer_read_counter() * sched_clock_mult;
+}
+
void __init time_init(void)
{
u32 arch_timer_rate;
@@ -71,6 +78,9 @@ void __init time_init(void)
if (!arch_timer_rate)
panic("Unable to initialise architected timer.\n");
+ /* Cache the sched_clock multiplier to save a divide in the hot path. */
+ sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
+
/* Calibrate the delay loop directly */
lpj_fine = arch_timer_rate / HZ;
}
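The restored sched_clock() trades a divide for a cached multiplier, at the cost of truncation whenever NSEC_PER_SEC is not a multiple of the timer rate. For example (the rates are illustrative):

    static u64 ticks_to_ns(u64 ticks, u32 rate_hz)
    {
            return ticks * (1000000000UL / rate_hz);
            /* 100 MHz -> mult 10, exact; 19.2 MHz -> mult 52 instead of
             * 52.083..., so sched_clock() runs ~0.16% slow */
    }
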
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 65d40cf..6a389dc 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -58,10 +58,7 @@ static struct page *vectors_page[1];
static int alloc_vectors_page(void)
{
extern char __kuser_helper_start[], __kuser_helper_end[];
- extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
- int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long vpage;
vpage = get_zeroed_page(GFP_ATOMIC);
@@ -75,7 +72,7 @@ static int alloc_vectors_page(void)
/* sigreturn code */
memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- __aarch32_sigret_code_start, sigret_sz);
+ aarch32_sigret_code, sizeof(aarch32_sigret_code));
flush_icache_range(vpage, vpage + PAGE_SIZE);
vectors_page[0] = virt_to_page(vpage);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 5161ad9..f8ab9d8 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -54,6 +54,7 @@ SECTIONS
}
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
+ *(.smp.pen.text)
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
@@ -96,13 +97,30 @@ SECTIONS
PERCPU_SECTION(64)
__init_end = .;
-
- . = ALIGN(PAGE_SIZE);
- _data = .;
- __data_loc = _data - LOAD_OFFSET;
- _sdata = .;
- RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
- _edata = .;
+ . = ALIGN(THREAD_SIZE);
+ __data_loc = .;
+
+ .data : AT(__data_loc) {
+ _data = .; /* address in memory */
+ _sdata = .;
+
+ /*
+ * first, the init task union, aligned
+ * to an 8192 byte boundary.
+ */
+ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
+ CACHELINE_ALIGNED_DATA(64)
+ READ_MOSTLY_DATA(64)
+
+ /*
+ * and the usual data section
+ */
+ DATA_DATA
+ CONSTRUCTORS
+
+ _edata = .;
+ }
_edata_loc = __data_loc + SIZEOF(.data);
BSS_SECTION(0, 0, 0)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 4480ab3..21e9082 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -21,7 +21,6 @@ config KVM
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select ANON_INODES
- select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
select KVM_ARM_VGIC
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 3f0731e..2c3ff67 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -248,26 +248,6 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
return kvm_reset_vcpu(vcpu);
}
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
-{
- int target = kvm_target_cpu();
-
- if (target < 0)
- return -ENODEV;
-
- memset(init, 0, sizeof(*init));
-
- /*
- * For now, we don't return any features.
- * In future, we might use features to return target
- * specific features available for the preferred
- * target type.
- */
- init->target = (__u32)target;
-
- return 0;
-}
-
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 8da5606..9beaca0 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -47,29 +47,21 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
/**
- * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
- * instruction executed by a guest
- *
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
* @vcpu: the vcpu pointer
*
- * WFE: Yield the CPU and come back to this vcpu when the scheduler
- * decides to.
- * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * Simply call kvm_vcpu_block(), which will halt execution of
* world-switches and schedule other host processes until there is an
* incoming IRQ or FIQ to the VM.
*/
-static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
- kvm_vcpu_on_spin(vcpu);
- else
- kvm_vcpu_block(vcpu);
-
+ kvm_vcpu_block(vcpu);
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
- [ESR_EL2_EC_WFI] = kvm_handle_wfx,
+ [ESR_EL2_EC_WFI] = kvm_handle_wfi,
[ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
[ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access,
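The WFE handling being removed let a spinning guest yield to another vcpu instead of blocking; the rewound handler treats both traps as WFI. The v3.13-rc shape, reassembled from the deleted lines above:

    static int handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
    {
            if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
                    kvm_vcpu_on_spin(vcpu); /* WFE: directed yield      */
            else
                    kvm_vcpu_block(vcpu);   /* WFI: sleep until IRQ/FIQ */

            return 1;
    }
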
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 2b0244d..ba84e67 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -74,10 +74,7 @@ __do_hyp_init:
msr mair_el2, x4
isb
- mrs x4, sctlr_el2
- and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
- ldr x5, =SCTLR_EL2_FLAGS
- orr x4, x4, x5
+ mov x4, #SCTLR_EL2_FLAGS
msr sctlr_el2, x4
isb
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 3b47c36..1ac0bbb 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -403,14 +403,6 @@ __kvm_hyp_code_start:
ldr w9, [x2, #GICH_ELRSR0]
ldr w10, [x2, #GICH_ELRSR1]
ldr w11, [x2, #GICH_APR]
-CPU_BE( rev w4, w4 )
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
-CPU_BE( rev w7, w7 )
-CPU_BE( rev w8, w8 )
-CPU_BE( rev w9, w9 )
-CPU_BE( rev w10, w10 )
-CPU_BE( rev w11, w11 )
str w4, [x3, #VGIC_CPU_HCR]
str w5, [x3, #VGIC_CPU_VMCR]
@@ -429,7 +421,6 @@ CPU_BE( rev w11, w11 )
ldr w4, [x3, #VGIC_CPU_NR_LR]
add x3, x3, #VGIC_CPU_LR
1: ldr w5, [x2], #4
-CPU_BE( rev w5, w5 )
str w5, [x3], #4
sub w4, w4, #1
cbnz w4, 1b
@@ -455,9 +446,6 @@ CPU_BE( rev w5, w5 )
ldr w4, [x3, #VGIC_CPU_HCR]
ldr w5, [x3, #VGIC_CPU_VMCR]
ldr w6, [x3, #VGIC_CPU_APR]
-CPU_BE( rev w4, w4 )
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
str w4, [x2, #GICH_HCR]
str w5, [x2, #GICH_VMCR]
@@ -468,7 +456,6 @@ CPU_BE( rev w6, w6 )
ldr w4, [x3, #VGIC_CPU_NR_LR]
add x3, x3, #VGIC_CPU_LR
1: ldr w5, [x3], #4
-CPU_BE( rev w5, w5 )
str w5, [x2], #4
sub w4, w4, #1
cbnz w4, 1b
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0cb8742..de2de5d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -31,6 +31,7 @@
#include <linux/sort.h>
#include <linux/of_fdt.h>
+#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -38,9 +39,17 @@
#include "mm.h"
+static unsigned long phys_initrd_start __initdata = 0;
+static unsigned long phys_initrd_size __initdata = 0;
+
phys_addr_t memstart_addr __read_mostly = 0;
-#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
+{
+ phys_initrd_start = start;
+ phys_initrd_size = end - start;
+}
+
static int __init early_initrd(char *p)
{
unsigned long start, size;
@@ -50,13 +59,12 @@ static int __init early_initrd(char *p)
if (*endp == ',') {
size = memparse(endp + 1, NULL);
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(start + size);
+ phys_initrd_start = start;
+ phys_initrd_size = size;
}
return 0;
}
early_param("initrd", early_initrd);
-#endif
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
@@ -129,8 +137,13 @@ void __init arm64_memblock_init(void)
/* Register the kernel text, kernel data and initrd with memblock */
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start)
- memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
+ if (phys_initrd_size) {
+ memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+ /* Now convert initrd to virtual addresses */
+ initrd_start = __phys_to_virt(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ }
#endif
/*
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 2bb1d58..1725cd6 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -77,24 +77,8 @@ EXPORT_SYMBOL(__ioremap);
void __iounmap(volatile void __iomem *io_addr)
{
- unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
- /*
- * We could get an address outside vmalloc range in case
- * of ioremap_cache() reusing a RAM mapping.
- */
- if (VMALLOC_START <= addr && addr < VMALLOC_END)
- vunmap((void *)addr);
+ vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);
-
-void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
-{
- /* For normal memory we already have a cacheable mapping. */
- if (pfn_valid(__phys_to_pfn(phys_addr)))
- return (void __iomem *)__phys_to_virt(phys_addr);
-
- return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap_cache);
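The dropped range check matters because the also-dropped ioremap_cache() could return a linear-map address for RAM that was never vmapped, and vunmap() must only see vmalloc-range addresses. The guarded form being deleted, as a sketch:

    static void guarded_iounmap(volatile void __iomem *io_addr)
    {
            unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

            if (VMALLOC_START <= addr && addr < VMALLOC_END)
                    vunmap((void *)addr);
            /* linear-map addresses from ioremap_cache() fall through */
    }
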
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 0f7fec5..b1b31bb 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -111,12 +111,12 @@ ENTRY(__cpu_setup)
bl __flush_dcache_all
mov lr, x28
ic iallu // I+BTB cache invalidate
- tlbi vmalle1is // invalidate I + D TLBs
dsb sy
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
msr mdscr_el1, xzr // Reset mdscr_el1
+ tlbi vmalle1is // invalidate I + D TLBs
/*
* Memory region attributes for LPAE:
*
@@ -162,9 +162,9 @@ ENDPROC(__cpu_setup)
* CE0 XWHW CZ ME TEEA S
* .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
* 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
- * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
+ * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings
*/
.type crval, #object
crval:
- .word 0x000802e2 // clear
+ .word 0x030802e2 // clear
.word 0x0405d11d // set
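crval is consumed as a clear/set mask pair over SCTLR_EL1 in __cpu_setup. Growing the clear word by 0x03000000 zeroes bits 25 (EE) and 24 (E0E) unconditionally, forcing little-endian data accesses at EL1/EL0 rather than preserving them for big-endian kernels. The application of the pair, as a sketch:

    static u32 apply_crval(u32 sctlr, u32 clear, u32 set)
    {
            return (sctlr & ~clear) | set;
            /* clear = 0x030802e2 now also wipes EE/E0E (bits 25/24) */
    }
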
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index 74a8d87..be24040 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,2 @@
-xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
+xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o)
obj-y := xen-arm.o hypercall.o