author     Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
committer  Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
commit     62b8c978ee6b8d135d9e7953221de58000dba986 (patch)
tree       683b04b2e627f6710c22c151b23c8cc9a165315e /arch/arm64/include
parent     78fd82238d0e5716578c326404184a27ba67fd6e (diff)
download   linux-fsl-qoriq-62b8c978ee6b8d135d9e7953221de58000dba986.tar.xz
Rewind v3.13-rc3+ (78fd82238d0e5716) to v3.12
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/Kbuild                      1
-rw-r--r--  arch/arm64/include/asm/arch_timer.h               42
-rw-r--r--  arch/arm64/include/asm/assembler.h                31
-rw-r--r--  arch/arm64/include/asm/atomic.h                   14
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h                   2
-rw-r--r--  arch/arm64/include/asm/compat.h                   14
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h                  59
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h              14
-rw-r--r--  arch/arm64/include/asm/elf.h                      18
-rw-r--r--  arch/arm64/include/asm/hwcap.h                    11
-rw-r--r--  arch/arm64/include/asm/io.h                       14
-rw-r--r--  arch/arm64/include/asm/irq.h                       1
-rw-r--r--  arch/arm64/include/asm/irqflags.h                  3
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h                   8
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h              61
-rw-r--r--  arch/arm64/include/asm/kvm_host.h                  6
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h                  12
-rw-r--r--  arch/arm64/include/asm/memory.h                   11
-rw-r--r--  arch/arm64/include/asm/pgalloc.h                   9
-rw-r--r--  arch/arm64/include/asm/pgtable-2level-hwdef.h      4
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h             4
-rw-r--r--  arch/arm64/include/asm/pgtable.h                  35
-rw-r--r--  arch/arm64/include/asm/processor.h                 5
-rw-r--r--  arch/arm64/include/asm/prom.h                      1
-rw-r--r--  arch/arm64/include/asm/psci.h                     19
-rw-r--r--  arch/arm64/include/asm/ptrace.h                    1
-rw-r--r--  arch/arm64/include/asm/smp.h                      15
-rw-r--r--  arch/arm64/include/asm/spinlock.h                 83
-rw-r--r--  arch/arm64/include/asm/spinlock_types.h           15
-rw-r--r--  arch/arm64/include/asm/syscall.h                   6
-rw-r--r--  arch/arm64/include/asm/thread_info.h               6
-rw-r--r--  arch/arm64/include/asm/virt.h                      3
-rw-r--r--  arch/arm64/include/asm/xen/page-coherent.h        47
-rw-r--r--  arch/arm64/include/uapi/asm/byteorder.h            4
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h                1
35 files changed, 132 insertions(+), 448 deletions(-)
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 519f89f..79a642d 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -50,4 +50,3 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
-generic-y += preempt.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 9400596..c9f1d28 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -92,49 +92,19 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline u32 arch_timer_get_cntkctl(void)
+static inline void arch_counter_set_user_access(void)
{
u32 cntkctl;
+
+ /* Disable user access to the timers and the physical counter. */
asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- return cntkctl;
-}
+ cntkctl &= ~((3 << 8) | (1 << 0));
-static inline void arch_timer_set_cntkctl(u32 cntkctl)
-{
+ /* Enable user access to the virtual counter and frequency. */
+ cntkctl |= (1 << 1);
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
-static inline void arch_counter_set_user_access(void)
-{
- u32 cntkctl = arch_timer_get_cntkctl();
-
- /* Disable user access to the timers and the physical counter */
- /* Also disable virtual event stream */
- cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
- | ARCH_TIMER_USR_VT_ACCESS_EN
- | ARCH_TIMER_VIRT_EVT_EN
- | ARCH_TIMER_USR_PCT_ACCESS_EN);
-
- /* Enable user access to the virtual counter */
- cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
-
- arch_timer_set_cntkctl(cntkctl);
-}
-
-static inline void arch_timer_evtstrm_enable(int divider)
-{
- u32 cntkctl = arch_timer_get_cntkctl();
- cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
- /* Set the divider and enable virtual event stream */
- cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
- | ARCH_TIMER_VIRT_EVT_EN;
- arch_timer_set_cntkctl(cntkctl);
- elf_hwcap |= HWCAP_EVTSTRM;
-#ifdef CONFIG_COMPAT
- compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
-#endif
-}
-
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
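
Note: the restored arch_counter_set_user_access() above uses raw magic numbers. As a reading aid, here is the same logic with the CNTKCTL_EL1 bit names spelled out; the macro names below are illustrative only, chosen to match the CNTKCTL_EL1 fields in the ARMv8 architecture manual (the newer kernel expresses the same bits via ARCH_TIMER_* defines), not a kernel API.

/* Illustrative names for the CNTKCTL_EL1 bits used above. */
#define CNTKCTL_EL0PCTEN        (1 << 0)        /* EL0 physical counter access */
#define CNTKCTL_EL0VCTEN        (1 << 1)        /* EL0 virtual counter access */
#define CNTKCTL_EL0VTEN         (1 << 8)        /* EL0 virtual timer access */
#define CNTKCTL_EL0PTEN         (1 << 9)        /* EL0 physical timer access */

static inline void arch_counter_set_user_access_sketch(void)
{
        u32 cntkctl;

        asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
        /* (3 << 8) | (1 << 0) is EL0PTEN | EL0VTEN | EL0PCTEN. */
        cntkctl &= ~(CNTKCTL_EL0PTEN | CNTKCTL_EL0VTEN | CNTKCTL_EL0PCTEN);
        /* (1 << 1) is EL0VCTEN: userspace may read the virtual counter. */
        cntkctl |= CNTKCTL_EL0VCTEN;
        asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}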
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index fd3e392..5aceb83 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -115,34 +115,3 @@ lr .req x30 // link register
.align 7
b \label
.endm
-
-/*
- * Select code when configured for BE.
- */
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define CPU_BE(code...) code
-#else
-#define CPU_BE(code...)
-#endif
-
-/*
- * Select code when configured for LE.
- */
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define CPU_LE(code...)
-#else
-#define CPU_LE(code...) code
-#endif
-
-/*
- * Define a macro that constructs a 64-bit value by concatenating two
- * 32-bit registers. Note that on big endian systems the order of the
- * registers is swapped.
- */
-#ifndef CONFIG_CPU_BIG_ENDIAN
- .macro regs_to_64, rd, lbits, hbits
-#else
- .macro regs_to_64, rd, hbits, lbits
-#endif
- orr \rd, \lbits, \hbits, lsl #32
- .endm
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aa..8363644 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -126,6 +126,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long tmp, tmp2;
+
+ asm volatile("// atomic_clear_mask\n"
+"1: ldxr %0, %2\n"
+" bic %0, %0, %3\n"
+" stxr %w1, %0, %2\n"
+" cbnz %w1, 1b"
+ : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
+ : "Ir" (mask)
+ : "cc");
+}
+
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
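
Note: the ldxr/bic/stxr loop in the restored atomic_clear_mask() is semantically an atomic *addr &= ~mask with no ordering guarantee beyond atomicity (the asm carries no acquire/release). A minimal userspace sketch of the same operation, using a GCC/clang __atomic builtin rather than kernel primitives:

static inline void atomic_clear_mask_sketch(unsigned long mask,
                                            unsigned long *addr)
{
        /* Atomic AND-NOT, relaxed ordering, matching the LL/SC loop. */
        __atomic_and_fetch(addr, ~mask, __ATOMIC_RELAXED);
}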
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 3914c0d..8a8ce0e 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -173,6 +173,4 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
-#define cmpxchg64_relaxed(ptr,o,n) cmpxchg_local((ptr),(o),(n))
-
#endif /* __ASM_CMPXCHG_H */
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index fda2704..899af80 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -26,11 +26,7 @@
#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
-#ifdef __AARCH64EB__
-#define COMPAT_UTS_MACHINE "armv8b\0\0"
-#else
#define COMPAT_UTS_MACHINE "armv8l\0\0"
-#endif
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -77,23 +73,13 @@ struct compat_timeval {
};
struct compat_stat {
-#ifdef __AARCH64EB__
- short st_dev;
- short __pad1;
-#else
compat_dev_t st_dev;
-#endif
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_ushort_t st_nlink;
__compat_uid16_t st_uid;
__compat_gid16_t st_gid;
-#ifdef __AARCH64EB__
- short st_rdev;
- short __pad2;
-#else
compat_dev_t st_rdev;
-#endif
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
deleted file mode 100644
index c4cdb5e..0000000
--- a/arch/arm64/include/asm/cpu_ops.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_CPU_OPS_H
-#define __ASM_CPU_OPS_H
-
-#include <linux/init.h>
-#include <linux/threads.h>
-
-struct device_node;
-
-/**
- * struct cpu_operations - Callback operations for hotplugging CPUs.
- *
- * @name: Name of the property as appears in a devicetree cpu node's
- * enable-method property.
- * @cpu_init: Reads any data necessary for a specific enable-method from the
- * devicetree, for a given cpu node and proposed logical id.
- * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
- * mechanism for doing so, tests whether it is possible to boot
- * the given CPU.
- * @cpu_boot: Boots a cpu into the kernel.
- * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary
- * synchronisation. Called from the cpu being booted.
- * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
- * reason, which will cause the hot unplug to be aborted. Called
- * from the cpu to be killed.
- * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
- * cpu being killed.
- */
-struct cpu_operations {
- const char *name;
- int (*cpu_init)(struct device_node *, unsigned int);
- int (*cpu_prepare)(unsigned int);
- int (*cpu_boot)(unsigned int);
- void (*cpu_postboot)(void);
-#ifdef CONFIG_HOTPLUG_CPU
- int (*cpu_disable)(unsigned int cpu);
- void (*cpu_die)(unsigned int cpu);
-#endif
-};
-
-extern const struct cpu_operations *cpu_ops[NR_CPUS];
-extern int __init cpu_read_ops(struct device_node *dn, int cpu);
-extern void __init cpu_read_bootcpu_ops(void);
-
-#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index fd0c0c0..8d18100 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,15 +23,11 @@
#include <asm-generic/dma-coherent.h>
-#include <xen/xen.h>
-#include <asm/xen/hypervisor.h>
-
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (unlikely(!dev) || !dev->archdata.dma_ops)
return dma_ops;
@@ -39,14 +35,6 @@ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
return dev->archdata.dma_ops;
}
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- if (xen_initial_domain())
- return xen_dma_ops;
- else
- return __generic_dma_ops(dev);
-}
-
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
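
Note: after this revert, get_dma_ops() resolves directly to the per-device ops or the global dma_ops, with no Xen indirection. A sketch of how a caller consumes the result; map_one_page_sketch is a made-up helper, and the ->map_page signature assumed here is the v3.12-era struct dma_map_ops one.

static inline dma_addr_t map_one_page_sketch(struct device *dev,
                                             struct page *page, size_t size,
                                             enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        /* Map one full page at offset 0, with no DMA attributes. */
        return ops->map_page(dev, page, 0, size, dir, NULL);
}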
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 01d3aab..e7fa87f 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -90,24 +90,11 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
-#ifdef __AARCH64EB__
-#define ELF_DATA ELFDATA2MSB
-#else
#define ELF_DATA ELFDATA2LSB
-#endif
#define ELF_ARCH EM_AARCH64
-/*
- * This yields a string that ld.so will use to load implementation
- * specific libraries for optimization. This is more specific in
- * intent than poking at uname or /proc/cpuinfo.
- */
#define ELF_PLATFORM_SIZE 16
-#ifdef __AARCH64EB__
-#define ELF_PLATFORM ("aarch64_be")
-#else
#define ELF_PLATFORM ("aarch64")
-#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -162,12 +149,7 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#ifdef CONFIG_COMPAT
-
-#ifdef __AARCH64EB__
-#define COMPAT_ELF_PLATFORM ("v8b")
-#else
#define COMPAT_ELF_PLATFORM ("v8l")
-#endif
#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 6cddbb0..e2950b0 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,7 +30,6 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
-#define COMPAT_HWCAP_EVTSTRM (1 << 21)
#ifndef __ASSEMBLY__
/*
@@ -38,11 +37,11 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-
-#ifdef CONFIG_COMPAT
-#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
-extern unsigned int compat_elf_hwcap;
-#endif
+#define COMPAT_ELF_HWCAP (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
extern unsigned long elf_hwcap;
#endif
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 5727697..1d12f89 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,14 +22,11 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/pgtable.h>
-#include <xen/xen.h>
-
/*
* Generic IO read/write. These perform native-endian accesses.
*/
@@ -227,9 +224,8 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
*/
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void __iounmap(volatile void __iomem *addr);
-extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
-#define PROT_DEFAULT (pgprot_default | PTE_DIRTY)
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
@@ -237,6 +233,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
+#define ioremap_cached(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL))
#define iounmap __iounmap
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
@@ -266,12 +263,5 @@ extern int devmem_is_allowed(unsigned long pfn);
*/
#define xlate_dev_kmem_ptr(p) p
-struct bio_vec;
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-
#endif /* __KERNEL__ */
#endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index e1f7ecd..0332fc0 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,7 +4,6 @@
#include <asm-generic/irq.h>
extern void (*handle_arch_irq)(struct pt_regs *);
-extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index b2fcfbc..aa11943 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -56,9 +56,6 @@ static inline void arch_local_irq_disable(void)
#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
-#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
-#define local_async_disable() asm("msr daifset, #4" : : : "memory")
-
/*
* Save the current interrupt enable state.
*/
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c98ef47..a5f28e2 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -63,7 +63,6 @@
* TAC: Trap ACTLR
* TSC: Trap SMC
* TSW: Trap cache operations by set/way
- * TWE: Trap WFE
* TWI: Trap WFI
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
@@ -73,9 +72,8 @@
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
*/
-#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
- HCR_BSU_IS | HCR_FB | HCR_TAC | \
- HCR_AMO | HCR_IMO | HCR_FMO | \
+#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+ HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
@@ -244,6 +242,4 @@
#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
-#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0)
-
#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index dd8ecfc..eec0738 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -177,65 +177,4 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
}
-static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
-{
- return vcpu_sys_reg(vcpu, MPIDR_EL1);
-}
-
-static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
-{
- if (vcpu_mode_is_32bit(vcpu))
- *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
- else
- vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
-}
-
-static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
-{
- if (vcpu_mode_is_32bit(vcpu))
- return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
-
- return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
-}
-
-static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return be16_to_cpu(data & 0xffff);
- case 4:
- return be32_to_cpu(data & 0xffffffff);
- default:
- return be64_to_cpu(data);
- }
- }
-
- return data; /* Leave LE untouched */
-}
-
-static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_be16(data & 0xffff);
- case 4:
- return cpu_to_be32(data & 0xffffffff);
- default:
- return cpu_to_be64(data);
- }
- }
-
- return data; /* Leave LE untouched */
-}
-
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5d85a02..0859a4d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -36,6 +36,11 @@
#define KVM_VCPU_MAX_FEATURES 2
+/* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+
struct kvm_vcpu;
int kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -146,7 +151,6 @@ struct kvm_vcpu_stat {
struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
struct kvm_one_reg;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 680f74e..efe609c 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -91,7 +91,6 @@ int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
-#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
static inline bool kvm_is_write_fault(unsigned long esr)
{
@@ -117,18 +116,13 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
pte_val(*pte) |= PTE_S2_RDWR;
}
-static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
-{
- pmd_val(*pmd) |= PMD_S2_RDWR;
-}
-
struct kvm;
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
- unsigned long size)
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
{
if (!icache_is_aliasing()) { /* PIPT */
- flush_icache_range(hva, hva + size);
+ unsigned long hva = gfn_to_hva(kvm, gfn);
+ flush_icache_range(hva, hva + PAGE_SIZE);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
/* any kind of VIPT cache */
__flush_icache_all();
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3776217..20925bc 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,23 +33,18 @@
#define UL(x) _AC(x, UL)
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
- * (VA_BITS - 1))
+ * PAGE_OFFSET - the virtual address of the start of the kernel image.
* VA_BITS - the maximum number of bits for virtual addresses.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
-#ifdef CONFIG_ARM64_64K_PAGES
-#define VA_BITS (42)
-#else
-#define VA_BITS (39)
-#endif
-#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
+#define PAGE_OFFSET UL(0xffffffc000000000)
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
+#define VA_BITS (39)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
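
Note: for the 39-bit (4KB pages) case the two PAGE_OFFSET definitions agree: shifting all-ones left by VA_BITS - 1 = 38 leaves bits 63..38 set, i.e. 0xffffffc000000000, exactly the constant the revert restores. An illustrative compile-time check, assuming a 64-bit unsigned long:

_Static_assert((0xffffffffffffffffUL << (39 - 1)) == 0xffffffc000000000UL,
               "derived PAGE_OFFSET matches the hard-coded constant");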
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 9bea6e7..f214069 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -63,12 +63,9 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
struct page *pte;
pte = alloc_pages(PGALLOC_GFP, 0);
- if (!pte)
- return NULL;
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
+ if (pte)
+ pgtable_page_ctor(pte);
+
return pte;
}
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
index 2593b49..0a8ed3f 100644
--- a/arch/arm64/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h
@@ -21,10 +21,10 @@
* 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
* used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
* entry representing 512MB. The user and kernel address spaces are limited to
- * 4TB in the 64KB page configuration.
+ * 512GB and therefore we only use 1024 entries in the PGD.
*/
#define PTRS_PER_PTE 8192
-#define PTRS_PER_PGD 8192
+#define PTRS_PER_PGD 1024
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index b1d2e26..d57e668 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -43,7 +43,7 @@
* Section
*/
#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
-#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
+#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
@@ -85,8 +85,6 @@
#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
-#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
-
/*
* Memory Attribute override for Stage-2 (MemAttr[3:0])
*/
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7f2b60a..f0bebc5 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,16 +25,15 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */
+#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
- /* bit 57 for PMD_SECT_SPLITTING */
-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*/
-#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS)
+#define VMALLOC_START UL(0xffffff8000000000)
#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
@@ -255,7 +254,7 @@ static inline int has_transparent_hugepage(void)
#define pgprot_noncached(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define __HAVE_PHYS_MEM_ACCESS_PROT
@@ -358,20 +357,18 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a swap entry:
- * bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-8: swap type
- * bits 9-57: swap offset
+ * bits 0, 2: present (must both be zero)
+ * bit 3: PTE_FILE
+ * bits 4-8: swap type
+ * bits 9-63: swap offset
*/
-#define __SWP_TYPE_SHIFT 3
+#define __SWP_TYPE_SHIFT 4
#define __SWP_TYPE_BITS 6
-#define __SWP_OFFSET_BITS 49
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
-#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
@@ -385,15 +382,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a file entry:
- * bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-57: file offset / PAGE_SIZE
+ * bits 0, 2: present (must both be zero)
+ * bit 3: PTE_FILE
+ * bits 4-63: file offset / PAGE_SIZE
*/
#define pte_file(pte) (pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 3)
-#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
+#define pte_to_pgoff(x) (pte_val(x) >> 4)
+#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
-#define PTE_FILE_MAX_BITS 55
+#define PTE_FILE_MAX_BITS 60
extern int kern_addr_valid(unsigned long addr);
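
Note: with the restored layout (__SWP_TYPE_SHIFT 4, __SWP_TYPE_BITS 6, so the offset starts at bit 10), a swap entry round-trips as below. This is an illustrative userspace test, not kernel code.

#include <assert.h>

int main(void)
{
        unsigned long type = 5, offset = 0x1234;
        unsigned long val = (type << 4) | (offset << 10);  /* __swp_entry() */

        assert(((val >> 4) & 0x3f) == type);    /* __swp_type()   */
        assert((val >> 10) == offset);          /* __swp_offset() */
        return 0;
}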
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 45b20cd..ab239b2 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -107,11 +107,6 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = COMPAT_PSR_MODE_USR;
if (pc & 1)
regs->pstate |= COMPAT_PSR_T_BIT;
-
-#ifdef __AARCH64EB__
- regs->pstate |= COMPAT_PSR_E_BIT;
-#endif
-
regs->compat_sp = sp;
}
#endif
diff --git a/arch/arm64/include/asm/prom.h b/arch/arm64/include/asm/prom.h
new file mode 100644
index 0000000..68b90e6
--- /dev/null
+++ b/arch/arm64/include/asm/prom.h
@@ -0,0 +1 @@
+/* Empty for now */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index e5312ea..0604237 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,6 +14,25 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+extern struct psci_operations psci_ops;
+
int psci_init(void);
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 0e7fa49..0dacbbf 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,7 +42,6 @@
#define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
-#define COMPAT_PSR_E_BIT 0x00000200
#define COMPAT_PSR_F_BIT 0x00000040
#define COMPAT_PSR_I_BIT 0x00000080
#define COMPAT_PSR_A_BIT 0x00000100
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a498f2c..4b8023c 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,14 +60,21 @@ struct secondary_data {
void *stack;
};
extern struct secondary_data secondary_data;
-extern void secondary_entry(void);
+extern void secondary_holding_pen(void);
+extern volatile unsigned long secondary_holding_pen_release;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-extern int __cpu_disable(void);
+struct device_node;
-extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
+struct smp_enable_ops {
+ const char *name;
+ int (*init_cpu)(struct device_node *, int);
+ int (*prepare_cpu)(int);
+};
+
+extern const struct smp_enable_ops smp_spin_table_ops;
+extern const struct smp_enable_ops smp_psci_ops;
#endif /* ifndef __ASM_SMP_H */
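
Note: the restored smp_enable_ops replaces the generic cpu_operations table; each method exposes only init/prepare hooks and is selected by the devicetree "enable-method" string. A sketch of that lookup, assuming a NULL-terminated table of the two exported ops and <linux/string.h> for strcmp; the helper name is made up.

static const struct smp_enable_ops *enable_ops_sketch[] = {
        &smp_spin_table_ops,
        &smp_psci_ops,
        NULL,
};

/* Hypothetical lookup: match a cpu node's "enable-method" property. */
static const struct smp_enable_ops *smp_get_enable_ops_sketch(const char *name)
{
        const struct smp_enable_ops **ops;

        for (ops = enable_ops_sketch; *ops; ops++)
                if (!strcmp(name, (*ops)->name))
                        return *ops;
        return NULL;
}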
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 3d5cf06..0defa07 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -22,10 +22,17 @@
/*
* Spinlock implementation.
*
+ * The old value is read exclusively and the new one, if unlocked, is written
+ * exclusively. In case of failure, the loop is restarted.
+ *
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
+ *
+ * Unlocked value: 0
+ * Locked value: 1
*/
+#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
@@ -34,51 +41,32 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
- arch_spinlock_t lockval, newval;
asm volatile(
- /* Atomically increment the next ticket. */
-" prfm pstl1strm, %3\n"
-"1: ldaxr %w0, %3\n"
-" add %w1, %w0, %w5\n"
-" stxr %w2, %w1, %3\n"
-" cbnz %w2, 1b\n"
- /* Did we get the lock? */
-" eor %w1, %w0, %w0, ror #16\n"
-" cbz %w1, 3f\n"
- /*
- * No: spin on the owner. Send a local event to avoid missing an
- * unlock before the exclusive load.
- */
-" sevl\n"
-"2: wfe\n"
-" ldaxrh %w2, %4\n"
-" eor %w1, %w2, %w0, lsr #16\n"
-" cbnz %w1, 2b\n"
- /* We got the lock. Critical section starts here. */
-"3:"
- : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
- : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
- : "memory");
+ " sevl\n"
+ "1: wfe\n"
+ "2: ldaxr %w0, %1\n"
+ " cbnz %w0, 1b\n"
+ " stxr %w0, %w2, %1\n"
+ " cbnz %w0, 2b\n"
+ : "=&r" (tmp), "+Q" (lock->lock)
+ : "r" (1)
+ : "cc", "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp;
- arch_spinlock_t lockval;
asm volatile(
-" prfm pstl1strm, %2\n"
-"1: ldaxr %w0, %2\n"
-" eor %w1, %w0, %w0, ror #16\n"
-" cbnz %w1, 2f\n"
-" add %w0, %w0, %3\n"
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 1b\n"
-"2:"
- : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- : "I" (1 << TICKET_SHIFT)
- : "memory");
+ "2: ldaxr %w0, %1\n"
+ " cbnz %w0, 1f\n"
+ " stxr %w0, %w2, %1\n"
+ " cbnz %w0, 2b\n"
+ "1:\n"
+ : "=&r" (tmp), "+Q" (lock->lock)
+ : "r" (1)
+ : "cc", "memory");
return !tmp;
}
@@ -86,28 +74,9 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
-" stlrh %w1, %0\n"
- : "=Q" (lock->owner)
- : "r" (lock->owner + 1)
- : "memory");
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return lock.owner == lock.next;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- arch_spinlock_t lockval = ACCESS_ONCE(*lock);
- return (lockval.next - lockval.owner) > 1;
+ " stlr %w1, %0\n"
+ : "=Q" (lock->lock) : "r" (0) : "memory");
}
-#define arch_spin_is_contended arch_spin_is_contended
/*
* Write lock implementation.
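
Note: this hunk swaps the ticket lock (FIFO-fair: 16-bit owner/next halves, with the lock granted in arrival order) back to a plain test-and-set lock where any spinner may win; that fairness is the main thing the revert gives up. Semantically, the restored lock behaves like the portable sketch below, which uses GCC/clang __atomic builtins rather than kernel API and omits the WFE/SEV-based waiting of the real asm.

static inline void spin_lock_sketch(volatile unsigned int *lock)
{
        /* Spin until we atomically swap 0 -> 1 (acquire semantics). */
        while (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE))
                ;
}

static inline int spin_trylock_sketch(volatile unsigned int *lock)
{
        /* Success iff the previous value was 0 (unlocked). */
        return !__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE);
}

static inline void spin_unlock_sketch(volatile unsigned int *lock)
{
        __atomic_store_n(lock, 0, __ATOMIC_RELEASE);    /* like stlr */
}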
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index b8d3836..9a49434 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,19 +20,14 @@
# error "please don't include this file directly"
#endif
-#define TICKET_SHIFT 16
+/* We only require natural alignment for exclusive accesses. */
+#define __lock_aligned
typedef struct {
-#ifdef __AARCH64EB__
- u16 next;
- u16 owner;
-#else
- u16 owner;
- u16 next;
-#endif
-} __aligned(4) arch_spinlock_t;
+ volatile unsigned int lock;
+} arch_spinlock_t;
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 70ba9d4..89c047f 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -59,9 +59,6 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
- if (n == 0)
- return;
-
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -85,9 +82,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
- if (n == 0)
- return;
-
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 720e70b..23a3c47 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -89,6 +89,12 @@ static inline struct thread_info *current_thread_info(void)
#endif
/*
+ * We use bit 30 of the preempt_count to indicate that kernel
+ * preemption is occurring. See <asm/hardirq.h>.
+ */
+#define PREEMPT_ACTIVE 0x40000000
+
+/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
* TIF_SIGPENDING - signal pending
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 130e2be..26e310c 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,8 +18,7 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
-#define BOOT_CPU_MODE_EL1 (0xe11)
-#define BOOT_CPU_MODE_EL2 (0xe12)
+#define BOOT_CPU_MODE_EL2 (0x0e12b007)
#ifndef __ASSEMBLY__
#include <asm/cacheflush.h>
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
deleted file mode 100644
index 2820f1a..0000000
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
-#define _ASM_ARM64_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-attrs.h>
-#include <linux/dma-mapping.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
-{
- return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
-#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/include/uapi/asm/byteorder.h b/arch/arm64/include/uapi/asm/byteorder.h
index dc19e95..2b92046 100644
--- a/arch/arm64/include/uapi/asm/byteorder.h
+++ b/arch/arm64/include/uapi/asm/byteorder.h
@@ -16,10 +16,6 @@
#ifndef __ASM_BYTEORDER_H
#define __ASM_BYTEORDER_H
-#ifdef __AARCH64EB__
-#include <linux/byteorder/big_endian.h>
-#else
#include <linux/byteorder/little_endian.h>
-#endif
#endif /* __ASM_BYTEORDER_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 9b12476..eea4975 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -21,7 +21,6 @@
*/
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
-#define HWCAP_EVTSTRM (1 << 2)
#endif /* _UAPI__ASM_HWCAP_H */