Diffstat (limited to 'arch/arm/include/asm')
33 files changed, 758 insertions, 153 deletions
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 7ade91d..7c1bfc0 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -10,8 +10,7 @@ #include <clocksource/arm_arch_timer.h> #ifdef CONFIG_ARM_ARCH_TIMER -int arch_timer_of_register(void); -int arch_timer_sched_clock_init(void); +int arch_timer_arch_init(void); /* * These register accessors are marked inline so the compiler can @@ -110,16 +109,6 @@ static inline void __cpuinit arch_counter_set_user_access(void) asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl)); } -#else -static inline int arch_timer_of_register(void) -{ - return -ENXIO; -} - -static inline int arch_timer_sched_clock_init(void) -{ - return -ENXIO; -} #endif #endif diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index c79f61f..da1c77d 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -243,6 +243,29 @@ typedef struct { #define ATOMIC64_INIT(i) { (i) } +#ifdef CONFIG_ARM_LPAE +static inline u64 atomic64_read(const atomic64_t *v) +{ + u64 result; + + __asm__ __volatile__("@ atomic64_read\n" +" ldrd %0, %H0, [%1]" + : "=&r" (result) + : "r" (&v->counter), "Qo" (v->counter) + ); + + return result; +} + +static inline void atomic64_set(atomic64_t *v, u64 i) +{ + __asm__ __volatile__("@ atomic64_set\n" +" strd %2, %H2, [%1]" + : "=Qo" (v->counter) + : "r" (&v->counter), "r" (i) + ); +} +#else static inline u64 atomic64_read(const atomic64_t *v) { u64 result; @@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i) : "r" (&v->counter), "r" (i) : "cc"); } +#endif static inline void atomic64_add(u64 i, atomic64_t *v) { diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index e1489c5..bff7138 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) flush_cache_all(); } +/* + * Memory synchronization helpers for mixed cached vs non cached accesses. + * + * Some synchronization algorithms have to set states in memory with the + * cache enabled or disabled depending on the code path. It is crucial + * to always ensure proper cache maintenance to update main memory right + * away in that case. + * + * Any cached write must be followed by a cache clean operation. + * Any cached read must be preceded by a cache invalidate operation. + * Yet, in the read case, a cache flush i.e. atomic clean+invalidate + * operation is needed to avoid discarding possible concurrent writes to the + * accessed memory. + * + * Also, in order to prevent a cached writer from interfering with an + * adjacent non-cached writer, each state variable must be located to + * a separate cache line. + */ + +/* + * This needs to be >= the max cache writeback size of all + * supported platforms included in the current kernel configuration. + * This is used to align state variables to their own cache lines. + */ +#define __CACHE_WRITEBACK_ORDER 6 /* guessed from existing platforms */ +#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER) + +/* + * There is no __cpuc_clean_dcache_area but we use it anyway for + * code intent clarity, and alias it to __cpuc_flush_dcache_area. 
+ */ +#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area + +/* + * Ensure preceding writes to *p by this CPU are visible to + * subsequent reads by other CPUs: + */ +static inline void __sync_cache_range_w(volatile void *p, size_t size) +{ + char *_p = (char *)p; + + __cpuc_clean_dcache_area(_p, size); + outer_clean_range(__pa(_p), __pa(_p + size)); +} + +/* + * Ensure preceding writes to *p by other CPUs are visible to + * subsequent reads by this CPU. We must be careful not to + * discard data simultaneously written by another CPU, hence the + * usage of flush rather than invalidate operations. + */ +static inline void __sync_cache_range_r(volatile void *p, size_t size) +{ + char *_p = (char *)p; + +#ifdef CONFIG_OUTER_CACHE + if (outer_cache.flush_range) { + /* + * Ensure dirty data migrated from other CPUs into our cache + * are cleaned out safely before the outer cache is cleaned: + */ + __cpuc_clean_dcache_area(_p, size); + + /* Clean and invalidate stale data for *p from outer ... */ + outer_flush_range(__pa(_p), __pa(_p + size)); + } +#endif + + /* ... and inner cache: */ + __cpuc_flush_dcache_area(_p, size); +} + +#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr)) +#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr)) + #endif diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 5ef4d80..1f3262e 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -42,6 +42,8 @@ #define vectors_high() (0) #endif +#ifdef CONFIG_CPU_CP15 + extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ extern unsigned long cr_alignment; /* defined in entry-armv.S */ @@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val) isb(); } -#endif +#else /* ifdef CONFIG_CPU_CP15 */ + +/* + * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the + * minds of the developers). Yielding 0 for machines without a cp15 (and making + * it read-only) is fine for most cases and saves quite some #ifdeffery. 
+ */ +#define cr_no_alignment UL(0) +#define cr_alignment UL(0) + +#endif /* ifdef CONFIG_CPU_CP15 / else */ + +#endif /* ifndef __ASSEMBLY__ */ #endif diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index ad41ec2..7652712 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -38,6 +38,24 @@ #define MPIDR_AFFINITY_LEVEL(mpidr, level) \ ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK) +#define ARM_CPU_IMP_ARM 0x41 +#define ARM_CPU_IMP_INTEL 0x69 + +#define ARM_CPU_PART_ARM1136 0xB360 +#define ARM_CPU_PART_ARM1156 0xB560 +#define ARM_CPU_PART_ARM1176 0xB760 +#define ARM_CPU_PART_ARM11MPCORE 0xB020 +#define ARM_CPU_PART_CORTEX_A8 0xC080 +#define ARM_CPU_PART_CORTEX_A9 0xC090 +#define ARM_CPU_PART_CORTEX_A5 0xC050 +#define ARM_CPU_PART_CORTEX_A15 0xC0F0 +#define ARM_CPU_PART_CORTEX_A7 0xC070 + +#define ARM_CPU_XSCALE_ARCH_MASK 0xe000 +#define ARM_CPU_XSCALE_ARCH_V1 0x2000 +#define ARM_CPU_XSCALE_ARCH_V2 0x4000 +#define ARM_CPU_XSCALE_ARCH_V3 0x6000 + extern unsigned int processor_id; #ifdef CONFIG_CPU_CP15 @@ -50,6 +68,7 @@ extern unsigned int processor_id; : "cc"); \ __val; \ }) + #define read_cpuid_ext(ext_reg) \ ({ \ unsigned int __val; \ @@ -59,29 +78,24 @@ extern unsigned int processor_id; : "cc"); \ __val; \ }) -#else -#define read_cpuid(reg) (processor_id) -#define read_cpuid_ext(reg) 0 -#endif -#define ARM_CPU_IMP_ARM 0x41 -#define ARM_CPU_IMP_INTEL 0x69 +#else /* ifdef CONFIG_CPU_CP15 */ -#define ARM_CPU_PART_ARM1136 0xB360 -#define ARM_CPU_PART_ARM1156 0xB560 -#define ARM_CPU_PART_ARM1176 0xB760 -#define ARM_CPU_PART_ARM11MPCORE 0xB020 -#define ARM_CPU_PART_CORTEX_A8 0xC080 -#define ARM_CPU_PART_CORTEX_A9 0xC090 -#define ARM_CPU_PART_CORTEX_A5 0xC050 -#define ARM_CPU_PART_CORTEX_A15 0xC0F0 -#define ARM_CPU_PART_CORTEX_A7 0xC070 +/* + * read_cpuid and read_cpuid_ext should only ever be called on machines that + * have cp15 so warn on other usages. + */ +#define read_cpuid(reg) \ + ({ \ + WARN_ON_ONCE(1); \ + 0; \ + }) -#define ARM_CPU_XSCALE_ARCH_MASK 0xe000 -#define ARM_CPU_XSCALE_ARCH_V1 0x2000 -#define ARM_CPU_XSCALE_ARCH_V2 0x4000 -#define ARM_CPU_XSCALE_ARCH_V3 0x6000 +#define read_cpuid_ext(reg) read_cpuid(reg) + +#endif /* ifdef CONFIG_CPU_CP15 / else */ +#ifdef CONFIG_CPU_CP15 /* * The CPU ID never changes at run time, so we might as well tell the * compiler that it's constant. Use this function to read the CPU ID @@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void) return read_cpuid(CPUID_ID); } +#else /* ifdef CONFIG_CPU_CP15 */ + +static inline unsigned int __attribute_const__ read_cpuid_id(void) +{ + return processor_id; +} + +#endif /* ifdef CONFIG_CPU_CP15 / else */ + static inline unsigned int __attribute_const__ read_cpuid_implementor(void) { return (read_cpuid_id() & 0xFF000000) >> 24; diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index 720799f..dff714d 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,7 +24,7 @@ extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); void (*udelay)(unsigned long); - bool const_clock; + unsigned long ticks_per_jiffy; } arm_delay_ops; #define __delay(n) arm_delay_ops.delay(n) diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h new file mode 100644 index 0000000..1563130 --- /dev/null +++ b/arch/arm/include/asm/firmware.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2012 Samsung Electronics. 
+ * Kyungmin Park <kyungmin.park@samsung.com> + * Tomasz Figa <t.figa@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARM_FIRMWARE_H +#define __ASM_ARM_FIRMWARE_H + +#include <linux/bug.h> + +/* + * struct firmware_ops + * + * A structure to specify available firmware operations. + * + * A filled up structure can be registered with register_firmware_ops(). + */ +struct firmware_ops { + /* + * Enters CPU idle mode + */ + int (*do_idle)(void); + /* + * Sets boot address of specified physical CPU + */ + int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr); + /* + * Boots specified physical CPU + */ + int (*cpu_boot)(int cpu); + /* + * Initializes L2 cache + */ + int (*l2x0_init)(void); +}; + +/* Global pointer for current firmware_ops structure, can't be NULL. */ +extern const struct firmware_ops *firmware_ops; + +/* + * call_firmware_op(op, ...) + * + * Checks if firmware operation is present and calls it, + * otherwise returns -ENOSYS + */ +#define call_firmware_op(op, ...) \ + ((firmware_ops->op) ? firmware_ops->op(__VA_ARGS__) : (-ENOSYS)) + +/* + * register_firmware_ops(ops) + * + * A function to register platform firmware_ops struct. + */ +static inline void register_firmware_ops(const struct firmware_ops *ops) +{ + BUG_ON(!ops); + + firmware_ops = ops; +} + +#endif diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index cca9f15..ea289e1 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -19,14 +19,6 @@ #undef _CACHE #undef MULTI_CACHE -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v3 -# endif -#endif - #if defined(CONFIG_CPU_CACHE_V4) # ifdef _CACHE # define MULTI_CACHE 1 diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h index 8cacbcd..b6e9f2c 100644 --- a/arch/arm/include/asm/glue-df.h +++ b/arch/arm/include/asm/glue-df.h @@ -18,12 +18,12 @@ * ================ * * We have the following to choose from: - * arm6 - ARM6 style * arm7 - ARM7 style * v4_early - ARMv4 without Thumb early abort handler * v4t_late - ARMv4 with Thumb late abort handler * v4t_early - ARMv4 with Thumb early abort handler - * v5tej_early - ARMv5 with Thumb and Java early abort handler + * v5t_early - ARMv5 with Thumb early abort handler + * v5tj_early - ARMv5 with Thumb and Java early abort handler * xscale - ARMv5 with Thumb with Xscale extensions * v6_early - ARMv6 generic early abort handler * v7_early - ARMv7 generic early abort handler @@ -39,19 +39,19 @@ # endif #endif -#ifdef CONFIG_CPU_ABRT_LV4T +#ifdef CONFIG_CPU_ABRT_EV4 # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 # else -# define CPU_DABORT_HANDLER v4t_late_abort +# define CPU_DABORT_HANDLER v4_early_abort # endif #endif -#ifdef CONFIG_CPU_ABRT_EV4 +#ifdef CONFIG_CPU_ABRT_LV4T # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 # else -# define CPU_DABORT_HANDLER v4_early_abort +# define CPU_DABORT_HANDLER v4t_late_abort # endif #endif @@ -63,19 +63,19 @@ # endif #endif -#ifdef CONFIG_CPU_ABRT_EV5TJ +#ifdef CONFIG_CPU_ABRT_EV5T # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 # else -# define CPU_DABORT_HANDLER v5tj_early_abort +# define CPU_DABORT_HANDLER v5t_early_abort # endif #endif -#ifdef CONFIG_CPU_ABRT_EV5T +#ifdef CONFIG_CPU_ABRT_EV5TJ # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 # else -# define 
CPU_DABORT_HANDLER v5t_early_abort +# define CPU_DABORT_HANDLER v5tj_early_abort # endif #endif diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 02fe2fb..ed94b1a 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX processor registers */ #define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000 -#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 +#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000 #define IOP3XX_PERIPHERAL_SIZE 0x00002000 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\ IOP3XX_PERIPHERAL_SIZE - 1) diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h index 2dd9d3f..bb28af7 100644 --- a/arch/arm/include/asm/hardware/timer-sp.h +++ b/arch/arm/include/asm/hardware/timer-sp.h @@ -1,15 +1,23 @@ +struct clk; + void __sp804_clocksource_and_sched_clock_init(void __iomem *, - const char *, int); + const char *, struct clk *, int); +void __sp804_clockevents_init(void __iomem *, unsigned int, + struct clk *, const char *); static inline void sp804_clocksource_init(void __iomem *base, const char *name) { - __sp804_clocksource_and_sched_clock_init(base, name, 0); + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0); } static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base, const char *name) { - __sp804_clocksource_and_sched_clock_init(base, name, 1); + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1); } -void sp804_clockevents_init(void __iomem *, unsigned int, const char *); +static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name) +{ + __sp804_clockevents_init(base, irq, NULL, name); + +} diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 8c5e828..91b99ab 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page); #endif #endif +/* + * Needed to be able to broadcast the TLB invalidation for kmap. 
+ */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif + #ifdef ARCH_NEEDS_KMAP_HIGH_GET extern void *kmap_high_get(struct page *page); #else diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h index 1a66f907..bf863ed 100644 --- a/arch/arm/include/asm/idmap.h +++ b/arch/arm/include/asm/idmap.h @@ -8,7 +8,6 @@ #define __idmap __section(.idmap.text) noinline notrace extern pgd_t *idmap_pgd; -extern pgd_t *hyp_pgd; void setup_mm_for_reboot(void); diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 35c21c3..53c15de 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -30,6 +30,11 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *); void handle_IRQ(unsigned int, struct pt_regs *); void init_IRQ(void); +#ifdef CONFIG_MULTI_IRQ_HANDLER +extern void (*handle_arch_irq)(struct pt_regs *); +extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); +#endif + #endif #endif diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 7c3d813..124623e 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -211,4 +211,8 @@ #define HSR_HVC_IMM_MASK ((1UL << 16) - 1) +#define HSR_DABT_S1PTW (1U << 7) +#define HSR_DABT_CM (1U << 8) +#define HSR_DABT_EA (1U << 9) + #endif /* __ARM_KVM_ARM_H__ */ diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index e4956f4..18d5032 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[]; extern void __kvm_tlb_flush_vmid(struct kvm *kvm); extern void __kvm_flush_vm_context(void); -extern void __kvm_tlb_flush_vmid(struct kvm *kvm); +extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); #endif diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index fd61199..82b4bab 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -22,11 +22,12 @@ #include <linux/kvm_host.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmio.h> +#include <asm/kvm_arm.h> -u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); -u32 *vcpu_spsr(struct kvm_vcpu *vcpu); +unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); +unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); -int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); +bool kvm_condition_valid(struct kvm_vcpu *vcpu); void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); void kvm_inject_undefined(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); @@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) return 1; } -static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) +static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu) { - return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; + return &vcpu->arch.regs.usr_regs.ARM_pc; } -static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) +static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu) { - return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; + return &vcpu->arch.regs.usr_regs.ARM_cpsr; } static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) @@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) return reg == 15; } +static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fault.hsr; +} + +static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu) +{ + return 
vcpu->arch.fault.hxfar; +} + +static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu) +{ + return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8; +} + +static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fault.hyp_pc; +} + +static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; +} + +static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_WNR; +} + +static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; +} + +static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) +{ + return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; +} + +static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA; +} + +static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; +} + +/* Get Access Size from a data abort */ +static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) +{ + switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) { + case 0: + return 1; + case 1: + return 2; + case 2: + return 4; + default: + kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); + return -EFAULT; + } +} + +/* This one is not specific to Data Abort */ +static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_IL; +} + +static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; +} + +static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; +} + +static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; +} + +static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; +} + #endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index d1736a5..57cb786 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache { void *objects[KVM_NR_MEM_OBJS]; }; +struct kvm_vcpu_fault_info { + u32 hsr; /* Hyp Syndrome Register */ + u32 hxfar; /* Hyp Data/Inst. 
Fault Address Register */ + u32 hpfar; /* Hyp IPA Fault Address Register */ + u32 hyp_pc; /* PC when exception was taken from Hyp mode */ +}; + +typedef struct vfp_hard_struct kvm_cpu_context_t; + struct kvm_vcpu_arch { struct kvm_regs regs; @@ -93,13 +102,13 @@ struct kvm_vcpu_arch { u32 midr; /* Exception Information */ - u32 hsr; /* Hyp Syndrome Register */ - u32 hxfar; /* Hyp Data/Inst Fault Address Register */ - u32 hpfar; /* Hyp IPA Fault Address Register */ + struct kvm_vcpu_fault_info fault; /* Floating point registers (VFP and Advanced SIMD/NEON) */ struct vfp_hard_struct vfp_guest; - struct vfp_hard_struct *vfp_host; + + /* Host FP context */ + kvm_cpu_context_t *host_cpu_context; /* VGIC state */ struct vgic_cpu vgic_cpu; @@ -122,9 +131,6 @@ struct kvm_vcpu_arch { /* Interrupt related fields */ u32 irq_lines; /* IRQ and FIQ levels */ - /* Hyp exception information */ - u32 hyp_pc; /* PC when exception was taken from Hyp mode */ - /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; @@ -181,4 +187,41 @@ struct kvm_one_reg; int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index); + +static inline void __cpu_init_hyp_mode(unsigned long long boot_pgd_ptr, + unsigned long long pgd_ptr, + unsigned long hyp_stack_ptr, + unsigned long vector_ptr) +{ + /* + * Call initialization code, and switch to the full blown HYP + * code. The init code doesn't need to preserve these + * registers as r0-r3 are already callee saved according to + * the AAPCS. + * Note that we slightly misuse the prototype by casing the + * stack pointer to a void *. + * + * We don't have enough registers to perform the full init in + * one go. Install the boot PGD first, and then install the + * runtime PGD, stack pointer and vectors. The PGDs are always + * passed as the third argument, in order to be passed into + * r2-r3 to the init code (yes, this is compliant with the + * PCS!). + */ + + kvm_call_hyp(NULL, 0, boot_pgd_ptr); + + kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); +} + +static inline int kvm_arch_dev_ioctl_check_extension(long ext) +{ + return 0; +} + +int kvm_perf_init(void); +int kvm_perf_teardown(void); + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 421a20b..472ac70 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -19,9 +19,33 @@ #ifndef __ARM_KVM_MMU_H__ #define __ARM_KVM_MMU_H__ +#include <asm/memory.h> +#include <asm/page.h> + +/* + * We directly use the kernel VA for the HYP, as we can directly share + * the mapping (HTTBR "covers" TTBR1). + */ +#define HYP_PAGE_OFFSET_MASK UL(~0) +#define HYP_PAGE_OFFSET PAGE_OFFSET +#define KERN_TO_HYP(kva) (kva) + +/* + * Our virtual mapping for the boot-time MMU-enable code. Must be + * shared across all the page-tables. Conveniently, we use the vectors + * page, where no kernel data will ever be shared with HYP. 
+ */ +#define TRAMPOLINE_VA UL(CONFIG_VECTORS_BASE) + +#ifndef __ASSEMBLY__ + +#include <asm/cacheflush.h> +#include <asm/pgalloc.h> + int create_hyp_mappings(void *from, void *to); int create_hyp_io_mappings(void *from, void *to, phys_addr_t); -void free_hyp_pmds(void); +void free_boot_hyp_pgd(void); +void free_hyp_pgds(void); int kvm_alloc_stage2_pgd(struct kvm *kvm); void kvm_free_stage2_pgd(struct kvm *kvm); @@ -33,9 +57,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); phys_addr_t kvm_mmu_get_httbr(void); +phys_addr_t kvm_mmu_get_boot_httbr(void); +phys_addr_t kvm_get_idmap_vector(void); int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void); +static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) +{ + pte_val(*pte) = new_pte; + /* + * flush_pmd_entry just takes a void pointer and cleans the necessary + * cache entries, so we can reuse the function for ptes. + */ + flush_pmd_entry(pte); +} + static inline bool kvm_is_write_fault(unsigned long hsr) { unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; @@ -47,4 +83,53 @@ static inline bool kvm_is_write_fault(unsigned long hsr) return true; } +static inline void kvm_clean_pgd(pgd_t *pgd) +{ + clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); +} + +static inline void kvm_clean_pmd_entry(pmd_t *pmd) +{ + clean_pmd_entry(pmd); +} + +static inline void kvm_clean_pte(pte_t *pte) +{ + clean_pte_table(pte); +} + +static inline void kvm_set_s2pte_writable(pte_t *pte) +{ + pte_val(*pte) |= L_PTE_S2_RDWR; +} + +struct kvm; + +static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) +{ + /* + * If we are going to insert an instruction page and the icache is + * either VIPT or PIPT, there is a potential problem where the host + * (or another VM) may have used the same page as this guest, and we + * read incorrect data from the icache. If we're using a PIPT cache, + * we can invalidate just that page, but if we are using a VIPT cache + * we need to invalidate the entire icache - damn shame - as written + * in the ARM ARM (DDI 0406C.b - Page B3-1393). + * + * VIVT caches are tagged using both the ASID and the VMID and doesn't + * need any kind of flushing (DDI 0406C.b - Page B3-1392). 
+ */ + if (icache_is_pipt()) { + unsigned long hva = gfn_to_hva(kvm, gfn); + __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); + } else if (!icache_is_vivt_asid_tagged()) { + /* any kind of VIPT cache */ + __flush_icache_all(); + } +} + +#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) + +#endif /* !__ASSEMBLY__ */ + #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h index ab97207..343744e 100644 --- a/arch/arm/include/asm/kvm_vgic.h +++ b/arch/arm/include/asm/kvm_vgic.h @@ -21,7 +21,6 @@ #include <linux/kernel.h> #include <linux/kvm.h> -#include <linux/kvm_host.h> #include <linux/irqreturn.h> #include <linux/spinlock.h> #include <linux/types.h> diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index 18c8830..2092ee1 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -20,11 +20,6 @@ struct seq_file; extern void init_FIQ(int); extern int show_fiq_list(struct seq_file *, int); -#ifdef CONFIG_MULTI_IRQ_HANDLER -extern void (*handle_arch_irq)(struct pt_regs *); -extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); -#endif - /* * This is for easy migration, but should be changed in the source */ @@ -35,35 +30,4 @@ do { \ raw_spin_unlock(&desc->lock); \ } while(0) -#ifndef __ASSEMBLY__ -/* - * Entry/exit functions for chained handlers where the primary IRQ chip - * may implement either fasteoi or level-trigger flow control. - */ -static inline void chained_irq_enter(struct irq_chip *chip, - struct irq_desc *desc) -{ - /* FastEOI controllers require no action on entry. */ - if (chip->irq_eoi) - return; - - if (chip->irq_mask_ack) { - chip->irq_mask_ack(&desc->irq_data); - } else { - chip->irq_mask(&desc->irq_data); - if (chip->irq_ack) - chip->irq_ack(&desc->irq_data); - } -} - -static inline void chained_irq_exit(struct irq_chip *chip, - struct irq_desc *desc) -{ - if (chip->irq_eoi) - chip->irq_eoi(&desc->irq_data); - else - chip->irq_unmask(&desc->irq_data); -} -#endif - #endif diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 5cf2e97..7d2c3c8 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -30,6 +30,11 @@ struct hw_pci { void (*postinit)(void); u8 (*swizzle)(struct pci_dev *dev, u8 *pin); int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); + resource_size_t (*align_resource)(struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align); }; /* @@ -51,6 +56,12 @@ struct pci_sys_data { u8 (*swizzle)(struct pci_dev *, u8 *); /* IRQ mapping */ int (*map_irq)(const struct pci_dev *, u8, u8); + /* Resource alignement requirements */ + resource_size_t (*align_resource)(struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align); void *private_data; /* platform controller private data */ }; diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h new file mode 100644 index 0000000..0f7b762 --- /dev/null +++ b/arch/arm/include/asm/mcpm.h @@ -0,0 +1,209 @@ +/* + * arch/arm/include/asm/mcpm.h + * + * Created by: Nicolas Pitre, April 2012 + * Copyright: (C) 2012-2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef MCPM_H +#define MCPM_H + +/* + * Maximum number of possible clusters / CPUs per cluster. + * + * This should be sufficient for quite a while, while keeping the + * (assembly) code simpler. When this starts to grow then we'll have + * to consider dynamic allocation. + */ +#define MAX_CPUS_PER_CLUSTER 4 +#define MAX_NR_CLUSTERS 2 + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <asm/cacheflush.h> + +/* + * Platform specific code should use this symbol to set up secondary + * entry location for processors to use when released from reset. + */ +extern void mcpm_entry_point(void); + +/* + * This is used to indicate where the given CPU from given cluster should + * branch once it is ready to re-enter the kernel using ptr, or NULL if it + * should be gated. A gated CPU is held in a WFE loop until its vector + * becomes non NULL. + */ +void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr); + +/* + * CPU/cluster power operations API for higher subsystems to use. + */ + +/** + * mcpm_cpu_power_up - make given CPU in given cluster runable + * + * @cpu: CPU number within given cluster + * @cluster: cluster number for the CPU + * + * The identified CPU is brought out of reset. If the cluster was powered + * down then it is brought up as well, taking care not to let the other CPUs + * in the cluster run, and ensuring appropriate cluster setup. + * + * Caller must ensure the appropriate entry vector is initialized with + * mcpm_set_entry_vector() prior to calling this. + * + * This must be called in a sleepable context. However, the implementation + * is strongly encouraged to return early and let the operation happen + * asynchronously, especially when significant delays are expected. + * + * If the operation cannot be performed then an error code is returned. + */ +int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster); + +/** + * mcpm_cpu_power_down - power the calling CPU down + * + * The calling CPU is powered down. + * + * If this CPU is found to be the "last man standing" in the cluster + * then the cluster is prepared for power-down too. + * + * This must be called with interrupts disabled. + * + * This does not return. Re-entry in the kernel is expected via + * mcpm_entry_point. + */ +void mcpm_cpu_power_down(void); + +/** + * mcpm_cpu_suspend - bring the calling CPU in a suspended state + * + * @expected_residency: duration in microseconds the CPU is expected + * to remain suspended, or 0 if unknown/infinity. + * + * The calling CPU is suspended. The expected residency argument is used + * as a hint by the platform specific backend to implement the appropriate + * sleep state level according to the knowledge it has on wake-up latency + * for the given hardware. + * + * If this CPU is found to be the "last man standing" in the cluster + * then the cluster may be prepared for power-down too, if the expected + * residency makes it worthwhile. + * + * This must be called with interrupts disabled. + * + * This does not return. Re-entry in the kernel is expected via + * mcpm_entry_point. + */ +void mcpm_cpu_suspend(u64 expected_residency); + +/** + * mcpm_cpu_powered_up - housekeeping workafter a CPU has been powered up + * + * This lets the platform specific backend code perform needed housekeeping + * work. This must be called by the newly activated CPU as soon as it is + * fully operational in kernel space, before it enables interrupts. + * + * If the operation cannot be performed then an error code is returned. 
+ */ +int mcpm_cpu_powered_up(void); + +/* + * Platform specific methods used in the implementation of the above API. + */ +struct mcpm_platform_ops { + int (*power_up)(unsigned int cpu, unsigned int cluster); + void (*power_down)(void); + void (*suspend)(u64); + void (*powered_up)(void); +}; + +/** + * mcpm_platform_register - register platform specific power methods + * + * @ops: mcpm_platform_ops structure to register + * + * An error is returned if the registration has been done previously. + */ +int __init mcpm_platform_register(const struct mcpm_platform_ops *ops); + +/* Synchronisation structures for coordinating safe cluster setup/teardown: */ + +/* + * When modifying this structure, make sure you update the MCPM_SYNC_ defines + * to match. + */ +struct mcpm_sync_struct { + /* individual CPU states */ + struct { + s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); + } cpus[MAX_CPUS_PER_CLUSTER]; + + /* cluster state */ + s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); + + /* inbound-side state */ + s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); +}; + +struct sync_struct { + struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; +}; + +extern unsigned long sync_phys; /* physical address of *mcpm_sync */ + +void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster); +void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster); +void __mcpm_outbound_leave_critical(unsigned int cluster, int state); +bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); +int __mcpm_cluster_state(unsigned int cluster); + +int __init mcpm_sync_init( + void (*power_up_setup)(unsigned int affinity_level)); + +void __init mcpm_smp_set_ops(void); + +#else + +/* + * asm-offsets.h causes trouble when included in .c files, and cacheflush.h + * cannot be included in asm files. Let's work around the conflict like this. + */ +#include <asm/asm-offsets.h> +#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE + +#endif /* ! __ASSEMBLY__ */ + +/* Definitions for mcpm_sync_struct */ +#define CPU_DOWN 0x11 +#define CPU_COMING_UP 0x12 +#define CPU_UP 0x13 +#define CPU_GOING_DOWN 0x14 + +#define CLUSTER_DOWN 0x21 +#define CLUSTER_UP 0x22 +#define CLUSTER_GOING_DOWN 0x23 + +#define INBOUND_NOT_COMING_UP 0x31 +#define INBOUND_COMING_UP 0x32 + +/* + * Offsets for the mcpm_sync_struct members, for use in asm. + * We don't want to make them global to the kernel via asm-offsets.c. 
+ */ +#define MCPM_SYNC_CLUSTER_CPUS 0 +#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE +#define MCPM_SYNC_CLUSTER_CLUSTER \ + (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER) +#define MCPM_SYNC_CLUSTER_INBOUND \ + (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE) +#define MCPM_SYNC_CLUSTER_SIZE \ + (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE) + +#endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 863a661..a7b85e0 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm); void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) +DECLARE_PER_CPU(atomic64_t, active_asids); + #else /* !CONFIG_CPU_HAS_ASID */ #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 6ef8afd..86b8fe3 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -111,7 +111,7 @@ #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ /* * Hyp-mode PL2 PTE definitions for LPAE. diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 80d6fc4..9bcd262 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define FIRST_USER_ADDRESS PAGE_SIZE /* + * Use TASK_SIZE as the ceiling argument for free_pgtables() and + * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd + * page shared between user and kernel). 
+ */ +#ifdef CONFIG_ARM_LPAE +#define USER_PGTABLES_CEILING TASK_SIZE +#endif + +/* * The pgprot_* and protection_map entries will be fixed up in runtime * to include the cachable and bufferable bits based on memory policy, * as well as any architecture dependent bits like global/ASID and SMP diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index e3f7572..3d520dd 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -11,4 +11,6 @@ extern void sched_clock_postinit(void); extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); +extern unsigned long long (*sched_clock_func)(void); + #endif diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 5a85f14..21a23e3 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -21,9 +21,6 @@ extern void (*arm_pm_idle)(void); extern unsigned int user_debug; -extern void disable_hlt(void); -extern void enable_hlt(void); - #endif /* !__ASSEMBLY__ */ #endif /* __ASM_ARM_SYSTEM_MISC_H */ diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index cddda1f..1995d1a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_SYSCALL_AUDIT 9 #define TIF_SYSCALL_TRACEPOINT 10 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */ +#define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c88..a3625d1 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -14,7 +14,6 @@ #include <asm/glue.h> -#define TLB_V3_PAGE (1 << 0) #define TLB_V4_U_PAGE (1 << 1) #define TLB_V4_D_PAGE (1 << 2) #define TLB_V4_I_PAGE (1 << 3) @@ -22,7 +21,6 @@ #define TLB_V6_D_PAGE (1 << 5) #define TLB_V6_I_PAGE (1 << 6) -#define TLB_V3_FULL (1 << 8) #define TLB_V4_U_FULL (1 << 9) #define TLB_V4_D_FULL (1 << 10) #define TLB_V4_I_FULL (1 << 11) @@ -52,7 +50,6 @@ * ============= * * We have the following to choose from: - * v3 - ARMv3 * v4 - ARMv4 without write buffer * v4wb - ARMv4 with write buffer without I TLB flush entry instruction * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction @@ -169,7 +166,7 @@ # define v6wbi_always_flags (-1UL) #endif -#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ +#define v7wbi_tlb_flags_smp (TLB_WB | TLB_BARRIER | \ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ TLB_V7_UIS_ASID | TLB_V7_UIS_BP) #define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ @@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); @@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { + if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL, "c8, 
c6, 0", zero); tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); @@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && + if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); @@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); @@ -450,6 +443,21 @@ static inline void local_flush_bp_all(void) isb(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static inline void dummy_flush_tlb_a15_erratum(void) +{ + /* + * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. + */ + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); + dsb(); +} +#else +static inline void dummy_flush_tlb_a15_erratum(void) +{ +} +#endif + /* * flush_pmd_entry * diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index e4ddfb3..141baa3 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -44,14 +44,6 @@ #define __ARCH_WANT_SYS_CLONE /* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") - -/* * Unimplemented (or alternatively implemented) syscalls */ #define __IGNORE_fadvise64_64 diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h index 5c27696..8b1f37b 100644 --- a/arch/arm/include/asm/xen/events.h +++ b/arch/arm/include/asm/xen/events.h @@ -2,6 +2,7 @@ #define _ASM_ARM_XEN_EVENTS_H #include <asm/ptrace.h> +#include <asm/atomic.h> enum ipi_vector { XEN_PLACEHOLDER_VECTOR, @@ -15,26 +16,8 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) return raw_irqs_disabled_flags(regs->ARM_cpsr); } -/* - * We cannot use xchg because it does not support 8-byte - * values. However it is safe to use {ldr,dtd}exd directly because all - * platforms which Xen can run on support those instructions. 
- */ -static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val) -{ - xen_ulong_t oldval; - unsigned int tmp; - - wmb(); - asm volatile("@ xchg_xen_ulong\n" - "1: ldrexd %0, %H0, [%3]\n" - " strexd %1, %2, %H2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (oldval), "=&r" (tmp) - : "r" (val), "r" (ptr) - : "memory", "cc"); - return oldval; -} +#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \ + atomic64_t, \ + counter), (val)) #endif /* _ASM_ARM_XEN_EVENTS_H */ diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h index 8a82325..799f42e 100644 --- a/arch/arm/include/asm/xen/hypercall.h +++ b/arch/arm/include/asm/xen/hypercall.h @@ -46,6 +46,7 @@ int HYPERVISOR_event_channel_op(int cmd, void *arg); unsigned long HYPERVISOR_hvm_op(int op, void *arg); int HYPERVISOR_memory_op(unsigned int cmd, void *arg); int HYPERVISOR_physdev_op(int cmd, void *arg); +int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); static inline void MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, |