Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild                  |   2
-rw-r--r--  arch/arm/include/asm/arch_timer.h            |  36
-rw-r--r--  arch/arm/include/asm/assembler.h             |   7
-rw-r--r--  arch/arm/include/asm/atomic.h                | 108
-rw-r--r--  arch/arm/include/asm/bL_switcher.h           |  77
-rw-r--r--  arch/arm/include/asm/bug.h                   |  10
-rw-r--r--  arch/arm/include/asm/cacheflush.h            |  46
-rw-r--r--  arch/arm/include/asm/cmpxchg.h               |  58
-rw-r--r--  arch/arm/include/asm/cputype.h               |   1
-rw-r--r--  arch/arm/include/asm/dma-mapping.h           |  54
-rw-r--r--  arch/arm/include/asm/hardirq.h               |   2
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h    |   8
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-adma.h  |  30
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-gpio.h  |  75
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h       |  12
-rw-r--r--  arch/arm/include/asm/hardware/iop_adma.h     |   4
-rw-r--r--  arch/arm/include/asm/io.h                    |   9
-rw-r--r--  arch/arm/include/asm/kgdb.h                  |   3
-rw-r--r--  arch/arm/include/asm/kvm_arm.h               |   9
-rw-r--r--  arch/arm/include/asm/kvm_asm.h               |   2
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h           |  51
-rw-r--r--  arch/arm/include/asm/kvm_host.h              |   6
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h               |  17
-rw-r--r--  arch/arm/include/asm/mach/arch.h             |   1
-rw-r--r--  arch/arm/include/asm/mach/pci.h              |   4
-rw-r--r--  arch/arm/include/asm/mcpm.h                  |  39
-rw-r--r--  arch/arm/include/asm/memory.h                |  85
-rw-r--r--  arch/arm/include/asm/mmu.h                   |   2
-rw-r--r--  arch/arm/include/asm/pgalloc.h               |  12
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h        |   7
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h        |   5
-rw-r--r--  arch/arm/include/asm/pgtable.h               |   2
-rw-r--r--  arch/arm/include/asm/processor.h             |  33
-rw-r--r--  arch/arm/include/asm/prom.h                  |   2
-rw-r--r--  arch/arm/include/asm/sched_clock.h           |   4
-rw-r--r--  arch/arm/include/asm/setup.h                 |   2
-rw-r--r--  arch/arm/include/asm/smp.h                   |   2
-rw-r--r--  arch/arm/include/asm/spinlock.h              |  36
-rw-r--r--  arch/arm/include/asm/spinlock_types.h        |   2
-rw-r--r--  arch/arm/include/asm/thread_info.h           |   6
-rw-r--r--  arch/arm/include/asm/tlbflush.h              |  48
-rw-r--r--  arch/arm/include/asm/unified.h               |   4
-rw-r--r--  arch/arm/include/asm/xen/hypervisor.h        |   2
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h     |  50
-rw-r--r--  arch/arm/include/asm/xen/page.h              |  44
45 files changed, 320 insertions(+), 699 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index c38b58c..59ceae8 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -24,7 +24,6 @@ generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
-generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
@@ -33,4 +32,3 @@ generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
generic-y += unaligned.h
-generic-y += preempt.h
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 0704e0c..5665134 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -87,43 +87,17 @@ static inline u64 arch_counter_get_cntvct(void)
return cval;
}
-static inline u32 arch_timer_get_cntkctl(void)
+static inline void arch_counter_set_user_access(void)
{
u32 cntkctl;
+
asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
- return cntkctl;
-}
-static inline void arch_timer_set_cntkctl(u32 cntkctl)
-{
- asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
-}
+ /* disable user access to everything */
+ cntkctl &= ~((3 << 8) | (7 << 0));
-static inline void arch_counter_set_user_access(void)
-{
- u32 cntkctl = arch_timer_get_cntkctl();
-
- /* Disable user access to both physical/virtual counters/timers */
- /* Also disable virtual event stream */
- cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
- | ARCH_TIMER_USR_VT_ACCESS_EN
- | ARCH_TIMER_VIRT_EVT_EN
- | ARCH_TIMER_USR_VCT_ACCESS_EN
- | ARCH_TIMER_USR_PCT_ACCESS_EN);
- arch_timer_set_cntkctl(cntkctl);
-}
-
-static inline void arch_timer_evtstrm_enable(int divider)
-{
- u32 cntkctl = arch_timer_get_cntkctl();
- cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
- /* Set the divider and enable virtual event stream */
- cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
- | ARCH_TIMER_VIRT_EVT_EN;
- arch_timer_set_cntkctl(cntkctl);
- elf_hwcap |= HWCAP_EVTSTRM;
+ asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
-
#endif
#endif
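For reference, the magic mask written back in arch_counter_set_user_access() decodes as follows against the ARMv7-A Generic Timer CNTKCTL layout (a sketch; these names come from the architecture manual, not from this header):

#define CNTKCTL_PL0PTEN		(1 << 9)	/* user access to physical timer registers */
#define CNTKCTL_PL0VTEN		(1 << 8)	/* user access to virtual timer registers */
#define CNTKCTL_EVNTEN		(1 << 2)	/* virtual counter event stream enable */
#define CNTKCTL_PL0VCTEN	(1 << 1)	/* user access to the virtual counter */
#define CNTKCTL_PL0PCTEN	(1 << 0)	/* user access to the physical counter */

/* (3 << 8) | (7 << 0) clears all five bits above */

Clearing all five disables every user-space view of the counters, timers and the event stream, which is what the removed ARCH_TIMER_* constants expressed symbolically.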
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c22851..fcc1b5b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -53,13 +53,6 @@
#define put_byte_3 lsl #0
#endif
-/* Select code for any configuration running in BE8 mode */
-#ifdef CONFIG_CPU_ENDIAN_BE8
-#define ARM_BE8(code...) code
-#else
-#define ARM_BE8(code...)
-#endif
-
/*
* Data preload for architectures that support it
*/
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb5..da1c77d 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -12,7 +12,6 @@
#define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h>
-#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
@@ -42,7 +41,6 @@ static inline void atomic_add(int i, atomic_t *v)
unsigned long tmp;
int result;
- prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
@@ -81,7 +79,6 @@ static inline void atomic_sub(int i, atomic_t *v)
unsigned long tmp;
int result;
- prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
@@ -117,8 +114,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
- int oldval;
- unsigned long res;
+ unsigned long oldval, res;
smp_mb();
@@ -138,6 +134,21 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long tmp, tmp2;
+
+ __asm__ __volatile__("@ atomic_clear_mask\n"
+"1: ldrex %0, [%3]\n"
+" bic %0, %0, %4\n"
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+ : "r" (addr), "Ir" (mask)
+ : "cc");
+}
+
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
@@ -186,6 +197,15 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ *addr &= ~mask;
+ raw_local_irq_restore(flags);
+}
+
#endif /* __LINUX_ARM_ARCH__ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -218,15 +238,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
- long long counter;
+ u64 __aligned(8) counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline long long atomic64_read(const atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
{
- long long result;
+ u64 result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
@@ -237,7 +257,7 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, u64 i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -246,9 +266,9 @@ static inline void atomic64_set(atomic64_t *v, long long i)
);
}
#else
-static inline long long atomic64_read(const atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
{
- long long result;
+ u64 result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
@@ -259,11 +279,10 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, u64 i)
{
- long long tmp;
+ u64 tmp;
- prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
@@ -275,16 +294,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
}
#endif
-static inline void atomic64_add(long long i, atomic64_t *v)
+static inline void atomic64_add(u64 i, atomic64_t *v)
{
- long long result;
+ u64 result;
unsigned long tmp;
- prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %Q0, %Q0, %Q4\n"
-" adc %R0, %R0, %R4\n"
+" adds %0, %0, %4\n"
+" adc %H0, %H0, %H4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -293,17 +311,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
: "cc");
}
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
- long long result;
+ u64 result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %Q0, %Q0, %Q4\n"
-" adc %R0, %R0, %R4\n"
+" adds %0, %0, %4\n"
+" adc %H0, %H0, %H4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -316,16 +334,15 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
return result;
}
-static inline void atomic64_sub(long long i, atomic64_t *v)
+static inline void atomic64_sub(u64 i, atomic64_t *v)
{
- long long result;
+ u64 result;
unsigned long tmp;
- prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, %Q4\n"
-" sbc %R0, %R0, %R4\n"
+" subs %0, %0, %4\n"
+" sbc %H0, %H0, %H4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -334,17 +351,17 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
: "cc");
}
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
- long long result;
+ u64 result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, %Q4\n"
-" sbc %R0, %R0, %R4\n"
+" subs %0, %0, %4\n"
+" sbc %H0, %H0, %H4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -357,10 +374,9 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
return result;
}
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
- long long new)
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
- long long oldval;
+ u64 oldval;
unsigned long res;
smp_mb();
@@ -382,9 +398,9 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
return oldval;
}
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
- long long result;
+ u64 result;
unsigned long tmp;
smp_mb();
@@ -403,18 +419,18 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
return result;
}
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
- long long result;
+ u64 result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, #1\n"
-" sbc %R0, %R0, #0\n"
-" teq %R0, #0\n"
+" subs %0, %0, #1\n"
+" sbc %H0, %H0, #0\n"
+" teq %H0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
@@ -429,9 +445,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return result;
}
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
- long long val;
+ u64 val;
unsigned long tmp;
int ret = 1;
@@ -443,8 +459,8 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
-" adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
+" adds %0, %0, %6\n"
+" adc %H0, %H0, %H6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
deleted file mode 100644
index 1714800..0000000
--- a/arch/arm/include/asm/bL_switcher.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * arch/arm/include/asm/bL_switcher.h
- *
- * Created by: Nicolas Pitre, April 2012
- * Copyright: (C) 2012-2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef ASM_BL_SWITCHER_H
-#define ASM_BL_SWITCHER_H
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-
-typedef void (*bL_switch_completion_handler)(void *cookie);
-
-int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
- bL_switch_completion_handler completer,
- void *completer_cookie);
-static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
-{
- return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
-}
-
-/*
- * Register here to be notified about runtime enabling/disabling of
- * the switcher.
- *
- * The notifier chain is called with the switcher activation lock held:
- * the switcher will not be enabled or disabled during callbacks.
- * Callbacks must not call bL_switcher_{get,put}_enabled().
- */
-#define BL_NOTIFY_PRE_ENABLE 0
-#define BL_NOTIFY_POST_ENABLE 1
-#define BL_NOTIFY_PRE_DISABLE 2
-#define BL_NOTIFY_POST_DISABLE 3
-
-#ifdef CONFIG_BL_SWITCHER
-
-int bL_switcher_register_notifier(struct notifier_block *nb);
-int bL_switcher_unregister_notifier(struct notifier_block *nb);
-
-/*
- * Use these functions to temporarily prevent enabling/disabling of
- * the switcher.
- * bL_switcher_get_enabled() returns true if the switcher is currently
- * enabled. Each call to bL_switcher_get_enabled() must be followed
- * by a call to bL_switcher_put_enabled(). These functions are not
- * recursive.
- */
-bool bL_switcher_get_enabled(void);
-void bL_switcher_put_enabled(void);
-
-int bL_switcher_trace_trigger(void);
-int bL_switcher_get_logical_index(u32 mpidr);
-
-#else
-static inline int bL_switcher_register_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline bool bL_switcher_get_enabled(void) { return false; }
-static inline void bL_switcher_put_enabled(void) { }
-static inline int bL_switcher_trace_trigger(void) { return 0; }
-static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
-#endif /* CONFIG_BL_SWITCHER */
-
-#endif
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index b274bde..7af5c6c 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,8 +2,6 @@
#define _ASMARM_BUG_H
#include <linux/linkage.h>
-#include <linux/types.h>
-#include <asm/opcodes.h>
#ifdef CONFIG_BUG
@@ -14,10 +12,10 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
-#define BUG_INSTR(__value) __inst_thumb16(__value)
+#define BUG_INSTR_TYPE ".hword "
#else
#define BUG_INSTR_VALUE 0xe7f001f2
-#define BUG_INSTR(__value) __inst_arm(__value)
+#define BUG_INSTR_TYPE ".word "
#endif
@@ -35,7 +33,7 @@
#define __BUG(__file, __line, __value) \
do { \
- asm volatile("1:\t" BUG_INSTR(__value) "\n" \
+ asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
@@ -50,7 +48,7 @@ do { \
#define __BUG(__file, __line, __value) \
do { \
- asm volatile(BUG_INSTR(__value) "\n"); \
+ asm volatile(BUG_INSTR_TYPE #__value); \
unreachable(); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
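The .hword/.word strings only work because __value is already a numeric literal by the time # stringizes it; assuming the usual BUG() -> _BUG() -> __BUG() indirection in this header expands BUG_INSTR_VALUE first, the verbose ARM-mode variant preprocesses to roughly (a sketch, with a hypothetical file name):

asm volatile("1:\t.word 0xe7f001f2\n"
	     ".pushsection .rodata.str, \"aMS\", %progbits, 1\n"
	     "2:\t.asciz \"file.c\"\n"
	     ".popsection\n"
	     /* ... bug table entry referencing 1b and 2b ... */);

0xe7f001f2 sits in the permanently-undefined ARM encoding space, so executing it traps into the undefined-instruction handler, which recognizes the value and reports the file/line strings recorded next to it.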
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index ee753f1..15f2d5b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -435,50 +435,4 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
-/*
- * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
- * To do so we must:
- *
- * - Clear the SCTLR.C bit to prevent further cache allocations
- * - Flush the desired level of cache
- * - Clear the ACTLR "SMP" bit to disable local coherency
- *
- * ... and so without any intervening memory access in between those steps,
- * not even to the stack.
- *
- * WARNING -- After this has been called:
- *
- * - No ldrex/strex (and similar) instructions must be used.
- * - The CPU is obviously no longer coherent with the other CPUs.
- * - This is unlikely to work as expected if Linux is running non-secure.
- *
- * Note:
- *
- * - This is known to apply to several ARMv7 processor implementations,
- * however some exceptions may exist. Caveat emptor.
- *
- * - The clobber list is dictated by the call to v7_flush_dcache_*.
- * fp is preserved to the stack explicitly prior disabling the cache
- * since adding it to the clobber list is incompatible with having
- * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
- * trampoline are inserted by the linker and to keep sp 64-bit aligned.
- */
-#define v7_exit_coherency_flush(level) \
- asm volatile( \
- "stmfd sp!, {fp, ip} \n\t" \
- "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
- "bic r0, r0, #"__stringify(CR_C)" \n\t" \
- "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
- "isb \n\t" \
- "bl v7_flush_dcache_"__stringify(level)" \n\t" \
- "clrex \n\t" \
- "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
- "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
- "isb \n\t" \
- "dsb \n\t" \
- "ldmfd sp!, {fp, ip}" \
- : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
- "r9","r10","lr","memory" )
-
#endif
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba..4f009c1 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,42 +223,6 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
-static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- unsigned long long oldval;
- unsigned long res;
-
- __asm__ __volatile__(
-"1: ldrexd %1, %H1, [%3]\n"
-" teq %1, %4\n"
-" teqeq %H1, %H4\n"
-" bne 2f\n"
-" strexd %0, %5, %H5, [%3]\n"
-" teq %0, #0\n"
-" bne 1b\n"
-"2:"
- : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
- : "r" (ptr), "r" (old), "r" (new)
- : "cc");
-
- return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- unsigned long long ret;
-
- smp_mb();
- ret = __cmpxchg64(ptr, old, new);
- smp_mb();
-
- return ret;
-}
-
#define cmpxchg_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), \
@@ -266,16 +230,18 @@ static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#define cmpxchg64_relaxed(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+ ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
+ atomic64_t, \
+ counter), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
+ local64_t, \
+ a), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
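Note the contract this cmpxchg64() imposes: container_of() is only well-defined when the u64 really is the counter field of an atomic64_t (or the a field of a local64_t for the _local variant). A hypothetical caller, to make the shape concrete:

static atomic64_t seq = ATOMIC64_INIT(0);

/* returns nonzero if we were the one to move seq from 0 to 1 */
static int claim_seq(void)
{
	return cmpxchg64(&seq.counter, 0ULL, 1ULL) == 0ULL;
}

A pointer to a bare u64 would still compile, but would apply container_of() to an object that is not actually inside an atomic64_t.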
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index acdde76..9672e97 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -10,7 +10,6 @@
#define CPUID_TLBTYPE 3
#define CPUID_MPUIR 4
#define CPUID_MPIDR 5
-#define CPUID_REVIDR 6
#ifdef CONFIG_CPU_V7M
#define CPUID_EXT_PFR0 0x40
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index e701a4d..5b579b9 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,28 +11,17 @@
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
-#include <xen/xen.h>
-#include <asm/xen/hypervisor.h>
-
#define DMA_ERROR_CODE (~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &arm_dma_ops;
}
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- if (xen_initial_domain())
- return xen_dma_ops;
- else
- return __generic_dma_ops(dev);
-}
-
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
BUG_ON(!dev);
@@ -75,7 +64,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
-
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
@@ -98,46 +86,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
}
#endif
-/* The ARM override for dma_max_pfn() */
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
- return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
-}
-#define dma_max_pfn(dev) dma_max_pfn(dev)
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- unsigned int offset = paddr & ~PAGE_MASK;
- return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
- unsigned int offset = dev_addr & ~PAGE_MASK;
- return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- u64 limit, mask;
-
- if (!dev->dma_mask)
- return 0;
-
- mask = *dev->dma_mask;
-
- limit = (mask + 1) & ~mask;
- if (limit && size > limit)
- return 0;
-
- if ((addr | (addr + size - 1)) & ~mask)
- return 0;
-
- return 1;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) { }
-
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
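The limit computation in the removed dma_capable() is a standard bit trick: for a contiguous mask 2^n - 1, (mask + 1) & ~mask isolates 2^n, the size of the addressable window. A worked example with a 28-bit DMA mask:

	mask     = 0x0fffffff                  /* 2^28 - 1 */
	mask + 1 = 0x10000000
	~mask    = 0xfffffffff0000000          /* as u64 */
	limit    = (mask + 1) & ~mask = 0x10000000 = 256 MiB

A single mapping larger than the window is rejected up front, and the final (addr | (addr + size - 1)) & ~mask test checks that both the first and last byte of the buffer sit below the mask.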
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index fe3ea77..2740c2a 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 8
+#define NR_IPI 6
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index ad774f3..0cf7a6b 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -24,8 +24,8 @@
#define TRACER_TIMEOUT 10000
#define etm_writel(t, v, x) \
- (writel_relaxed((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
+ (__raw_writel((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
/* CoreSight Management Registers */
#define CSMR_LOCKACCESS 0xfb0
@@ -142,8 +142,8 @@
#define ETBFF_TRIGFL BIT(10)
#define etb_writel(t, v, x) \
- (writel_relaxed((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
+ (__raw_writel((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etm_unlock(t) \
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 240b29e..9b28f12 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -393,6 +393,36 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
return slot_cnt;
}
+static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
+{
+ return 0;
+}
+
+static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return hw_desc.dma->dest_addr;
+ case AAU_ID:
+ return hw_desc.aau->dest_addr;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+
+static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan)
+{
+ BUG();
+ return 0;
+}
+
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
diff --git a/arch/arm/include/asm/hardware/iop3xx-gpio.h b/arch/arm/include/asm/hardware/iop3xx-gpio.h
new file mode 100644
index 0000000..9eda7dc
--- /dev/null
+++ b/arch/arm/include/asm/hardware/iop3xx-gpio.h
@@ -0,0 +1,75 @@
+/*
+ * arch/arm/include/asm/hardware/iop3xx-gpio.h
+ *
+ * IOP3xx GPIO wrappers
+ *
+ * Copyright (c) 2008 Arnaud Patard <arnaud.patard@rtp-net.org>
+ * Based on IXP4XX gpio.h file
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __ASM_ARM_HARDWARE_IOP3XX_GPIO_H
+#define __ASM_ARM_HARDWARE_IOP3XX_GPIO_H
+
+#include <mach/hardware.h>
+#include <asm-generic/gpio.h>
+
+#define __ARM_GPIOLIB_COMPLEX
+
+#define IOP3XX_N_GPIOS 8
+
+static inline int gpio_get_value(unsigned gpio)
+{
+ if (gpio > IOP3XX_N_GPIOS)
+ return __gpio_get_value(gpio);
+
+ return gpio_line_get(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ if (gpio > IOP3XX_N_GPIOS) {
+ __gpio_set_value(gpio, value);
+ return;
+ }
+ gpio_line_set(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+ if (gpio < IOP3XX_N_GPIOS)
+ return 0;
+ else
+ return __gpio_cansleep(gpio);
+}
+
+/*
+ * The GPIOs do not generate any interrupts.
+ * Note: the manuals are not clear about this.
+ */
+static inline int gpio_to_irq(int gpio)
+{
+ return -EINVAL;
+}
+
+static inline int irq_to_gpio(int gpio)
+{
+ return -EINVAL;
+}
+
+#endif
+
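These wrappers dispatch on the line number: lines 0..7 are driven through the IOP3xx GPOE/GPID/GPOD registers via gpio_line_*(), anything above falls through to gpiolib. A hypothetical board-code sketch of the intended use (GPIO_OUT and GPIO_HIGH come from iop3xx.h below):

static void iop_led_on(void)
{
	gpio_line_config(IOP3XX_GPIO_LINE(0), GPIO_OUT);
	gpio_set_value(0, GPIO_HIGH);	/* 0 < IOP3XX_N_GPIOS, so this hits gpio_line_set() */
}

Note the boundary tests mix > and < against IOP3XX_N_GPIOS rather than using >=, so line 8 takes the on-chip path in gpio_get_value() but the gpiolib path in gpio_cansleep(); callers should stick to lines 0..7 for on-chip GPIOs.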
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 2594a95..423744b 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -18,9 +18,16 @@
/*
* IOP3XX GPIO handling
*/
+#define GPIO_IN 0
+#define GPIO_OUT 1
+#define GPIO_LOW 0
+#define GPIO_HIGH 1
#define IOP3XX_GPIO_LINE(x) (x)
#ifndef __ASSEMBLY__
+extern void gpio_line_config(int line, int direction);
+extern int gpio_line_get(int line);
+extern void gpio_line_set(int line, int value);
extern int init_atu;
extern int iop3xx_get_init_atu(void);
#endif
@@ -161,6 +168,11 @@ extern int iop3xx_get_init_atu(void);
/* PERCR0 DOESN'T EXIST - index from 1! */
#define IOP3XX_PERCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
+/* General Purpose I/O */
+#define IOP3XX_GPOE (volatile u32 *)IOP3XX_GPIO_REG(0x0000)
+#define IOP3XX_GPID (volatile u32 *)IOP3XX_GPIO_REG(0x0004)
+#define IOP3XX_GPOD (volatile u32 *)IOP3XX_GPIO_REG(0x0008)
+
/* Timers */
#define IOP3XX_TU_TMR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0000)
#define IOP3XX_TU_TMR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0004)
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
index 250760e..122f86d 100644
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -82,6 +82,8 @@ struct iop_adma_chan {
* @slot_cnt: total slots used in a transaction (group of operations)
* @slots_per_op: number of slots per operation
* @idx: pool index
+ * @unmap_src_cnt: number of xor sources
+ * @unmap_len: transaction bytecount
* @tx_list: list of descriptors that are associated with one operation
* @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction
@@ -97,6 +99,8 @@ struct iop_adma_desc_slot {
u16 slot_cnt;
u16 slots_per_op;
u16 idx;
+ u16 unmap_src_cnt;
+ size_t unmap_len;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
union {
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3c597c2..d070741 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,11 +24,9 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
-#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -374,13 +372,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
#define BIOVEC_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-struct bio_vec;
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 0a9d5dd..48066ce 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,7 +11,6 @@
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
-#include <asm/opcodes.h>
/*
* GDB assumes that we're a user process being debugged, so
@@ -42,7 +41,7 @@
static inline void arch_kgdb_breakpoint(void)
{
- asm(__inst_arm(0xe7ffdeff));
+ asm(".word 0xe7ffdeff");
}
extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 1d3153c..64e9696 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -57,7 +57,6 @@
* TSC: Trap SMC
* TSW: Trap cache operations by set/way
* TWI: Trap WFI
- * TWE: Trap WFE
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
* FB: Force broadcast of all maintenance operations
@@ -68,7 +67,7 @@
*/
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_TWE | HCR_SWIO | HCR_TIDCP)
+ HCR_SWIO | HCR_TIDCP)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
/* System Control Register (SCTLR) bits */
@@ -96,12 +95,12 @@
#define TTBCR_IRGN1 (3 << 24)
#define TTBCR_EPD1 (1 << 23)
#define TTBCR_A1 (1 << 22)
-#define TTBCR_T1SZ (7 << 16)
+#define TTBCR_T1SZ (3 << 16)
#define TTBCR_SH0 (3 << 12)
#define TTBCR_ORGN0 (3 << 10)
#define TTBCR_IRGN0 (3 << 8)
#define TTBCR_EPD0 (1 << 7)
-#define TTBCR_T0SZ (7 << 0)
+#define TTBCR_T0SZ 3
#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
/* Hyp System Trap Register */
@@ -209,8 +208,6 @@
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
-#define HSR_WFI_IS_WFE (1U << 0)
-
#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
#define HSR_DABT_S1PTW (1U << 7)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 661da11..a2f43dd 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -39,7 +39,7 @@
#define c6_IFAR 17 /* Instruction Fault Address Register */
#define c7_PAR 18 /* Physical Address Register */
#define c7_PAR_high 19 /* PAR top 32 bits */
-#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */
+#define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */
#define c10_PRRR 21 /* Primary Region Remap Register */
#define c10_NMRR 22 /* Normal Memory Remap Register */
#define c12_VBAR 23 /* Vector Base Address Register */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 0fa90c9..a464e8d 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -157,55 +157,4 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
-static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.cp15[c0_MPIDR];
-}
-
-static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-}
-
-static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
-{
- return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
-}
-
-static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return be16_to_cpu(data & 0xffff);
- default:
- return be32_to_cpu(data);
- }
- }
-
- return data; /* Leave LE untouched */
-}
-
-static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_be16(data & 0xffff);
- default:
- return cpu_to_be32(data);
- }
- }
-
- return data; /* Leave LE untouched */
-}
-
#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 8a6f6db..7d22517 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -38,6 +38,11 @@
#define KVM_VCPU_MAX_FEATURES 1
+/* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+
#include <kvm/arm_vgic.h>
struct kvm_vcpu;
@@ -149,7 +154,6 @@ struct kvm_vcpu_stat {
struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
struct kvm_one_reg;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 77de4a4..9b28c41 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -62,12 +62,6 @@ phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
-static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
-{
- *pmd = new_pmd;
- flush_pmd_entry(pmd);
-}
-
static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
*pte = new_pte;
@@ -109,15 +103,9 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
pte_val(*pte) |= L_PTE_S2_RDWR;
}
-static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
-{
- pmd_val(*pmd) |= L_PMD_S2_RDWR;
-}
-
struct kvm;
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
- unsigned long size)
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
{
/*
* If we are going to insert an instruction page and the icache is
@@ -132,7 +120,8 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
*/
if (icache_is_pipt()) {
- __cpuc_coherent_user_range(hva, hva + size);
+ unsigned long hva = gfn_to_hva(kvm, gfn);
+ __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
} else if (!icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 17a3fa2..402a2bc 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -49,7 +49,6 @@ struct machine_desc {
bool (*smp_init)(void);
void (*fixup)(struct tag *, char **,
struct meminfo *);
- void (*init_meminfo)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
void (*init_early)(void);
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 7fc4278..454d642 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -106,4 +106,8 @@ extern int dc21285_setup(int nr, struct pci_sys_data *);
extern void dc21285_preinit(void);
extern void dc21285_postinit(void);
+extern struct pci_ops via82c505_ops;
+extern int via82c505_setup(int nr, struct pci_sys_data *);
+extern void via82c505_init(void *sysdata);
+
#endif /* __ASM_MACH_PCI_H */
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 608516e..fc82a88 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -42,14 +42,6 @@ extern void mcpm_entry_point(void);
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
/*
- * This sets an early poke i.e a value to be poked into some address
- * from very early assembly code before the CPU is ungated. The
- * address must be physical, and if 0 then nothing will happen.
- */
-void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
- unsigned long poke_phys_addr, unsigned long poke_val);
-
-/*
* CPU/cluster power operations API for higher subsystems to use.
*/
@@ -89,40 +81,10 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
*
* This will return if mcpm_platform_register() has not been called
* previously in which case the caller should take appropriate action.
- *
- * On success, the CPU is not guaranteed to be truly halted until
- * mcpm_cpu_power_down_finish() subsequently returns non-zero for the
- * specified cpu. Until then, other CPUs should make sure they do not
- * trash memory the target CPU might be executing/accessing.
*/
void mcpm_cpu_power_down(void);
/**
- * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
- * make sure it is powered off
- *
- * @cpu: CPU number within given cluster
- * @cluster: cluster number for the CPU
- *
- * Call this function to ensure that a pending powerdown has taken
- * effect and the CPU is safely parked before performing non-mcpm
- * operations that may affect the CPU (such as kexec trashing the
- * kernel text).
- *
- * It is *not* necessary to call this function if you only need to
- * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
- * event.
- *
- * Do not call this function unless the specified CPU has already
- * called mcpm_cpu_power_down() or has committed to doing so.
- *
- * @return:
- * - zero if the CPU is in a safely parked state
- * - nonzero otherwise (e.g., timeout)
- */
-int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
-
-/**
* mcpm_cpu_suspend - bring the calling CPU in a suspended state
*
* @expected_residency: duration in microseconds the CPU is expected
@@ -164,7 +126,6 @@ int mcpm_cpu_powered_up(void);
struct mcpm_platform_ops {
int (*power_up)(unsigned int cpu, unsigned int cluster);
void (*power_down)(void);
- int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
void (*suspend)(u64);
void (*powered_up)(void);
};
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 9ecccc8..e750a93 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -172,13 +172,8 @@
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
-#define __PV_BITS_7_0 0x81
-
-extern u64 __pv_phys_offset;
-extern u64 __pv_offset;
-extern void fixup_pv_table(const void *, unsigned long);
-extern const void *__pv_table_begin, *__pv_table_end;
+extern unsigned long __pv_phys_offset;
#define PHYS_OFFSET __pv_phys_offset
#define __pv_stub(from,to,instr,type) \
@@ -190,65 +185,22 @@ extern const void *__pv_table_begin, *__pv_table_end;
: "=r" (to) \
: "r" (from), "I" (type))
-#define __pv_stub_mov_hi(t) \
- __asm__ volatile("@ __pv_stub_mov\n" \
- "1: mov %R0, %1\n" \
- " .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
- " .popsection\n" \
- : "=r" (t) \
- : "I" (__PV_BITS_7_0))
-
-#define __pv_add_carry_stub(x, y) \
- __asm__ volatile("@ __pv_add_carry_stub\n" \
- "1: adds %Q0, %1, %2\n" \
- " adc %R0, %R0, #0\n" \
- " .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
- " .popsection\n" \
- : "+r" (y) \
- : "r" (x), "I" (__PV_BITS_31_24) \
- : "cc")
-
-static inline phys_addr_t __virt_to_phys(unsigned long x)
+static inline unsigned long __virt_to_phys(unsigned long x)
{
- phys_addr_t t;
-
- if (sizeof(phys_addr_t) == 4) {
- __pv_stub(x, t, "add", __PV_BITS_31_24);
- } else {
- __pv_stub_mov_hi(t);
- __pv_add_carry_stub(x, t);
- }
+ unsigned long t;
+ __pv_stub(x, t, "add", __PV_BITS_31_24);
return t;
}
-static inline unsigned long __phys_to_virt(phys_addr_t x)
+static inline unsigned long __phys_to_virt(unsigned long x)
{
unsigned long t;
-
- /*
- * 'unsigned long' cast discard upper word when
- * phys_addr_t is 64 bit, and makes sure that inline
- * assembler expression receives 32 bit argument
- * in place where 'r' 32 bit operand is expected.
- */
- __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
+ __pv_stub(x, t, "sub", __PV_BITS_31_24);
return t;
}
-
#else
-
-static inline phys_addr_t __virt_to_phys(unsigned long x)
-{
- return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
-}
-
-static inline unsigned long __phys_to_virt(phys_addr_t x)
-{
- return x - PHYS_OFFSET + PAGE_OFFSET;
-}
-
+#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
#endif
#endif /* __ASSEMBLY__ */
@@ -286,33 +238,16 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
static inline void *phys_to_virt(phys_addr_t x)
{
- return (void *)__phys_to_virt(x);
+ return (void *)(__phys_to_virt((unsigned long)(x)));
}
/*
* Drivers should NOT use these either.
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
-#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
+#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
-
-/*
- * These are for systems that have a hardware interconnect supported alias of
- * physical memory for idmap purposes. Most cases should leave these
- * untouched.
- */
-static inline phys_addr_t __virt_to_idmap(unsigned long x)
-{
- if (arch_virt_to_idmap)
- return arch_virt_to_idmap(x);
- else
- return __virt_to_phys(x);
-}
-
-#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
-
/*
* Virtual <-> DMA view memory address translations
* Again, these are *only* valid on the kernel direct mapped RAM
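__pv_stub() is the heart of the runtime phys/virt patching this hunk simplifies: each add/sub emitted with the placeholder immediate __PV_BITS_31_24 records its own address in the .pv_table section, and early boot code rewrites the 8-bit rotated immediate of every recorded instruction once the real PHYS_OFFSET is known (0x81000000 is chosen only because it encodes cleanly as an ARM immediate; it is never used as a real offset). What the patched instructions compute, as a model only:

static inline unsigned long model_virt_to_phys(unsigned long v)
{
	return v + (PHYS_OFFSET - PAGE_OFFSET);	/* the patched "add" */
}

static inline unsigned long model_phys_to_virt(unsigned long p)
{
	return p - (PHYS_OFFSET - PAGE_OFFSET);	/* the patched "sub" */
}

The removed __pv_stub_mov_hi()/__pv_add_carry_stub() pair extended the same trick to 64-bit phys_addr_t under LPAE by patching the high word and the carry separately.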
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 64fd151..6f18da0 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -16,7 +16,7 @@ typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
+#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
#else
#define ASID(mm) (0)
#endif
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 78a7793..943504f 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -102,14 +102,12 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
#else
pte = alloc_pages(PGALLOC_GFP, 0);
#endif
- if (!pte)
- return NULL;
- if (!PageHighMem(pte))
- clean_pte_table(page_address(pte));
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
+ if (pte) {
+ if (!PageHighMem(pte))
+ clean_pte_table(page_address(pte));
+ pgtable_page_ctor(pte);
}
+
return pte;
}
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 86a659a..f97ee02 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -181,13 +181,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
-/*
- * We don't have huge page support for short descriptors, for the moment
- * define empty stubs for use by pin_page_for_write.
- */
-#define pmd_hugewillfault(pmd) (0)
-#define pmd_thp_or_huge(pmd) (0)
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 4f95039..5689c18 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -126,8 +126,6 @@
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
-#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
-
/*
* Hyp-mode PL2 PTE definitions for LPAE.
*/
@@ -208,9 +206,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
-#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
-#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 1571d12..be956db 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,7 +61,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
* mapping to be mapped at. This is particularly important for
* non-high vector CPUs.
*/
-#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
+#define FIRST_USER_ADDRESS PAGE_SIZE
/*
* Use TASK_SIZE as the ceiling argument for free_pgtables() and
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index c3d5fc1..413f387 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -22,7 +22,6 @@
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
-#include <asm/unified.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -88,17 +87,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
-#ifdef CONFIG_SMP
-#define __ALT_SMP_ASM(smp, up) \
- "9998: " smp "\n" \
- " .pushsection \".alt.smp.init\", \"a\"\n" \
- " .long 9998b\n" \
- " " up "\n" \
- " .popsection\n"
-#else
-#define __ALT_SMP_ASM(smp, up) up
-#endif
-
/*
* Prefetching support - only ARMv5.
*/
@@ -109,22 +97,17 @@ static inline void prefetch(const void *ptr)
{
__asm__ __volatile__(
"pld\t%a0"
- :: "p" (ptr));
+ :
+ : "p" (ptr)
+ : "cc");
}
-#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
-static inline void prefetchw(const void *ptr)
-{
- __asm__ __volatile__(
- ".arch_extension mp\n"
- __ALT_SMP_ASM(
- WASM(pldw) "\t%a0",
- WASM(pld) "\t%a0"
- )
- :: "p" (ptr));
-}
-#endif
+#define prefetchw(ptr) prefetch(ptr)
+
+#define ARCH_HAS_SPINLOCK_PREFETCH
+#define spin_lock_prefetch(x) do { } while (0)
+
#endif
#define HAVE_ARCH_PICK_MMAP_LAYOUT
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index b681575..4a2985e 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -11,6 +11,8 @@
#ifndef __ASMARM_PROM_H
#define __ASMARM_PROM_H
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
#ifdef CONFIG_OF
extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
new file mode 100644
index 0000000..2389b71
--- /dev/null
+++ b/arch/arm/include/asm/sched_clock.h
@@ -0,0 +1,4 @@
+/* You shouldn't include this file. Use linux/sched_clock.h instead.
+ * Temporary file until all asm/sched_clock.h users are gone
+ */
+#include <linux/sched_clock.h>
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 8d6a089..c50f0560 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -49,7 +49,7 @@ extern struct meminfo meminfo;
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size
-extern int arm_add_memory(u64 start, u64 size);
+extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
extern void early_print(const char *str, ...);
extern void dump_machine_table(void);
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 22a3b9b..a8cae71c 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -84,8 +84,6 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
-extern int register_ipi_completion(struct completion *completion, int cpu);
-
struct smp_operations {
#ifdef CONFIG_SMP
/*
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c607..4f2c280 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,13 +5,21 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
-#include <linux/prefetch.h>
+#include <asm/processor.h>
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
+#define ALT_SMP(smp, up) \
+ "9998: " smp "\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 9998b\n" \
+ " " up "\n" \
+ " .popsection\n"
+
#ifdef CONFIG_THUMB2_KERNEL
+#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
@@ -23,18 +31,17 @@
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
-#define WFE(cond) __ALT_SMP_ASM( \
+#define WFE(cond) ALT_SMP( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
-#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
+#define SEV ALT_SMP("sev", "nop")
+#define WFE(cond) ALT_SMP("wfe" cond, "nop")
#endif
-#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
-
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
@@ -70,7 +77,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
u32 newval;
arch_spinlock_t lockval;
- prefetchw(&lock->slock);
__asm__ __volatile__(
"1: ldrex %0, [%3]\n"
" add %1, %0, %4\n"
@@ -94,7 +100,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned long contended, res;
u32 slock;
- prefetchw(&lock->slock);
do {
__asm__ __volatile__(
" ldrex %0, [%3]\n"
@@ -122,14 +127,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
dsb_sev();
}
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return lock.tickets.owner == lock.tickets.next;
-}
-
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+ struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+ return tickets.owner != tickets.next;
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -151,7 +152,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
- prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -170,7 +170,6 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long contended, res;
- prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
@@ -204,7 +203,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
}
/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
/*
* Read locks are a bit more hairy:
@@ -222,7 +221,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
- prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
@@ -243,7 +241,6 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
smp_mb();
- prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
@@ -262,7 +259,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long contended, res;
- prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
@@ -284,7 +280,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
+#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
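For orientation, the locking side above is a ticket lock: each acquirer atomically takes the current "next" value and increments it, spins (in WFE) until "owner" catches up, and unlock bumps "owner" and issues dsb_sev() to wake the waiters. A conceptual model, assuming the same owner/next layout as arch_spinlock_t (the real code does the take-a-ticket step with ldrex/strex, not a plain increment):

struct model_tickets { unsigned short owner, next; };

static void model_spin_lock(volatile struct model_tickets *t)
{
	unsigned short ticket = t->next++;	/* really ldrex/add/strex */

	while (t->owner != ticket)
		;				/* really WFE(), woken by SEV */
}

static void model_spin_unlock(volatile struct model_tickets *t)
{
	t->owner++;				/* followed by dsb_sev() */
}

This also explains arch_spin_is_locked(): the lock is free exactly when owner == next.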
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 47663fc..b262d2f 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -25,7 +25,7 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
typedef struct {
- u32 lock;
+ volatile unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 71a06b2..df5e13d 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -141,6 +141,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#endif
/*
+ * We use bit 30 of the preempt_count to indicate that kernel
+ * preemption is occurring. See <asm/hardirq.h>.
+ */
+#define PREEMPT_ACTIVE 0x40000000
+
+/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
* TIF_SYSCALL_AUDIT - syscall auditing active
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index def9e57..3896026 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,6 +560,37 @@ static inline void __flush_bp_all(void)
asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}
+#include <asm/cputype.h>
+#ifdef CONFIG_ARM_ERRATA_798181
+static inline int erratum_a15_798181(void)
+{
+ unsigned int midr = read_cpuid_id();
+
+ /* Cortex-A15 r0p0..r3p2 affected */
+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+ return 0;
+ return 1;
+}
+
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+ /*
+ * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+ */
+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+ dsb(ish);
+}
+#else
+static inline int erratum_a15_798181(void)
+{
+ return 0;
+}
+
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
/*
* flush_pmd_entry
*
@@ -666,21 +697,4 @@ extern void flush_bp_all(void);
#endif
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_ARM_ERRATA_798181
-extern void erratum_a15_798181_init(void);
-#else
-static inline void erratum_a15_798181_init(void) {}
-#endif
-extern bool (*erratum_a15_798181_handler)(void);
-
-static inline bool erratum_a15_798181(void)
-{
- if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
- erratum_a15_798181_handler))
- return erratum_a15_798181_handler();
- return false;
-}
-#endif
-
#endif
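The constants in erratum_a15_798181() decode against the MIDR layout: implementer [31:24], variant [23:20], architecture [19:16], part number [15:4], revision [3:0]. Masking with 0xff0ffff0 ignores variant and revision, so the first comparison matches any Cortex-A15 (implementer 0x41, part 0xC0F), and the midr > 0x413fc0f2 test then excludes r3p3 and later, leaving exactly r0p0..r3p2 flagged as affected. Illustrative helpers only:

static inline unsigned int midr_variant(unsigned int midr)	/* the N in rNpM */
{
	return (midr >> 20) & 0xf;
}

static inline unsigned int midr_revision(unsigned int midr)	/* the M in rNpM */
{
	return midr & 0xf;
}

For 0x413fc0f2: variant 3, revision 2, i.e. r3p2, the last affected stepping.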
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index b88beab..f5989f4 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -38,8 +38,6 @@
#ifdef __ASSEMBLY__
#define W(instr) instr.w
#define BSYM(sym) sym + 1
-#else
-#define WASM(instr) #instr ".w"
#endif
#else /* !CONFIG_THUMB2_KERNEL */
@@ -52,8 +50,6 @@
#ifdef __ASSEMBLY__
#define W(instr) instr
#define BSYM(sym) sym
-#else
-#define WASM(instr) #instr
#endif
#endif /* CONFIG_THUMB2_KERNEL */
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 1317ee4..d7ab99a 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,6 +16,4 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return PARAVIRT_LAZY_NONE;
}
-extern struct dma_map_ops *xen_dma_ops;
-
#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
deleted file mode 100644
index 1109017..0000000
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-attrs.h>
-#include <linux/dma-mapping.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
-{
- return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- if (__generic_dma_ops(hwdev)->unmap_page)
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (__generic_dma_ops(hwdev)->sync_single_for_device)
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 75579a9..359a7b5 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
#include <linux/pfn.h>
#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <xen/xen.h>
#include <xen/interface/grant_table.h>
+#define pfn_to_mfn(pfn) (pfn)
#define phys_to_machine_mapping_valid(pfn) (1)
+#define mfn_to_pfn(mfn) (mfn)
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
#define pte_mfn pte_pfn
@@ -32,38 +32,6 @@ typedef struct xpaddr {
#define INVALID_P2M_ENTRY (~0UL)
-unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
-extern struct rb_root phys_to_mach;
-
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
-{
- unsigned long mfn;
-
- if (phys_to_mach.rb_node != NULL) {
- mfn = __pfn_to_mfn(pfn);
- if (mfn != INVALID_P2M_ENTRY)
- return mfn;
- }
-
- return pfn;
-}
-
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
-{
- unsigned long pfn;
-
- if (phys_to_mach.rb_node != NULL) {
- pfn = __mfn_to_pfn(mfn);
- if (pfn != INVALID_P2M_ENTRY)
- return pfn;
- }
-
- return mfn;
-}
-
-#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
-
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -108,9 +76,11 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte)
return 0;
}
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
- unsigned long nr_pages);
+static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+ return true;
+}
static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{