From a16ede35a2659170c855c5d267776666c0630f1f Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 16 Jan 2011 17:59:44 +0000
Subject: ARM: bitops: ensure set/clear/change bitops take a word-aligned pointer

Add additional instructions to our assembly bitops functions to ensure that they only operate on word-aligned pointers. This will be necessary when we switch these operations to use the word-based exclusive operations.

Signed-off-by: Russell King

diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index d422529..bd00551f 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,6 +1,8 @@
 #if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
 	.macro	bitop, instr
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
 	and	r3, r0, #7		@ Get bit offset
 	add	r1, r1, r0, lsr #3	@ Get byte offset
@@ -14,6 +16,8 @@
 	.endm

 	.macro	testop, instr, store
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	and	r3, r0, #7		@ Get bit offset
 	mov	r2, #1
 	add	r1, r1, r0, lsr #3	@ Get byte offset
@@ -32,6 +36,8 @@
 	.endm
 #else
 	.macro	bitop, instr
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	and	r2, r0, #7
 	mov	r3, #1
 	mov	r3, r3, lsl r2
@@ -52,6 +58,8 @@
  * to avoid dirtying the data cache.
  */
 	.macro	testop, instr, store
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	add	r1, r1, r0, lsr #3
 	and	r3, r0, #7
 	mov	r0, #1
-- cgit v0.10.2


From 6323f0ccedf756dfe5f46549cec69a2d6d97937b Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 16 Jan 2011 18:02:17 +0000
Subject: ARM: bitops: switch set/clear/change bitops to use ldrex/strex

Switch the set/clear/change bitops to use the word-based exclusive operations, which are available on a wider range of ARM architectures than the byte-based exclusive operations.

Tested record:
- Nicolas Pitre: ext3,rw,le
- Sourav Poddar: nfs,le
- Will Deacon: ext3,rw,le
- Tony Lindgren: ext3+nfs,le

Reviewed-by: Nicolas Pitre
Tested-by: Sourav Poddar
Tested-by: Will Deacon
Tested-by: Tony Lindgren
Signed-off-by: Russell King

diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 7b1bb2b..af54ed1 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -149,14 +149,18 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
  */

 /*
+ * Native endian assembly bitops.  nr = 0 -> word 0 bit 0.
+ */
+extern void _set_bit(int nr, volatile unsigned long * p);
+extern void _clear_bit(int nr, volatile unsigned long * p);
+extern void _change_bit(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+
+/*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
*/ -extern void _set_bit_le(int nr, volatile unsigned long * p); -extern void _clear_bit_le(int nr, volatile unsigned long * p); -extern void _change_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_change_bit_le(int nr, volatile unsigned long * p); extern int _find_first_zero_bit_le(const void * p, unsigned size); extern int _find_next_zero_bit_le(const void * p, int size, int offset); extern int _find_first_bit_le(const unsigned long *p, unsigned size); @@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); /* * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. */ -extern void _set_bit_be(int nr, volatile unsigned long * p); -extern void _clear_bit_be(int nr, volatile unsigned long * p); -extern void _change_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_set_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_change_bit_be(int nr, volatile unsigned long * p); extern int _find_first_zero_bit_be(const void * p, unsigned size); extern int _find_next_zero_bit_be(const void * p, int size, int offset); extern int _find_first_bit_be(const unsigned long *p, unsigned size); @@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); /* * The __* form of bitops are non-atomic and may be reordered. */ -#define ATOMIC_BITOP_LE(name,nr,p) \ - (__builtin_constant_p(nr) ? \ - ____atomic_##name(nr, p) : \ - _##name##_le(nr,p)) - -#define ATOMIC_BITOP_BE(name,nr,p) \ - (__builtin_constant_p(nr) ? \ - ____atomic_##name(nr, p) : \ - _##name##_be(nr,p)) +#define ATOMIC_BITOP(name,nr,p) \ + (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p)) #else -#define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p) -#define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p) +#define ATOMIC_BITOP(name,nr,p) _##name(nr,p) #endif -#define NONATOMIC_BITOP(name,nr,p) \ - (____nonatomic_##name(nr, p)) +/* + * Native endian atomic definitions. + */ +#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p) +#define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p) +#define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p) +#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p) +#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p) +#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p) #ifndef __ARMEB__ /* * These are the little endian, atomic definitions. */ -#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) -#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) -#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) -#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) -#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) -#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) #define find_first_bit(p,sz) _find_first_bit_le(p,sz) @@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); #define WORD_BITOFF_TO_LE(x) ((x)) #else - /* * These are the big endian, atomic definitions. 
*/ -#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p) -#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p) -#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p) -#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) -#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) -#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) #define find_first_bit(p,sz) _find_first_bit_be(p,sz) diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index e5e1e53..d5d4185 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp); #endif /* bitops */ -EXPORT_SYMBOL(_set_bit_le); -EXPORT_SYMBOL(_test_and_set_bit_le); -EXPORT_SYMBOL(_clear_bit_le); -EXPORT_SYMBOL(_test_and_clear_bit_le); -EXPORT_SYMBOL(_change_bit_le); -EXPORT_SYMBOL(_test_and_change_bit_le); +EXPORT_SYMBOL(_set_bit); +EXPORT_SYMBOL(_test_and_set_bit); +EXPORT_SYMBOL(_clear_bit); +EXPORT_SYMBOL(_test_and_clear_bit); +EXPORT_SYMBOL(_change_bit); +EXPORT_SYMBOL(_test_and_change_bit); EXPORT_SYMBOL(_find_first_zero_bit_le); EXPORT_SYMBOL(_find_next_zero_bit_le); EXPORT_SYMBOL(_find_first_bit_le); EXPORT_SYMBOL(_find_next_bit_le); #ifdef __ARMEB__ -EXPORT_SYMBOL(_set_bit_be); -EXPORT_SYMBOL(_test_and_set_bit_be); -EXPORT_SYMBOL(_clear_bit_be); -EXPORT_SYMBOL(_test_and_clear_bit_be); -EXPORT_SYMBOL(_change_bit_be); -EXPORT_SYMBOL(_test_and_change_bit_be); EXPORT_SYMBOL(_find_first_zero_bit_be); EXPORT_SYMBOL(_find_next_zero_bit_be); EXPORT_SYMBOL(_find_first_bit_be); diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index bd00551f..a9d9d15 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h @@ -1,15 +1,15 @@ - -#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K) +#if __LINUX_ARM_ARCH__ >= 6 .macro bitop, instr ands ip, r1, #3 strneb r1, [ip] @ assert word-aligned mov r2, #1 - and r3, r0, #7 @ Get bit offset - add r1, r1, r0, lsr #3 @ Get byte offset + and r3, r0, #31 @ Get bit offset + mov r0, r0, lsr #5 + add r1, r1, r0, lsl #2 @ Get word offset mov r3, r2, lsl r3 -1: ldrexb r2, [r1] +1: ldrex r2, [r1] \instr r2, r2, r3 - strexb r0, r2, [r1] + strex r0, r2, [r1] cmp r0, #0 bne 1b mov pc, lr @@ -18,15 +18,16 @@ .macro testop, instr, store ands ip, r1, #3 strneb r1, [ip] @ assert word-aligned - and r3, r0, #7 @ Get bit offset mov r2, #1 - add r1, r1, r0, lsr #3 @ Get byte offset + and r3, r0, #31 @ Get bit offset + mov r0, r0, lsr #5 + add r1, r1, r0, lsl #2 @ Get word offset mov r3, r2, lsl r3 @ create mask smp_dmb -1: ldrexb r2, [r1] +1: ldrex r2, [r1] ands r0, r2, r3 @ save old value of bit - \instr r2, r2, r3 @ toggle bit - strexb ip, r2, [r1] + \instr r2, r2, r3 @ toggle bit + strex ip, r2, [r1] cmp ip, #0 bne 1b smp_dmb @@ -38,13 +39,14 @@ .macro bitop, instr ands ip, r1, #3 strneb r1, [ip] @ assert word-aligned - and r2, r0, #7 + and r2, r0, #31 + mov r0, r0, lsr #5 mov r3, #1 mov r3, r3, lsl r2 save_and_disable_irqs ip - ldrb r2, [r1, r0, lsr #3] + ldr r2, [r1, r0, lsl #2] \instr r2, r2, r3 - strb r2, [r1, r0, lsr #3] + str r2, [r1, r0, lsl #2] restore_irqs ip mov pc, lr .endm @@ -60,11 +62,11 @@ .macro testop, instr, store ands ip, r1, #3 strneb r1, [ip] @ assert word-aligned - add r1, r1, r0, lsr #3 - and r3, r0, #7 - mov r0, #1 + and r3, r0, #31 + mov r0, r0, lsr #5 save_and_disable_irqs ip - ldrb r2, [r1] + ldr r2, [r1, r0, lsl #2]! 
+ mov r0, #1 tst r2, r0, lsl r3 \instr r2, r2, r0, lsl r3 \store r2, [r1] diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S index 80f3115..68ed5b6 100644 --- a/arch/arm/lib/changebit.S +++ b/arch/arm/lib/changebit.S @@ -12,12 +12,6 @@ #include "bitops.h" .text -/* Purpose : Function to change a bit - * Prototype: int change_bit(int bit, void *addr) - */ -ENTRY(_change_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_change_bit_le) +ENTRY(_change_bit) bitop eor -ENDPROC(_change_bit_be) -ENDPROC(_change_bit_le) +ENDPROC(_change_bit) diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S index 1a63e43..4c04c3b 100644 --- a/arch/arm/lib/clearbit.S +++ b/arch/arm/lib/clearbit.S @@ -12,13 +12,6 @@ #include "bitops.h" .text -/* - * Purpose : Function to clear a bit - * Prototype: int clear_bit(int bit, void *addr) - */ -ENTRY(_clear_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_clear_bit_le) +ENTRY(_clear_bit) bitop bic -ENDPROC(_clear_bit_be) -ENDPROC(_clear_bit_le) +ENDPROC(_clear_bit) diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S index 1dd7176..bbee5c6 100644 --- a/arch/arm/lib/setbit.S +++ b/arch/arm/lib/setbit.S @@ -12,13 +12,6 @@ #include "bitops.h" .text -/* - * Purpose : Function to set a bit - * Prototype: int set_bit(int bit, void *addr) - */ -ENTRY(_set_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_set_bit_le) +ENTRY(_set_bit) bitop orr -ENDPROC(_set_bit_be) -ENDPROC(_set_bit_le) +ENDPROC(_set_bit) diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S index 5c98dc5..15a4d43 100644 --- a/arch/arm/lib/testchangebit.S +++ b/arch/arm/lib/testchangebit.S @@ -12,9 +12,6 @@ #include "bitops.h" .text -ENTRY(_test_and_change_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_test_and_change_bit_le) - testop eor, strb -ENDPROC(_test_and_change_bit_be) -ENDPROC(_test_and_change_bit_le) +ENTRY(_test_and_change_bit) + testop eor, str +ENDPROC(_test_and_change_bit) diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S index 543d709..521b66b 100644 --- a/arch/arm/lib/testclearbit.S +++ b/arch/arm/lib/testclearbit.S @@ -12,9 +12,6 @@ #include "bitops.h" .text -ENTRY(_test_and_clear_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_test_and_clear_bit_le) - testop bicne, strneb -ENDPROC(_test_and_clear_bit_be) -ENDPROC(_test_and_clear_bit_le) +ENTRY(_test_and_clear_bit) + testop bicne, strne +ENDPROC(_test_and_clear_bit) diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S index 0b3f390..1c98cc2 100644 --- a/arch/arm/lib/testsetbit.S +++ b/arch/arm/lib/testsetbit.S @@ -12,9 +12,6 @@ #include "bitops.h" .text -ENTRY(_test_and_set_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_test_and_set_bit_le) - testop orreq, streqb -ENDPROC(_test_and_set_bit_be) -ENDPROC(_test_and_set_bit_le) +ENTRY(_test_and_set_bit) + testop orreq, streq +ENDPROC(_test_and_set_bit) -- cgit v0.10.2 From 000d9c78eb5cd7f18e3d6a381d66e606bc9b8196 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 15 Jan 2011 16:22:12 +0000 Subject: ARM: v6k: remove CPU_32v6K dependencies in asm/spinlock.h SMP requires at least the ARMv6K extensions to be present, so if we're running on SMP, the WFE and SEV instructions must be available. However, when we run on UP, the v6K extensions may not be available, and so we don't want WFE/SEV to be in the instruction stream. 
Use the SMP alternatives infrastructure to replace these instructions with NOPs if we build for SMP but run on UP. Tested-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 17eb355..da1af52 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,17 +5,36 @@ #error SMP not supported on pre-ARMv6 CPUs #endif +/* + * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K + * extensions, so when running on UP, we have to patch these instructions away. + */ +#define ALT_SMP(smp, up) \ + "9998: " smp "\n" \ + " .pushsection \".alt.smp.init\", \"a\"\n" \ + " .long 9998b\n" \ + " " up "\n" \ + " .popsection\n" + +#ifdef CONFIG_THUMB2_KERNEL +#define SEV ALT_SMP("sev.w", "nop.w") +#define WFE(cond) ALT_SMP("wfe" cond ".w", "nop.w") +#else +#define SEV ALT_SMP("sev", "nop") +#define WFE(cond) ALT_SMP("wfe" cond, "nop") +#endif + static inline void dsb_sev(void) { #if __LINUX_ARM_ARCH__ >= 7 __asm__ __volatile__ ( "dsb\n" - "sev" + SEV ); -#elif defined(CONFIG_CPU_32v6K) +#else __asm__ __volatile__ ( "mcr p15, 0, %0, c7, c10, 4\n" - "sev" + SEV : : "r" (0) ); #endif @@ -46,9 +65,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: ldrex %0, [%1]\n" " teq %0, #0\n" -#ifdef CONFIG_CPU_32v6K -" wfene\n" -#endif + WFE("ne") " strexeq %0, %2, [%1]\n" " teqeq %0, #0\n" " bne 1b" @@ -107,9 +124,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) __asm__ __volatile__( "1: ldrex %0, [%1]\n" " teq %0, #0\n" -#ifdef CONFIG_CPU_32v6K -" wfene\n" -#endif + WFE("ne") " strexeq %0, %2, [%1]\n" " teq %0, #0\n" " bne 1b" @@ -176,9 +191,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) "1: ldrex %0, [%2]\n" " adds %0, %0, #1\n" " strexpl %1, %0, [%2]\n" -#ifdef CONFIG_CPU_32v6K -" wfemi\n" -#endif + WFE("mi") " rsbpls %0, %1, #0\n" " bmi 1b" : "=&r" (tmp), "=&r" (tmp2) -- cgit v0.10.2 From e399b1a4e1d205bdc816cb550d2064f2eb1ddc4c Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 15:08:32 +0000 Subject: ARM: v6k: introduce CPU_V6K option Introduce a CPU_V6K configuration option for platforms to select if they have a V6K CPU core. This allows us to identify whether we need to support ARMv6 CPUs without the V6K SMP extensions at build time. Currently CPU_V6K is just an alias for CPU_V6, and all places which reference CPU_V6 are replaced by (CPU_V6 || CPU_V6K). Select CPU_V6K from platforms which are known to be V6K-only. 
Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5cff165..95ba92f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -24,7 +24,7 @@ config ARM select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7)) + select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) select HAVE_C_RECORDMCOUNT select HAVE_GENERIC_HARDIRQS select HAVE_SPARSE_IRQ @@ -1048,7 +1048,7 @@ config XSCALE_PMU default y config CPU_HAS_PMU - depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \ + depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \ (!ARCH_OMAP3 || OMAP3_EMU) default y bool @@ -1064,7 +1064,7 @@ endif config ARM_ERRATA_411920 bool "ARM errata: Invalidation of the Instruction Cache operation can fail" - depends on CPU_V6 + depends on CPU_V6 || CPU_V6K help Invalidation of the Instruction Cache operation can fail. This erratum is present in 1136 (before r1p4), 1156 and 1176. @@ -1361,7 +1361,7 @@ config HZ config THUMB2_KERNEL bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)" - depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL + depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL select AEABI select ARM_ASM_UNIFIED help @@ -1852,7 +1852,7 @@ config FPE_FASTFPE config VFP bool "VFP-format floating point maths" - depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON + depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON help Say Y to include VFP support code in the kernel. This is needed if your hardware includes a VFP unit. diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c22c1ad..9c43052 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale) tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) +tune-$(CONFIG_CPU_V6K) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) ifeq ($(CONFIG_AEABI),y) CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 7193884..91f20f0 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -21,7 +21,7 @@ #if defined(CONFIG_DEBUG_ICEDCC) -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) .macro loadsp, rb, tmp .endm .macro writeb, ch, rb diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index e653a6d..4657e87 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -36,7 +36,7 @@ extern void error(char *x); #ifdef CONFIG_DEBUG_ICEDCC -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) static void icedcc_putc(int ch) { diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 3acd8fa..7d0614f 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -116,7 +116,7 @@ # define MULTI_CACHE 1 #endif -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) //# ifdef _CACHE # define MULTI_CACHE 1 //# else @@ -316,7 +316,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, * Optimized __flush_icache_all for the 
common cases. Note that UP ARMv7 * will fall through to use __flush_icache_all_generic. */ -#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \ +#if (defined(CONFIG_CPU_V7) && \ + (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \ defined(CONFIG_SMP_ON_UP) #define __flush_icache_preferred __cpuc_flush_icache_all #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8fdae9b..296ca47 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -231,7 +231,7 @@ # endif #endif -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) # ifdef CPU_NAME # undef MULTI_CPU # define MULTI_CPU diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index a0f0752..d2d983b 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -25,7 +25,7 @@ .macro addruart, rp, rv .endm -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) .macro senduart, rd, rx mcr p14, 0, \rd, c0, c5, 0 diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index c058bfc..6fc2d22 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -30,7 +30,7 @@ * enable the interrupt. */ -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) enum armv6_perf_types { ARMV6_PERFCTR_ICACHE_MISS = 0x0, ARMV6_PERFCTR_IBUF_STALL = 0x1, @@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void) { return NULL; } -#endif /* CONFIG_CPU_V6 */ +#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */ diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9d30c6f..559e933 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -402,16 +402,18 @@ config CPU_V6 select CPU_TLB_V6 if MMU # ARMv6k -config CPU_32v6K - bool "Support ARM V6K processor extensions" if !SMP - depends on CPU_V6 || CPU_V7 - default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) - help - Say Y here if your ARMv6 processor supports the 'K' extension. - This enables the kernel to use some instructions not present - on previous processors, and as such a kernel build with this - enabled will not boot on processors with do not support these - instructions. +config CPU_V6K + bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE + select CPU_32v6 + select CPU_32v6K if !ARCH_OMAP2 + select CPU_ABRT_EV6 + select CPU_PABRT_V6 + select CPU_CACHE_V6 + select CPU_CACHE_VIPT + select CPU_CP15_MMU + select CPU_HAS_ASID if MMU + select CPU_COPY_V6 if MMU + select CPU_TLB_V6 if MMU # ARMv7 config CPU_V7 @@ -453,6 +455,17 @@ config CPU_32v6 bool select TLS_REG_EMUL if !CPU_32v6K && !MMU +config CPU_32v6K + bool "Support ARM V6K processor extensions" if !SMP + depends on CPU_V6 || CPU_V6K || CPU_V7 + default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) + help + Say Y here if your ARMv6 processor supports the 'K' extension. + This enables the kernel to use some instructions not present + on previous processors, and as such a kernel build with this + enabled will not boot on processors with do not support these + instructions. 
+ config CPU_32v7 bool @@ -623,7 +636,7 @@ comment "Processor Features" config ARM_THUMB bool "Support Thumb user binaries" - depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V7 || CPU_FEROCEON + depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON default y help Say Y if you want to include kernel support for running user space @@ -681,7 +694,7 @@ config CPU_BIG_ENDIAN config CPU_ENDIAN_BE8 bool depends on CPU_BIG_ENDIAN - default CPU_V6 || CPU_V7 + default CPU_V6 || CPU_V6K || CPU_V7 help Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors. @@ -747,7 +760,7 @@ config CPU_CACHE_ROUND_ROBIN config CPU_BPREDICT_DISABLE bool "Disable branch prediction" - depends on CPU_ARM1020 || CPU_V6 || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526 + depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526 help Say Y here to disable branch prediction. If unsure, say N. @@ -767,7 +780,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG config DMA_CACHE_RWFO bool "Enable read/write for ownership DMA cache maintenance" - depends on CPU_V6 && SMP + depends on (CPU_V6 || CPU_V6K) && SMP default y help The Snoop Control Unit on ARM11MPCore does not detect the @@ -823,7 +836,7 @@ config CACHE_L2X0 config CACHE_PL310 bool depends on CACHE_L2X0 - default y if CPU_V7 && !CPU_V6 + default y if CPU_V7 && !(CPU_V6 || CPU_V6K) help This option enables optimisations for the PL310 cache controller. @@ -851,10 +864,10 @@ config ARM_L1_CACHE_SHIFT default 5 config ARM_DMA_MEM_BUFFERABLE - bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7 + bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7 depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \ MACH_REALVIEW_PB11MP) - default y if CPU_V6 || CPU_V7 + default y if CPU_V6 || CPU_V6K || CPU_V7 help Historically, the kernel has used strongly ordered mappings to provide DMA coherent memory. With the advent of ARMv7, mapping diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 00d74a0..bca7e61 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -90,6 +90,7 @@ obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o obj-$(CONFIG_CPU_V6) += proc-v6.o +obj-$(CONFIG_CPU_V6K) += proc-v6.o obj-$(CONFIG_CPU_V7) += proc-v7.o AFLAGS_proc-v6.o :=-Wa,-march=armv6 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index b0a9830..afe209e 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -31,7 +31,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) unsigned int cache_type; int do_align = 0, aliasing = 0; -- cgit v0.10.2 From 74200e6421882bfb53677d63a134d89a919815c1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 18:23:31 +0000 Subject: ARM: v6k: Realview EB 11MPCore and PB11MPCore use V6K architecture CPUs Make Realview EB ARM11MPCore and PB11MPCore select the new V6K CPU option. 
Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig index 7ca138a..b9a9805 100644 --- a/arch/arm/mach-realview/Kconfig +++ b/arch/arm/mach-realview/Kconfig @@ -19,7 +19,7 @@ config REALVIEW_EB_A9MP config REALVIEW_EB_ARM11MP bool "Support ARM11MPCore Tile" depends on MACH_REALVIEW_EB - select CPU_V6 + select CPU_V6K select ARCH_HAS_BARRIERS if SMP help Enable support for the ARM11MPCore tile fitted to the Realview(R) @@ -36,7 +36,7 @@ config REALVIEW_EB_ARM11MP_REVB config MACH_REALVIEW_PB11MP bool "Support RealView(R) Platform Baseboard for ARM11MPCore" - select CPU_V6 + select CPU_V6K select ARM_GIC select HAVE_PATA_PLATFORM select ARCH_HAS_BARRIERS if SMP @@ -45,6 +45,7 @@ config MACH_REALVIEW_PB11MP the ARM11MPCore. This platform has an on-board ARM11MPCore and has support for PCI-E and Compact Flash. +# ARMv6 CPU without K extensions, but does have the new exclusive ops config MACH_REALVIEW_PB1176 bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S" select CPU_V6 -- cgit v0.10.2 From c786282e6dd18fe2ff43ab44411085dc40e7fbc5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 18:20:05 +0000 Subject: ARM: v6k: Dove platforms use V6K architecture CPUs Make Dove platforms select the new V6K CPU option. Tested-by: Nicolas Pitre Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 95ba92f..7a5fe5d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -457,6 +457,7 @@ config ARCH_IXP4XX config ARCH_DOVE bool "Marvell Dove" + select CPU_V6K select PCI select ARCH_REQUIRE_GPIOLIB select GENERIC_CLOCKEVENTS diff --git a/arch/arm/mach-dove/Kconfig b/arch/arm/mach-dove/Kconfig index a4ed390..dd937c5 100644 --- a/arch/arm/mach-dove/Kconfig +++ b/arch/arm/mach-dove/Kconfig @@ -9,7 +9,7 @@ config MACH_DOVE_DB Say 'Y' here if you want your kernel to support the Marvell DB-MV88AP510 Development Board. - config MACH_CM_A510 +config MACH_CM_A510 bool "CompuLab CM-A510 Board" help Say 'Y' here if you want your kernel to support the diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 559e933..22a3f4a 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -390,7 +390,7 @@ config CPU_PJ4 # ARMv6 config CPU_V6 - bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE + bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX select CPU_32v6 select CPU_ABRT_EV6 select CPU_PABRT_V6 @@ -403,7 +403,7 @@ config CPU_V6 # ARMv6k config CPU_V6K - bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE + bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX select CPU_32v6 select CPU_32v6K if !ARCH_OMAP2 select CPU_ABRT_EV6 -- cgit v0.10.2 From 7db44c75a241c18d03e82540c5b825216d4c668b Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 15:35:37 +0000 Subject: ARM: v6k: select clear exclusive code seqences according to V6 variants If CONFIG_CPU_V6 is enabled, then the kernel must support ARMv6 CPUs which don't have the V6K extensions implemented. Always use the dummy store-exclusive method to ensure that the exclusive monitors are cleared. If CONFIG_CPU_V6 is not set, but CONFIG_CPU_32v6K is enabled, then we have the K extensions available on all CPUs we're building support for, so we can use the new clear-exclusive instruction. 
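
To make the selection concrete, here is a minimal C sketch of the two monitor-clearing sequences (the helper name is illustrative, not part of the patch). Any store-exclusive clears the local exclusive monitor, so plain ARMv6, which lacks CLREX, can perform a dummy STREX to a scratch location; V6K and later use the dedicated CLREX instruction, exactly as the svc_exit and v6_early_abort hunks below do.

/*
 * Minimal sketch, not the kernel's exact code: clear the local
 * exclusive monitor so that a leftover reservation cannot make a
 * later STREX in the interrupted context succeed spuriously.
 */
static inline void clear_exclusive_monitor(void)	/* illustrative name */
{
#ifdef CONFIG_CPU_V6				/* plain V6: no CLREX */
	unsigned long tmp, scratch;

	__asm__ __volatile__(
	"	strex	%0, %1, [%2]\n"		/* dummy store-exclusive */
	: "=&r" (tmp)
	: "r" (0), "r" (&scratch)
	: "memory");
#else						/* V6K and later */
	__asm__ __volatile__("clrex" : : : "memory");
#endif
}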
Acked-by: Tony Lindgren
Tested-by: Sourav Poddar
Tested-by: Will Deacon
Signed-off-by: Russell King

diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index ae94649..051166c 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -76,13 +76,13 @@
 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr
 	msr	spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_32v6K)
-	clrex				@ clear the exclusive monitor
-	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
 	ldr	r0, [sp]
 	strex	r1, r2, [sp]		@ clear the exclusive monitor
 	ldmib	sp, {r1 - pc}^		@ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+	clrex				@ clear the exclusive monitor
+	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
 #else
 	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
 #endif
@@ -92,10 +92,10 @@
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
-#if defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
 #endif
 	.if	\fast
 	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr

diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index f332df7..1478aa5 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -20,11 +20,11 @@
  */
 	.align	5
 ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
+#ifdef CONFIG_CPU_V6
 	sub	r1, sp, #4		@ Get unused stack location
 	strex	r0, r1, [r1]		@ Clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+	clrex
 #endif
 	mrc	p15, 0, r1, c5, c0, 0	@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0	@ get FAR
-- cgit v0.10.2


From 4ed67a53591db641543d57f31c182591a429dc93 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 17 Jan 2011 15:42:42 +0000
Subject: ARM: v6k: select cmpxchg code sequences according to V6 variants

If CONFIG_CPU_V6 is enabled, we must avoid the byte/halfword/doubleword exclusive operations, which aren't implemented before V6K. Use the generic versions (or omit them) instead.

If CONFIG_CPU_V6 is not set, but CONFIG_CPU_32v6K is enabled, we have the K extensions, so use these new instructions.
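
For reference, the word-sized exchange that remains available on every ARMv6 variant looks like the sketch below (function name illustrative; it mirrors the __cmpxchg 4-byte case in the diff that follows). The 32-bit LDREX/STREX pair is architected from ARMv6 onwards, whereas the byte, halfword and doubleword forms only arrive with the K extensions.

/*
 * Minimal sketch of a word-sized compare-and-swap on ARMv6, modelled
 * on the kernel's __cmpxchg 4-byte case.  Only this 32-bit form works
 * on plain V6; the 1- and 2-byte cases need LDREXB/LDREXH, which is
 * why they are compiled out when CONFIG_CPU_V6 is set.
 */
static inline unsigned long cmpxchg32(volatile unsigned long *ptr,
				      unsigned long old, unsigned long new)
{
	unsigned long oldval, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%1, [%2]\n"	/* load current value, take reservation */
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"	/* does it match 'old'? */
		"	strexeq	%0, %4, [%2]\n"	/* if so, try to store 'new' */
		: "=&r" (res), "=&r" (oldval)
		: "r" (ptr), "Ir" (old), "r" (new)
		: "memory", "cc");
	} while (res);				/* retry if the reservation was lost */

	return oldval;
}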
Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 97f6d60..9a87823 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -347,6 +347,7 @@ void cpu_idle_wait(void); #include #if __LINUX_ARM_ARCH__ < 6 +/* min ARCH < ARMv6 */ #ifdef CONFIG_SMP #error "SMP is not supported on this platform" @@ -365,7 +366,7 @@ void cpu_idle_wait(void); #include #endif -#else /* __LINUX_ARM_ARCH__ >= 6 */ +#else /* min ARCH >= ARMv6 */ extern void __bad_cmpxchg(volatile void *ptr, int size); @@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long oldval, res; switch (size) { -#ifdef CONFIG_CPU_32v6K +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ case 1: do { asm volatile("@ __cmpxchg1\n" @@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, : "memory", "cc"); } while (res); break; -#endif /* CONFIG_CPU_32v6K */ +#endif case 4: do { asm volatile("@ __cmpxchg4\n" @@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, unsigned long ret; switch (size) { -#ifndef CONFIG_CPU_32v6K +#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ case 1: case 2: ret = __cmpxchg_local_generic(ptr, old, new, size); break; -#endif /* !CONFIG_CPU_32v6K */ +#endif default: ret = __cmpxchg(ptr, old, new, size); } @@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, (unsigned long)(n), \ sizeof(*(ptr)))) -#ifdef CONFIG_CPU_32v6K +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ /* * Note : ARMv7-M (currently unsupported by Linux) does not support @@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, (unsigned long long)(o), \ (unsigned long long)(n))) -#else /* !CONFIG_CPU_32v6K */ +#else /* min ARCH = ARMv6 */ #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#endif /* CONFIG_CPU_32v6K */ +#endif #endif /* __LINUX_ARM_ARCH__ >= 6 */ -- cgit v0.10.2 From a41297a0ffb43fda0a565bdd3ee405d16505820c Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 15:48:33 +0000 Subject: ARM: v6k: select generic atomic64 code according to V6 variants If CONFIG_CPU_V6 is enabled, avoid using the double-word exclusive instructions in the kernel's atomic implementations as these are not supported. Fall back to the generic spinlock code instead. Acked-by: Tony Lindgren Acked-by: Will Deacon Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 7a5fe5d..27ec471 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -7,7 +7,7 @@ config ARM select HAVE_MEMBLOCK select RTC_LIB select SYS_SUPPORTS_APM_EMULATION - select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI) + select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI) select HAVE_OPROFILE if (HAVE_PERF_EVENTS) select HAVE_ARCH_KGDB select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL) -- cgit v0.10.2 From 37bc618fe2689a7f8de8fac82e72b00ecea4d43d Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 16:38:56 +0000 Subject: ARM: v6k: select TLS register code according to V6 variants If CONFIG_CPU_V6 is enabled, we may or may not have the TLS register. Use the conditional code which copes with this variability. Otherwise, if CONFIG_CPU_32v6K is set, we know we have the TLS register on all supported CPUs, so use it unconditionally. 
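
The register at stake is easy to show; a minimal sketch follows (helper name illustrative, not from the patch). set_tls_v6k boils down to one CP15 write to TPIDRURO, the user-readable thread ID register, which is architected on V6K and V7 but missing from some plain-ARMv6 cores, hence the runtime elf_hwcap & HWCAP_TLS test on CONFIG_CPU_V6 builds.

/*
 * Minimal sketch, assuming the V6K/V7 CP15 c13 thread ID registers:
 * write TPIDRURO, the user-readable TLS register.  On plain ARMv6 the
 * register may not exist, so the kernel checks HWCAP_TLS at run time
 * instead of assuming it.
 */
static inline void write_tls_register(unsigned long tls)	/* illustrative name */
{
	__asm__ __volatile__(
	"	mcr	p15, 0, %0, c13, c0, 3\n"	/* TPIDRURO = tls */
	:
	: "r" (tls));
}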
Acked-by: Nicolas Pitre Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index e71d6ff..60843eb 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -28,15 +28,14 @@ #define tls_emu 1 #define has_tls_reg 1 #define set_tls set_tls_none -#elif __LINUX_ARM_ARCH__ >= 7 || \ - (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) -#define tls_emu 0 -#define has_tls_reg 1 -#define set_tls set_tls_v6k -#elif __LINUX_ARM_ARCH__ == 6 +#elif defined(CONFIG_CPU_V6) #define tls_emu 0 #define has_tls_reg (elf_hwcap & HWCAP_TLS) #define set_tls set_tls_v6 +#elif defined(CONFIG_CPU_32v6K) +#define tls_emu 0 +#define has_tls_reg 1 +#define set_tls set_tls_v6k #else #define tls_emu 0 #define has_tls_reg 0 -- cgit v0.10.2 From 8762df4d5c5462bea8ad0f76473f39e9ce9b8662 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 15:53:56 +0000 Subject: ARM: v6k: use CPU domain feature if we include support for arch < ARMv6K Rather than turning off CPU domain switching when the build architecture includes ARMv6K, thereby causing problems for ARMv6-supporting kernels, turn it on when it's required to support a CPU architecture. Acked-by: Nicolas Pitre Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 22a3f4a..29215f5 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -435,25 +435,30 @@ config CPU_32v3 bool select TLS_REG_EMUL if SMP || !MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP + select CPU_USE_DOMAINS if MMU config CPU_32v4 bool select TLS_REG_EMUL if SMP || !MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP + select CPU_USE_DOMAINS if MMU config CPU_32v4T bool select TLS_REG_EMUL if SMP || !MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP + select CPU_USE_DOMAINS if MMU config CPU_32v5 bool select TLS_REG_EMUL if SMP || !MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP + select CPU_USE_DOMAINS if MMU config CPU_32v6 bool select TLS_REG_EMUL if !CPU_32v6K && !MMU + select CPU_USE_DOMAINS if CPU_V6 && MMU config CPU_32v6K bool "Support ARM V6K processor extensions" if !SMP @@ -620,8 +625,6 @@ config CPU_CP15_MPU config CPU_USE_DOMAINS bool - depends on MMU - default y if !CPU_32v6K help This option enables or disables the use of domain switching via the set_fs() function. -- cgit v0.10.2 From 60799c6dd727b72b9c6794e9d1ab2d2b01a87ca4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 15 Jan 2011 16:25:04 +0000 Subject: ARM: v6k: do not disable CPU_32v6K based on platform selection CPU_32v6K controls whether we use the ARMv6K extension instructions in the kernel, and in some places whether we use SMP-safe code sequences (eg, bitops.) MX3 prevents the selection of this option to ensure that it is not enabled for their CPU, which is ARMv6 only. Now that we've split the CPU_V6 option, V6K support won't be offered for MX3 anymore. OMAP prevents the selection of this option in an attempt to produce a kernel which runs on architectures from ARMv6 to ARMv7 MPCore. We now achieve this in a different way (see the previous patches). As such, we no longer need to offer this as a configuration option to the user. 
Tested-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 29215f5..ca1238b 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -405,7 +405,7 @@ config CPU_V6 config CPU_V6K bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX select CPU_32v6 - select CPU_32v6K if !ARCH_OMAP2 + select CPU_32v6K select CPU_ABRT_EV6 select CPU_PABRT_V6 select CPU_CACHE_V6 @@ -418,7 +418,7 @@ config CPU_V6K # ARMv7 config CPU_V7 bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX - select CPU_32v6K if !ARCH_OMAP2 + select CPU_32v6K select CPU_32v7 select CPU_ABRT_EV7 select CPU_PABRT_V7 @@ -461,15 +461,7 @@ config CPU_32v6 select CPU_USE_DOMAINS if CPU_V6 && MMU config CPU_32v6K - bool "Support ARM V6K processor extensions" if !SMP - depends on CPU_V6 || CPU_V6K || CPU_V7 - default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) - help - Say Y here if your ARMv6 processor supports the 'K' extension. - This enables the kernel to use some instructions not present - on previous processors, and as such a kernel build with this - enabled will not boot on processors with do not support these - instructions. + bool config CPU_32v7 bool -- cgit v0.10.2 From 581388c15bd61a2548af0b87152648741d038571 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 17:27:48 +0000 Subject: ARM: v6k: allow swp emulation again when ARMv7 is enabled Now that we build a v6+v6k+v7 kernel with -march=armv6k for everything, we don't need to disable swp emulation to work around the build problem with OMAP. Tested-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index ca1238b..843bc8c 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -652,7 +652,7 @@ config ARM_THUMBEE config SWP_EMULATE bool "Emulate SWP/SWPB instructions" - depends on CPU_V7 && !CPU_V6 + depends on CPU_V7 select HAVE_PROC_CPU if PROC_FS default y if SMP help -- cgit v0.10.2 From fbb4ddacb6b70b3178fcb7e3debc5180e6b6cd2f Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 18:01:58 +0000 Subject: ARM: v6k: only allow SMP if we have v6k or v7 CPU SMP extensions are only supported on ARMv6k or ARMv7 architectures, so only offer the option if we're building for such an architecture. Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 27ec471..d3f5674 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1251,6 +1251,7 @@ source "kernel/time/Kconfig" config SMP bool "Symmetric Multi-Processing (EXPERIMENTAL)" depends on EXPERIMENTAL + depends on CPU_V6K || CPU_V7 depends on GENERIC_CLOCKEVENTS depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \ MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \ -- cgit v0.10.2 From 3bc28c8edc4f5f78d9ec23fb0f20df29b7b3a072 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 18 Jan 2011 13:30:33 +0000 Subject: ARM: v6k: DMA_CACHE_RWFO isn't appropriate for non-v6k CPUs Limit DMA_CACHE_RWFO to only v6k SMP CPUs - V6 CPUs aren't SMP capable, so the read/write for ownership work-around doesn't apply to them. 
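
The work-around being restricted here is the read/write-for-ownership idiom; a rough sketch in C with inline assembly (illustrative only, not the kernel's cache-v6.S code): since the ARM11MPCore Snoop Control Unit does not broadcast cache maintenance operations, a CPU first touches the line to migrate it into its own cache, so that the following clean or invalidate acts on the live copy.

/*
 * Rough sketch of read/write-for-ownership (illustrative only).
 * Reading then writing the location pulls the cache line into this
 * CPU's cache in an owned state before a clean/invalidate that the
 * SCU will not propagate to other cores.
 */
static inline void rwfo_claim(volatile unsigned long *line)
{
	unsigned long v;

	__asm__ __volatile__(
	"	ldr	%0, [%1]\n"	/* read for ownership */
	"	str	%0, [%1]\n"	/* write for ownership */
	: "=&r" (v)
	: "r" (line)
	: "memory");
}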
Acked-by: Will Deacon
Tested-by: Sourav Poddar
Tested-by: Will Deacon
Signed-off-by: Russell King

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 843bc8c..808b832 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -775,7 +775,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG

 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
-	depends on (CPU_V6 || CPU_V6K) && SMP
+	depends on CPU_V6K && SMP
 	default y
 	help
 	  The Snoop Control Unit on ARM11MPCore does not detect the
-- cgit v0.10.2


From 774c096bf9e49eebf7b5d2d9fdddf632c29ccea0 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 23 Jan 2011 13:04:53 +0000
Subject: ARM: v6/v7 cache: allow cache calls to be optimized

The v6 cache call optimization was disabled to allow the optional block cache operations to be substituted on CPUs which supported those operations. However, as that functionality was removed, we no longer need to prevent this optimization from being taken advantage of.

The v7 cache call optimization was just a copy of the v6 one, so fix that too.

Tested-by: Will Deacon
Signed-off-by: Russell King

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 7d0614f..d9b4c42 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,20 +116,20 @@
 # define MULTI_CACHE 1
 #endif

-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
-//# ifdef _CACHE
+#if defined(CONFIG_CPU_CACHE_V6)
+# ifdef _CACHE
 # define MULTI_CACHE 1
-//# else
-//# define _CACHE v6
-//# endif
+# else
+# define _CACHE v6
+# endif
 #endif

-#if defined(CONFIG_CPU_V7)
-//# ifdef _CACHE
+#if defined(CONFIG_CPU_CACHE_V7)
+# ifdef _CACHE
 # define MULTI_CACHE 1
-//# else
-//# define _CACHE v7
-//# endif
+# else
+# define _CACHE v7
+# endif
 #endif

 #if !defined(_CACHE) && !defined(MULTI_CACHE)
-- cgit v0.10.2


From 917692f5f7ec63de3b093c825913d68e910db282 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 9 Feb 2011 12:06:59 +0100
Subject: ARM: 6655/1: Correct WFE() in asm/spinlock.h for Thumb-2

The content for ALT_SMP() in the definition of WFE() expands to 6 bytes (IT cc ; WFEcc.W), which breaks the assumptions of the fixup code, leading to lockups when the affected code gets run.

This patch works around the problem by explicitly using an IT + WFEcc.N pair.

Signed-off-by: Dave Martin
Acked-by: Will Deacon
Signed-off-by: Russell King

diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index da1af52..fdd3820 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -18,7 +18,23 @@
 #ifdef CONFIG_THUMB2_KERNEL
 #define SEV		ALT_SMP("sev.w", "nop.w")
-#define WFE(cond)	ALT_SMP("wfe" cond ".w", "nop.w")
+/*
+ * For Thumb-2, special care is needed to ensure that the conditional WFE
+ * instruction really does assemble to exactly 4 bytes (as required by
+ * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
+ * assembler to insert an extra (16-bit) IT instruction, depending on the
+ * presence or absence of neighbouring conditional instructions.
+ *
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
+ * the assembler won't change IT instructions which are explicitly present
+ * in the input.
+ */ +#define WFE(cond) ALT_SMP( \ + "it " cond "\n\t" \ + "wfe" cond ".n", \ + \ + "nop.w" \ +) #else #define SEV ALT_SMP("sev", "nop") #define WFE(cond) ALT_SMP("wfe" cond, "nop") -- cgit v0.10.2 From 3ba6e69ad887f8a814267ed36fd4bfbddf8855a9 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 8 Feb 2011 12:09:52 +0100 Subject: ARM: 6653/1: bitops: Use BX instead of MOV PC,LR The kernel doesn't officially need to interwork, but using BX wherever appropriate will help educate people into good assembler coding habits. BX is appropriate here because this code is predicated on __LINUX_ARM_ARCH__ >= 6 Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index a9d9d15..10d868a 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h @@ -12,7 +12,7 @@ strex r0, r2, [r1] cmp r0, #0 bne 1b - mov pc, lr + bx lr .endm .macro testop, instr, store @@ -33,7 +33,7 @@ smp_dmb cmp r0, #0 movne r0, #1 -2: mov pc, lr +2: bx lr .endm #else .macro bitop, instr -- cgit v0.10.2