author    | Rusty Russell <rusty@rustcorp.com.au> | 2008-12-31 12:35:57 (GMT)
committer | Rusty Russell <rusty@rustcorp.com.au> | 2008-12-31 12:35:57 (GMT)
commit    | 2ca1a615835d9f4990f42102ab1f2ef434e7e89c (patch)
tree      | 726cf3d5f29a6c66c44e4bd68e7ebed2fd83d059 /arch/arm/mm
parent    | e12f0102ac81d660c9f801d0a0e10ccf4537a9de (diff)
parent    | 6a94cb73064c952255336cc57731904174b2c58f (diff)
download  | linux-fsl-qoriq-2ca1a615835d9f4990f42102ab1f2ef434e7e89c.tar.xz
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	arch/x86/kernel/io_apic.c
Diffstat (limited to 'arch/arm/mm')
29 files changed, 764 insertions, 664 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ab5f7a2..d490f37 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -10,8 +10,7 @@ config CPU_32
 # ARM610
 config CPU_ARM610
-	bool "Support ARM610 processor"
-	depends on ARCH_RPC
+	bool "Support ARM610 processor" if ARCH_RPC
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
@@ -43,8 +42,7 @@ config CPU_ARM7TDMI
 # ARM710
 config CPU_ARM710
-	bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC
-	default y if ARCH_CLPS7500
+	bool "Support ARM710 processor" if ARCH_RPC
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
@@ -63,8 +61,7 @@ config CPU_ARM710
 # ARM720T
 config CPU_ARM720T
-	bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR
-	default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 || ARCH_H720X
+	bool "Support ARM720T processor" if ARCH_INTEGRATOR
 	select CPU_32v4T
 	select CPU_ABRT_LV4T
 	select CPU_PABRT_NOIFAR
@@ -114,9 +111,7 @@ config CPU_ARM9TDMI
 # ARM920T
 config CPU_ARM920T
-	bool "Support ARM920T processor"
-	depends on ARCH_EP93XX || ARCH_INTEGRATOR || CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_IMX || ARCH_AAEC2000 || ARCH_AT91RM9200
-	default y if CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_AT91RM9200
+	bool "Support ARM920T processor" if ARCH_INTEGRATOR
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -138,8 +133,6 @@ config CPU_ARM920T
 # ARM922T
 config CPU_ARM922T
 	bool "Support ARM922T processor" if ARCH_INTEGRATOR
-	depends on ARCH_LH7A40X || ARCH_INTEGRATOR || ARCH_KS8695
-	default y if ARCH_LH7A40X || ARCH_KS8695
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -159,8 +152,6 @@ config CPU_ARM922T
 # ARM925T
 config CPU_ARM925T
 	bool "Support ARM925T processor" if ARCH_OMAP1
-	depends on ARCH_OMAP15XX
-	default y if ARCH_OMAP15XX
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -179,22 +170,7 @@ config CPU_ARM925T
 # ARM926T
 config CPU_ARM926T
-	bool "Support ARM926T processor"
-	depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || \
-		MACH_VERSATILE_AB || ARCH_OMAP730 || \
-		ARCH_OMAP16XX || MACH_REALVIEW_EB || \
-		ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || \
-		ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || \
-		ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || \
-		ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || \
-		ARCH_NS9XXX || ARCH_DAVINCI || ARCH_MX2
-	default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || \
-		ARCH_OMAP730 || ARCH_OMAP16XX || \
-		ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || \
-		ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || \
-		ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || \
-		ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || \
-		ARCH_NS9XXX || ARCH_DAVINCI || ARCH_MX2
+	bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
 	select CPU_32v5
 	select CPU_ABRT_EV5TJ
 	select CPU_PABRT_NOIFAR
@@ -247,8 +223,7 @@ config CPU_ARM946E
 # ARM1020 - needs validating
 config CPU_ARM1020
-	bool "Support ARM1020T (rev 0) processor"
-	depends on ARCH_INTEGRATOR
+	bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -266,8 +241,7 @@ config CPU_ARM1020
 # ARM1020E - needs validating
 config CPU_ARM1020E
-	bool "Support ARM1020E processor"
-	depends on ARCH_INTEGRATOR
+	bool "Support ARM1020E processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -280,8 +254,7 @@ config CPU_ARM1020E
 # ARM1022E
 config CPU_ARM1022
-	bool "Support ARM1022E processor"
-	depends on ARCH_INTEGRATOR
+	bool "Support ARM1022E processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -299,8 +272,7 @@ config CPU_ARM1022
 # ARM1026EJ-S
 config CPU_ARM1026
-	bool "Support ARM1026EJ-S processor"
-	depends on ARCH_INTEGRATOR
+	bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV5T	# But need Jazelle, but EV5TJ ignores bit 10
 	select CPU_PABRT_NOIFAR
@@ -317,8 +289,7 @@ config CPU_ARM1026
 # SA110
 config CPU_SA110
-	bool "Support StrongARM(R) SA-110 processor" if !ARCH_EBSA110 && !FOOTBRIDGE && !ARCH_TBOX && !ARCH_SHARK && !ARCH_NEXUSPCI && ARCH_RPC
-	default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_TBOX || ARCH_SHARK || ARCH_NEXUSPCI
+	bool "Support StrongARM(R) SA-110 processor" if ARCH_RPC
 	select CPU_32v3 if ARCH_RPC
 	select CPU_32v4 if !ARCH_RPC
 	select CPU_ABRT_EV4
@@ -340,8 +311,6 @@ config CPU_SA110
 # SA1100
 config CPU_SA1100
 	bool
-	depends on ARCH_SA1100
-	default y
 	select CPU_32v4
 	select CPU_ABRT_EV4
 	select CPU_PABRT_NOIFAR
@@ -353,8 +322,6 @@ config CPU_SA1100
 # XScale
 config CPU_XSCALE
 	bool
-	depends on ARCH_IOP32X || ARCH_IOP33X || PXA25x || PXA27x || ARCH_IXP4XX || ARCH_IXP2000
-	default y
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_PABRT_NOIFAR
@@ -365,8 +332,6 @@ config CPU_XSCALE
 # XScale Core Version 3
 config CPU_XSC3
 	bool
-	depends on ARCH_IXP23XX || ARCH_IOP13XX || PXA3xx
-	default y
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_PABRT_NOIFAR
@@ -378,8 +343,6 @@ config CPU_XSC3
 # Feroceon
 config CPU_FEROCEON
 	bool
-	depends on ARCH_ORION5X || ARCH_LOKI || ARCH_KIRKWOOD || ARCH_MV78XX0
-	default y
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_PABRT_NOIFAR
@@ -399,10 +362,7 @@ config CPU_FEROCEON_OLD_ID
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor"
-	depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2 || ARCH_MX3 || ARCH_MSM || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176
-	default y if ARCH_MX3
-	default y if ARCH_MSM
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_NOIFAR
@@ -427,8 +387,7 @@ config CPU_32v6K
 # ARMv7
 config CPU_V7
-	bool "Support ARM V7 processor"
-	depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP3
+	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
 	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
@@ -745,7 +704,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
-	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176
+	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || REALVIEW_EB_A9MP
 	default y
 	select OUTER_CACHE
 	help
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 2d5884c..3a398be 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,6 +17,7 @@
 #include <linux/string.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/uaccess.h>
 
 #include <asm/unaligned.h>
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 3b3639e..8a4abeb 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -9,7 +9,6 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
-#include <mach/hardware.h>
 #include <asm/page.h>
 #include "proc-macros.S"
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 5786adf..3668611 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -9,7 +9,6 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
-#include <mach/hardware.h>
 #include <asm/page.h>
 #include "proc-macros.S"
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 51a9b0b..c54fa2c 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -13,7 +13,6 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
-#include <mach/hardware.h>
 #include <asm/page.h>
 #include "proc-macros.S"
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index d19c2be..be93ff0 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -26,6 +26,7 @@
  *	- mm	- mm_struct describing address space
  */
 ENTRY(v7_flush_dcache_all)
+	dmb					@ ensure ordering with previous memory accesses
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
 	ands	r3, r0, #0x7000000		@ extract loc from clidr
 	mov	r3, r3, lsr #23			@ left align loc bit field
@@ -64,6 +65,7 @@ skip:
 finished:
	mov	r10, #0				@ swith back to cache level 0
 	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	dsb
 	isb
 	mov	pc, lr
 ENDPROC(v7_flush_dcache_all)
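The two one-line additions to cache-v7.S are easy to miss in the noise: a dmb before the set/way walk and a dsb before the final isb. A rough C rendering of the intent (ex_dmb()/ex_dsb() are illustrative wrappers, not kernel API; the real code issues the barriers directly in assembly):

static inline void ex_dmb(void) { asm volatile("dmb" ::: "memory"); }
static inline void ex_dsb(void) { asm volatile("dsb" ::: "memory"); }

static void ex_flush_dcache_all(void)
{
	ex_dmb();	/* make prior stores visible before the set/way walk */
	/* ... clean/invalidate each cache level by set/way ... */
	ex_dsb();	/* the walk must complete before execution continues */
	asm volatile("isb");
}

Without the leading dmb, data written just before the call may still be draining when the walk starts; without the dsb, the caller could move on (for instance into WFI, which the proc-v6.S/proc-v7.S hunks further down guard in the same spirit) while maintenance operations are still pending.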
diff --git a/arch/arm/mm/copypage-feroceon.S b/arch/arm/mm/copypage-feroceon.S
deleted file mode 100644
index 7eb0d32..0000000
--- a/arch/arm/mm/copypage-feroceon.S
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-feroceon.S
- *
- *  Copyright (C) 2008 Marvell Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This handles copy_user_page and clear_user_page on Feroceon
- * more optimally than the generic implementations.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-
-ENTRY(feroceon_copy_user_page)
-	stmfd	sp!, {r4-r9, lr}
-	mov	ip, #PAGE_SZ
-1:	mov	lr, r1
-	ldmia	r1!, {r2 - r9}
-	pld	[lr, #32]
-	pld	[lr, #64]
-	pld	[lr, #96]
-	pld	[lr, #128]
-	pld	[lr, #160]
-	pld	[lr, #192]
-	pld	[lr, #224]
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	subs	ip, ip, #(32 * 8)
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	bne	1b
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
-	ldmfd	sp!, {r4-r9, pc}
-
-	.align	5
-
-ENTRY(feroceon_clear_user_page)
-	stmfd	sp!, {r4-r7, lr}
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-	mov	r4, #0
-	mov	r5, #0
-	mov	r6, #0
-	mov	r7, #0
-	mov	ip, #0
-	mov	lr, #0
-1:	stmia	r0, {r2-r7, ip, lr}
-	subs	r1, r1, #1
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	bne	1b
-	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
-	ldmfd	sp!, {r4-r7, pc}
-
-	__INITDATA
-
-	.type	feroceon_user_fns, #object
-ENTRY(feroceon_user_fns)
-	.long	feroceon_clear_user_page
-	.long	feroceon_copy_user_page
-	.size	feroceon_user_fns, . - feroceon_user_fns
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
new file mode 100644
index 0000000..c3ba6a9
--- /dev/null
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -0,0 +1,111 @@
+/*
+ *  linux/arch/arm/mm/copypage-feroceon.S
+ *
+ *  Copyright (C) 2008 Marvell Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles copy_user_highpage and clear_user_page on Feroceon
+ * more optimally than the generic implementations.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+static void __attribute__((naked))
+feroceon_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4-r9, lr}		\n\
+	mov	ip, %0				\n\
+1:	mov	lr, r1				\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	pld	[lr, #32]			\n\
+	pld	[lr, #64]			\n\
+	pld	[lr, #96]			\n\
+	pld	[lr, #128]			\n\
+	pld	[lr, #160]			\n\
+	pld	[lr, #192]			\n\
+	pld	[lr, #224]			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	subs	ip, ip, #(32 * 8)		\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	bne	1b				\n\
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB\n\
+	ldmfd	sp!, {r4-r9, pc}"
+	:
+	: "I" (PAGE_SIZE));
+}
+
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	feroceon_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2				\n\
+	mov	r2, #0				\n\
+	mov	r3, #0				\n\
+	mov	r4, #0				\n\
+	mov	r5, #0				\n\
+	mov	r6, #0				\n\
+	mov	r7, #0				\n\
+	mov	ip, #0				\n\
+	mov	lr, #0				\n\
+1:	stmia	%0, {r2-r7, ip, lr}		\n\
+	subs	r1, r1, #1			\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	bne	1b				\n\
+	mcr	p15, 0, r1, c7, c10, 4		@ drain WB"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns feroceon_user_fns __initdata = {
+	.cpu_clear_user_highpage = feroceon_clear_user_highpage,
+	.cpu_copy_user_highpage	= feroceon_copy_user_highpage,
+};
+
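Every .S-to-.c conversion in this series follows the shape visible above, so it is worth spelling out once. A minimal sketch of the wrapper pattern, using only calls that appear in the patch (the function name is made up; each per-CPU file plugs its optimised asm routine in where copy_page() stands here):

#include <linux/highmem.h>

static void example_copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr)
{
	void *kto = kmap_atomic(to, KM_USER0);
	void *kfrom = kmap_atomic(from, KM_USER1);

	copy_page(kto, kfrom);		/* CPU-specific copier goes here */

	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}

The payoff is that callers now pass struct page pointers rather than kernel virtual addresses, so highmem pages (which have no permanent kernel mapping) work too; for a lowmem page kmap_atomic() simply resolves to its direct mapping, so non-highmem configurations pay essentially nothing for the indirection.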
diff --git a/arch/arm/mm/copypage-v3.S b/arch/arm/mm/copypage-v3.S
deleted file mode 100644
index 2ee394b..0000000
--- a/arch/arm/mm/copypage-v3.S
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage.S
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv3 optimised copy_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-ENTRY(v3_copy_user_page)
-	stmfd	sp!, {r4, lr}			@	2
-	mov	r2, #PAGE_SZ/64			@	1
-	ldmia	r1!, {r3, r4, ip, lr}		@	4+1
-1:	stmia	r0!, {r3, r4, ip, lr}		@	4
-	ldmia	r1!, {r3, r4, ip, lr}		@	4+1
-	stmia	r0!, {r3, r4, ip, lr}		@	4
-	ldmia	r1!, {r3, r4, ip, lr}		@	4+1
-	stmia	r0!, {r3, r4, ip, lr}		@	4
-	ldmia	r1!, {r3, r4, ip, lr}		@	4
-	subs	r2, r2, #1			@	1
-	stmia	r0!, {r3, r4, ip, lr}		@	4
-	ldmneia	r1!, {r3, r4, ip, lr}		@	4
-	bne	1b				@	1
-	ldmfd	sp!, {r4, pc}			@	3
-
-	.align	5
-/*
- * ARMv3 optimised clear_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-ENTRY(v3_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v3_user_fns, #object
-ENTRY(v3_user_fns)
-	.long	v3_clear_user_page
-	.long	v3_copy_user_page
-	.size	v3_user_fns, . - v3_user_fns
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
new file mode 100644
index 0000000..70ed96c
--- /dev/null
+++ b/arch/arm/mm/copypage-v3.c
@@ -0,0 +1,81 @@
+/*
+ *  linux/arch/arm/mm/copypage-v3.c
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv3 optimised copy_user_highpage
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+static void __attribute__((naked))
+v3_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\n\
+	stmfd	sp!, {r4, lr}			@	2\n\
+	mov	r2, %2				@	1\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@	4+1\n\
+1:	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@	4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@	4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@	4\n\
+	subs	r2, r2, #1			@	1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
+	ldmneia	%0!, {r3, r4, ip, lr}		@	4\n\
+	bne	1b				@	1\n\
+	ldmfd	sp!, {r4, pc}			@	3"
+	:
+	: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
+}
+
+void v3_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v3_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv3 optimised clear_user_page
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\n\
+	mov	r1, %2				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v3_user_fns __initdata = {
+	.cpu_clear_user_highpage = v3_clear_user_highpage,
+	.cpu_copy_user_highpage	= v3_copy_user_highpage,
+};
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 8d33e25..bdb5fd9 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -33,7 +33,7 @@ static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * ARMv4 mini-dcache optimised copy_user_page
+ * ARMv4 mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address.  Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  *
  * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
  * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
  */
 static void __attribute__((naked))
 mc_copy_user_page(void *from, void *to)
@@ -68,50 +68,53 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
 }
 
-void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void v4_mc_copy_user_highpage(struct page *from, struct page *to,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(0xffff8000);
 
 	mc_copy_user_page((void *)0xffff8000, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
  * ARMv4 optimised clear_user_page
  */
-void __attribute__((naked))
-v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	asm volatile(
-	"str	lr, [sp, #-4]!\n\
-	mov	r1, %0				@ 1\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
 	mov	lr, #0				@ 1\n\
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	subs	r1, r1, #1			@ 1\n\
-	bne	1b				@ 1\n\
-	ldr	pc, [sp], #4"
-	:
-	: "I" (PAGE_SIZE / 64));
+	bne	1b				@ 1"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= v4_mc_clear_user_page,
-	.cpu_copy_user_page	= v4_mc_copy_user_page,
+	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
+	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4wb.S b/arch/arm/mm/copypage-v4wb.S
deleted file mode 100644
index 8311735..0000000
--- a/arch/arm/mm/copypage-v4wb.S
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage.S
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address.  Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4wb_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r2, #PAGE_SZ/64			@ 1
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	subs	r2, r2, #1			@ 1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}		@ 4
-	bne	1b				@ 1
-	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4wb_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4wb_user_fns, #object
-ENTRY(v4wb_user_fns)
-	.long	v4wb_clear_user_page
-	.long	v4wb_copy_user_page
-	.size	v4wb_user_fns, . - v4wb_user_fns
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
new file mode 100644
index 0000000..3ec93da
--- /dev/null
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -0,0 +1,94 @@
+/*
+ *  linux/arch/arm/mm/copypage-v4wb.c
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv4 optimised copy_user_highpage
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address.  Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction.  If your processor does not supply this, you have to write your
+ * own copy_user_highpage that does the right thing.
+ */
+static void __attribute__((naked))
+v4wb_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r2, %0				@ 1\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	subs	r2, r2, #1			@ 1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wb_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v4wb_user_fns __initdata = {
+	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
+	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
+};
diff --git a/arch/arm/mm/copypage-v4wt.S b/arch/arm/mm/copypage-v4wt.S
deleted file mode 100644
index e1f2af2..0000000
--- a/arch/arm/mm/copypage-v4wt.S
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-v4.S
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- *
- *  This is for CPUs with a writethrough cache and 'flush ID cache' is
- *  the only supported cache operation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 optimised copy_user_page
- *
- * Since we have writethrough caches, we don't have to worry about
- * dirty data in the cache.  However, we do have to ensure that
- * subsequent reads are up to date.
- */
-ENTRY(v4wt_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r2, #PAGE_SZ/64			@ 1
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-1:	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	subs	r2, r2, #1			@ 1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}		@ 4
-	bne	1b				@ 1
-	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4wt_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4wt_user_fns, #object
-ENTRY(v4wt_user_fns)
-	.long	v4wt_clear_user_page
-	.long	v4wt_copy_user_page
-	.size	v4wt_user_fns, . - v4wt_user_fns
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
new file mode 100644
index 0000000..0f1188e
--- /dev/null
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -0,0 +1,88 @@
+/*
+ *  linux/arch/arm/mm/copypage-v4wt.S
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This is for CPUs with a writethrough cache and 'flush ID cache' is
+ *  the only supported cache operation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv4 optimised copy_user_highpage
+ *
+ * Since we have writethrough caches, we don't have to worry about
+ * dirty data in the cache.  However, we do have to ensure that
+ * subsequent reads are up to date.
+ */
+static void __attribute__((naked))
+v4wt_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r2, %0				@ 1\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+1:	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	subs	r2, r2, #1			@ 1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wt_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache"
	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v4wt_user_fns __initdata = {
+	.cpu_clear_user_highpage = v4wt_clear_user_highpage,
+	.cpu_copy_user_highpage	= v4wt_copy_user_highpage,
+};
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0e21c07..4127a7b 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -10,8 +10,8 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
@@ -33,41 +33,56 @@ static DEFINE_SPINLOCK(v6_lock);
  * Copy the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of these pages.
  */
-static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_highpage_nonaliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
 {
+	void *kto, *kfrom;
+
+	kfrom = kmap_atomic(from, KM_USER0);
+	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
+	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kfrom, KM_USER0);
 }
 
 /*
  * Clear the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of this page.
  */
-static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	clear_page(kaddr);
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 /*
- * Copy the page, taking account of the cache colour.
+ * Discard data in the kernel mapping for the new page.
+ * FIXME: needs this MCRR to be supported.
 */
-static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void discard_old_kernel_data(void *kto)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long from, to;
-	struct page *page = virt_to_page(kfrom);
-
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
-
-	/*
-	 * Discard data in the kernel mapping for the new page.
-	 * FIXME: needs this MCRR to be supported.
-	 */
 	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
 	   :
 	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");
+}
+
+/*
+ * Copy the page, taking account of the cache colour.
+ */
+static void v6_copy_user_highpage_aliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
+{
+	unsigned int offset = CACHE_COLOUR(vaddr);
+	unsigned long kfrom, kto;
+
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
+
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(to));
 
 	/*
	 * Now copy the page using the same cache colour as the
@@ -75,16 +90,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
 
-	from = from_address + (offset << PAGE_SHIFT);
-	to = to_address + (offset << PAGE_SHIFT);
+	kfrom = from_address + (offset << PAGE_SHIFT);
+	kto = to_address + (offset << PAGE_SHIFT);
 
-	flush_tlb_kernel_page(from);
-	flush_tlb_kernel_page(to);
+	flush_tlb_kernel_page(kfrom);
+	flush_tlb_kernel_page(kto);
 
-	copy_page((void *)to, (void *)from);
+	copy_page((void *)kto, (void *)kfrom);
 
 	spin_unlock(&v6_lock);
 }
@@ -94,20 +109,13 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
  * so remap the kernel page into the same cache colour as the user
  * page.
 */
-static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
 
-	/*
-	 * Discard data in the kernel mapping for the new page
-	 * FIXME: needs this MCRR to be supported.
-	 */
-	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
-	   :
-	   : "r" (kaddr),
-	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
-	   : "cc");
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(page));
 
 	/*
	 * Now clear the page using the same cache colour as
@@ -115,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
@@ -123,15 +131,15 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
-	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
+	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
+	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
-		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
+		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
 
 	return 0;
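The aliasing variants above hinge on a single computation: choosing temporary kernel mappings with the same cache colour as the user's view of the page. An illustrative rendering of the arithmetic (the macro spellings here are invented; on ARM, VIPT-aliasing caches advertise an aliasing span of four pages via SHMLBA, from which CACHE_COLOUR() is derived):

/* 4KB pages, 4-page aliasing span => 2 colour bits (VA bits 13:12) */
#define EX_PAGE_SHIFT	12
#define EX_SHMLBA	(4UL << EX_PAGE_SHIFT)

static unsigned long ex_cache_colour(unsigned long vaddr)
{
	return (vaddr & (EX_SHMLBA - 1)) >> EX_PAGE_SHIFT;	/* 0..3 */
}

For a user address of 0x4000b000 the colour is 3, so both scratch PTEs are installed at base + (3 << PAGE_SHIFT); the kernel-side copy then lands in the same cache sets the user mapping will read, and no stale alias survives.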
diff --git a/arch/arm/mm/copypage-xsc3.S b/arch/arm/mm/copypage-xsc3.S
deleted file mode 100644
index 9a2cb43..0000000
--- a/arch/arm/mm/copypage-xsc3.S
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-xsc3.S
- *
- *  Copyright (C) 2004 Intel Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Adapted for 3rd gen XScale core, no more mini-dcache
- * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-/*
- * General note:
- * We don't really want write-allocate cache behaviour for these functions
- * since that will just eat through 8K of the cache.
- */
-
-	.text
-	.align	5
-/*
- * XSC3 optimised copy_user_page
- *  r0 = destination
- *  r1 = source
- *  r2 = virtual user address of ultimate destination page
- *
- * The source page may have some clean entries in the cache already, but we
- * can safely ignore them - break_cow() will flush them out of the cache
- * if we eventually end up using our copied page.
- *
- */
-ENTRY(xsc3_mc_copy_user_page)
-	stmfd	sp!, {r4, r5, lr}
-	mov	lr, #PAGE_SZ/64-1
-
-	pld	[r1, #0]
-	pld	[r1, #32]
-1:	pld	[r1, #64]
-	pld	[r1, #96]
-
-2:	ldrd	r2, [r1], #8
-	mov	ip, r0
-	ldrd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
-	strd	r2, [r0], #8
-	ldrd	r2, [r1], #8
-	strd	r4, [r0], #8
-	ldrd	r4, [r1], #8
-	strd	r2, [r0], #8
-	strd	r4, [r0], #8
-	ldrd	r2, [r1], #8
-	mov	ip, r0
-	ldrd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
-	strd	r2, [r0], #8
-	ldrd	r2, [r1], #8
-	subs	lr, lr, #1
-	strd	r4, [r0], #8
-	ldrd	r4, [r1], #8
-	strd	r2, [r0], #8
-	strd	r4, [r0], #8
-	bgt	1b
-	beq	2b
-
-	ldmfd	sp!, {r4, r5, pc}
-
-	.align	5
-/*
- * XScale optimised clear_user_page
- *  r0 = destination
- *  r1 = virtual user address of ultimate destination page
- */
-ENTRY(xsc3_mc_clear_user_page)
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate line
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	subs	r1, r1, #1
-	bne	1b
-	mov	pc, lr
-
-	__INITDATA
-
-	.type	xsc3_mc_user_fns, #object
-ENTRY(xsc3_mc_user_fns)
-	.long	xsc3_mc_clear_user_page
-	.long	xsc3_mc_copy_user_page
-	.size	xsc3_mc_user_fns, . - xsc3_mc_user_fns
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
new file mode 100644
index 0000000..39a9945
--- /dev/null
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -0,0 +1,113 @@
+/*
+ *  linux/arch/arm/mm/copypage-xsc3.S
+ *
+ *  Copyright (C) 2004 Intel Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Adapted for 3rd gen XScale core, no more mini-dcache
+ * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * General note:
+ * We don't really want write-allocate cache behaviour for these functions
+ * since that will just eat through 8K of the cache.
+ */
+
+/*
+ * XSC3 optimised copy_user_highpage
+ *  r0 = destination
+ *  r1 = source
+ *
+ * The source page may have some clean entries in the cache already, but we
+ * can safely ignore them - break_cow() will flush them out of the cache
+ * if we eventually end up using our copied page.
+ *
+ */
+static void __attribute__((naked))
+xsc3_mc_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, r5, lr}		\n\
+	mov	lr, %0				\n\
+						\n\
+	pld	[r1, #0]			\n\
+	pld	[r1, #32]			\n\
+1:	pld	[r1, #64]			\n\
+	pld	[r1, #96]			\n\
+						\n\
+2:	ldrd	r2, [r1], #8			\n\
+	mov	ip, r0				\n\
+	ldrd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
+	strd	r2, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r4, [r1], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	mov	ip, r0				\n\
+	ldrd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
+	strd	r2, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	subs	lr, lr, #1			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r4, [r1], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r4, [r0], #8			\n\
+	bgt	1b				\n\
+	beq	2b				\n\
+						\n\
+	ldmfd	sp!, {r4, r5, pc}"
+	:
+	: "I" (PAGE_SIZE / 64 - 1));
+}
+
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	xsc3_mc_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * XScale optimised clear_user_page
+ *  r0 = destination
+ *  r1 = virtual user address of ultimate destination page
+ */
+void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2				\n\
+	mov	r2, #0				\n\
+	mov	r3, #0				\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	subs	r1, r1, #1			\n\
+	bne	1b"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns xsc3_mc_user_fns __initdata = {
+	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
+	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
+};
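A recurring mechanical detail in these conversions: a standalone .S routine owns every register, but inline asm must declare what it touches. The pointer that used to arrive in r0 becomes an in/out operand, and every scratch register joins the clobber list. A stripped-down, hypothetical illustration (not from the patch):

static void example_clear_lines(void *kaddr)
{
	void *ptr;
	asm volatile(
	"	mov	r2, #0			\n"
	"	mov	r3, #0			\n"
	"1:	stmia	%0!, {r2, r3}		\n"
	"	cmp	%0, %1			\n"
	"	blo	1b"
	: "=r" (ptr)
	: "0" (kaddr), "r" ((char *)kaddr + 4096)
	: "r2", "r3", "cc");
}

The "=r" (ptr) / "0" (kaddr) pairing is the same trick the patch uses throughout: it tells the compiler the pointer register is rewritten inside the asm. It is also why the old naked functions' trailing "mov pc, lr" disappears -- the compiler now emits the prologue and return itself.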
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index bad4933..d18f239 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * XScale mini-dcache optimised copy_user_page
+ * XScale mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address.  Since the Dcache is read-allocate, this removes the
@@ -90,48 +90,53 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
 }
 
-void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
  * XScale optimised clear_user_page
 */
-void __attribute__((naked))
-xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void
+xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
 	asm volatile(
-	"mov	r1, %0				\n\
+	"mov	r1, %2				\n\
 	mov	r2, #0				\n\
 	mov	r3, #0				\n\
-1:	mov	ip, r0				\n\
-	strd	r2, [r0], #8			\n\
-	strd	r2, [r0], #8			\n\
-	strd	r2, [r0], #8			\n\
-	strd	r2, [r0], #8			\n\
+1:	mov	ip, %0				\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
 	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
 	subs	r1, r1, #1			\n\
 	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
-	bne	1b				\n\
-	mov	pc, lr"
-	:
-	: "I" (PAGE_SIZE / 32));
+	bne	1b"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3", "ip");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= xscale_mc_clear_user_page,
-	.cpu_copy_user_page	= xscale_mc_copy_user_page,
+	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
+	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 22c9530..0455557 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
+#include <linux/page-flags.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -84,13 +85,14 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 		}
 
-#ifndef CONFIG_HIGHMEM
 		/* We must not map this if we have highmem enabled */
+		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
+			break;
+
 		pte = pte_offset_map(pmd, addr);
 		printk(", *pte=%08lx", pte_val(*pte));
 		printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
 		pte_unmap(pte);
-#endif
 	} while(0);
 
 	printk("\n");
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 82c4b42..34df4d9 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,6 +17,7 @@
 #include <linux/initrd.h>
 
 #include <asm/mach-types.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
@@ -64,10 +65,11 @@ static int __init parse_tag_initrd2(const struct tag *tag)
 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
 /*
- * This is used to pass memory configuration data from paging_init
- * to mem_init, and by show_mem() to skip holes in the memory map.
+ * This keeps memory configuration data used by a couple memory
+ * initialization functions, as well as show_mem() for the skipping
+ * of holes in the memory map.  It is populated by arm_add_memory().
 */
-static struct meminfo meminfo = { 0, };
+struct meminfo meminfo;
 
 void show_mem(void)
 {
@@ -128,7 +130,7 @@ find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
 {
 	unsigned int start_pfn, i, bootmap_pfn;
 
-	start_pfn   = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
+	start_pfn   = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
 	bootmap_pfn = 0;
 
 	for_each_nodebank(i, mi, node) {
@@ -331,13 +333,12 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 	free_area_init_node(node, zone_size, start_pfn, zhole_size);
 }
 
-void __init bootmem_init(struct meminfo *mi)
+void __init bootmem_init(void)
 {
+	struct meminfo *mi = &meminfo;
 	unsigned long memend_pfn = 0;
 	int node, initrd_node;
 
-	memcpy(&meminfo, mi, sizeof(meminfo));
-
 	/*
	 * Locate which node contains the ramdisk image, if any.
	 */
@@ -394,20 +395,22 @@ void __init bootmem_init(struct meminfo *mi)
 	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
 }
 
-static inline void free_area(unsigned long addr, unsigned long end, char *s)
+static inline int free_area(unsigned long pfn, unsigned long end, char *s)
 {
-	unsigned int size = (end - addr) >> 10;
+	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
 
-	for (; addr < end; addr += PAGE_SIZE) {
-		struct page *page = virt_to_page(addr);
+	for (; pfn < end; pfn++) {
+		struct page *page = pfn_to_page(pfn);
 		ClearPageReserved(page);
 		init_page_count(page);
-		free_page(addr);
-		totalram_pages++;
+		__free_page(page);
+		pages++;
 	}
 
 	if (size && s)
 		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
+
+	return pages;
 }
 
 static inline void
@@ -478,13 +481,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 */
 void __init mem_init(void)
 {
-	unsigned int codepages, datapages, initpages;
+	unsigned int codesize, datasize, initsize;
 	int i, node;
 
-	codepages = &_etext - &_text;
-	datapages = &_end - &__data_start;
-	initpages = &__init_end - &__init_begin;
-
 #ifndef CONFIG_DISCONTIGMEM
 	max_mapnr   = virt_to_page(high_memory) - mem_map;
 #endif
@@ -501,7 +500,8 @@ void __init mem_init(void)
 
 #ifdef CONFIG_SA1111
 	/* now that our DMA memory is actually so designated, we can free it */
-	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
+	totalram_pages += free_area(PHYS_PFN_OFFSET,
+				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
 #endif
 
 	/*
@@ -509,18 +509,21 @@ void __init mem_init(void)
	 * real number of pages we have in this system
	 */
 	printk(KERN_INFO "Memory:");
-	num_physpages = 0;
 	for (i = 0; i < meminfo.nr_banks; i++) {
 		num_physpages += bank_pfn_size(&meminfo.bank[i]);
 		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
 	}
-	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
+
+	codesize = _etext - _text;
+	datasize = _end - _data;
+	initsize = __init_end - __init_begin;
+
 	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
 		"%dK data, %dK init)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		codepages >> 10, datapages >> 10, initpages >> 10);
+		codesize >> 10, datasize >> 10, initsize >> 10);
 
 	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
 		extern int sysctl_overcommit_memory;
@@ -535,11 +538,10 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-	if (!machine_is_integrator() && !machine_is_cintegrator()) {
-		free_area((unsigned long)(&__init_begin),
-			  (unsigned long)(&__init_end),
-			  "init");
-	}
+	if (!machine_is_integrator() && !machine_is_cintegrator())
+		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
+					    __phys_to_pfn(__pa(__init_end)),
+					    "init");
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -549,7 +551,9 @@ static int keep_initrd;
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd)
-		free_area(start, end, "initrd");
+		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
+					    __phys_to_pfn(__pa(end)),
+					    "initrd");
 }
 
 static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 5d9f539..95bbe11 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -32,7 +32,5 @@ struct meminfo;
 struct pglist_data;
 
 void __init create_mapping(struct map_desc *md);
-void __init bootmem_init(struct meminfo *mi);
+void __init bootmem_init(void);
 void reserve_node_zero(struct pglist_data *pgdat);
-
-extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 7f36c82..9b36c5c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -17,6 +17,7 @@
 
 #include <asm/cputype.h>
 #include <asm/mach-types.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
@@ -646,61 +647,79 @@ static void __init early_vmalloc(char **arg)
 			"vmalloc area too small, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
+
+	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
+		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
+		printk(KERN_WARNING
+			"vmalloc area is too big, limiting to %luMB\n",
+			vmalloc_reserve >> 20);
+	}
 }
 __early_param("vmalloc=", early_vmalloc);
 
 #define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
 
-static int __init check_membank_valid(struct membank *mb)
+static void __init sanity_check_meminfo(void)
 {
-	/*
-	 * Check whether this memory region has non-zero size or
-	 * invalid node number.
-	 */
-	if (mb->size == 0 || mb->node >= MAX_NUMNODES)
-		return 0;
-
-	/*
-	 * Check whether this memory region would entirely overlap
-	 * the vmalloc area.
-	 */
-	if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
-		printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
-			"(vmalloc region overlap).\n",
-			mb->start, mb->start + mb->size - 1);
-		return 0;
-	}
-
-	/*
-	 * Check whether this memory region would partially overlap
-	 * the vmalloc area.
-	 */
-	if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
-	    phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
-		unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);
-
-		printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
-			"to -%.8lx (vmalloc region overlap).\n",
-			mb->start, mb->start + mb->size - 1,
-			mb->start + newsize - 1);
-		mb->size = newsize;
-	}
+	int i, j;
 
-	return 1;
-}
+	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+		struct membank *bank = &meminfo.bank[j];
+		*bank = meminfo.bank[i];
 
-static void __init sanity_check_meminfo(struct meminfo *mi)
-{
-	int i, j;
+#ifdef CONFIG_HIGHMEM
+		/*
+		 * Split those memory banks which are partially overlapping
+		 * the vmalloc area greatly simplifying things later.
+		 */
+		if (__va(bank->start) < VMALLOC_MIN &&
+		    bank->size > VMALLOC_MIN - __va(bank->start)) {
+			if (meminfo.nr_banks >= NR_BANKS) {
+				printk(KERN_CRIT "NR_BANKS too low, "
+						 "ignoring high memory\n");
+			} else {
+				memmove(bank + 1, bank,
+					(meminfo.nr_banks - i) * sizeof(*bank));
+				meminfo.nr_banks++;
+				i++;
+				bank[1].size -= VMALLOC_MIN - __va(bank->start);
+				bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+				j++;
+			}
+			bank->size = VMALLOC_MIN - __va(bank->start);
+		}
+#else
+		/*
+		 * Check whether this memory bank would entirely overlap
+		 * the vmalloc area.
+		 */
+		if (__va(bank->start) >= VMALLOC_MIN) {
+			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+			       "(vmalloc region overlap).\n",
+			       bank->start, bank->start + bank->size - 1);
+			continue;
+		}
 
-	for (i = 0, j = 0; i < mi->nr_banks; i++) {
-		if (check_membank_valid(&mi->bank[i]))
-			mi->bank[j++] = mi->bank[i];
+		/*
+		 * Check whether this memory bank would partially overlap
+		 * the vmalloc area.
+		 */
+		if (__va(bank->start + bank->size) > VMALLOC_MIN ||
+		    __va(bank->start + bank->size) < __va(bank->start)) {
+			unsigned long newsize = VMALLOC_MIN - __va(bank->start);
+			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
+			       "to -%.8lx (vmalloc region overlap).\n",
+			       bank->start, bank->start + bank->size - 1,
+			       bank->start + newsize - 1);
+			bank->size = newsize;
+		}
+#endif
+		j++;
 	}
-	mi->nr_banks = j;
+	meminfo.nr_banks = j;
 }
 
-static inline void prepare_page_table(struct meminfo *mi)
+static inline void prepare_page_table(void)
 {
 	unsigned long addr;
 
@@ -712,7 +731,7 @@ static inline void prepare_page_table(struct meminfo *mi)
 
 #ifdef CONFIG_XIP_KERNEL
 	/* The XIP kernel is mapped in the module area -- skip over it */
-	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
 #endif
 	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -721,7 +740,7 @@ static inline void prepare_page_table(struct meminfo *mi)
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
-	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
+	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
 	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
@@ -738,10 +757,10 @@ void __init reserve_node_zero(pg_data_t *pgdat)
	 * Note that this can only be in node 0.
	 */
 #ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start,
+	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
			BOOTMEM_DEFAULT);
 #else
-	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext,
+	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
			BOOTMEM_DEFAULT);
 #endif
 
@@ -808,7 +827,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
	 * Allocate the vector page early.
	 */
 	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-	BUG_ON(!vectors);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -820,7 +838,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 #ifdef CONFIG_XIP_KERNEL
 	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
 	map.virtual = MODULES_VADDR;
-	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 	map.type = MT_ROM;
 	create_mapping(&map);
 #endif
@@ -880,23 +898,23 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
 	build_mem_type_table();
-	sanity_check_meminfo(mi);
-	prepare_page_table(mi);
-	bootmem_init(mi);
+	sanity_check_meminfo();
+	prepare_page_table();
+	bootmem_init();
 	devicemaps_init(mdesc);
 
 	top_pmd = pmd_off_k(0xffff0000);
 
 	/*
-	 * allocate the zero page.  Note that we count on this going ok.
+	 * allocate the zero page.  Note that this always succeeds and
+	 * returns a zeroed result.
	 */
 	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
-	memzero(zero_page, PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
 	flush_dcache_page(empty_zero_page);
 }
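The CONFIG_HIGHMEM arm of sanity_check_meminfo() above is dense, but the arithmetic is just a split at the lowmem ceiling. A worked sketch with made-up numbers (PAGE_OFFSET 0xc0000000, VMALLOC_MIN 0xd0000000, a single 512MB bank mapped at PAGE_OFFSET; the function is illustrative, not from the patch):

static void example_bank_split(unsigned long bank_va, unsigned long size,
		unsigned long vmalloc_min,
		unsigned long *low_size, unsigned long *high_size)
{
	*low_size  = vmalloc_min - bank_va;	/* stays in the direct map */
	*high_size = size - *low_size;		/* re-registered as highmem */
}

example_bank_split(0xc0000000, 0x20000000, 0xd0000000, ...) yields 256MB of lowmem plus 256MB of highmem, which is exactly what the memmove()-and-resize dance in the hunk produces: the original bank is shrunk to end at VMALLOC_MIN and a new bank starting at the physical address of VMALLOC_MIN is inserted after it.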
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 07b62b2..ad7bacc 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -10,6 +10,7 @@
 #include <linux/io.h>
 
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
 #include <asm/page.h>
 #include <asm/mach/arch.h>
 
@@ -25,10 +26,10 @@ void __init reserve_node_zero(pg_data_t *pgdat)
	 * Note that this can only be in node 0.
	 */
 #ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start,
+	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
			BOOTMEM_DEFAULT);
 #else
-	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext,
+	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
			BOOTMEM_DEFAULT);
 #endif
 
@@ -41,27 +42,13 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 			BOOTMEM_DEFAULT);
 }
 
-static void __init sanity_check_meminfo(struct meminfo *mi)
-{
-	int i, j;
-
-	for (i = 0, j = 0; i < mi->nr_banks; i++) {
-		struct membank *mb = &mi->bank[i];
-
-		if (mb->size != 0 && mb->node < MAX_NUMNODES)
-			mi->bank[j++] = mi->bank[i];
-	}
-	mi->nr_banks = j;
-}
-
 /*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init paging_init(struct machine_desc *mdesc)
 {
-	sanity_check_meminfo(mi);
-	bootmem_init(mi);
+	bootmem_init();
 }
 
 /*
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index e0f19ab..2690146 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -31,7 +31,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	if (!new_pgd)
 		goto no_pgd;
 
-	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
+	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
 
 	/*
	 * Copy over the kernel and IO PGD entries
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 2b5ba39..4ad3bf2 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -33,8 +33,8 @@ EXPORT_SYMBOL(cpu_cache);
 
 #ifdef CONFIG_MMU
 #ifndef MULTI_USER
-EXPORT_SYMBOL(__cpu_clear_user_page);
-EXPORT_SYMBOL(__cpu_copy_user_page);
+EXPORT_SYMBOL(__cpu_clear_user_highpage);
+EXPORT_SYMBOL(__cpu_copy_user_highpage);
 #else
 EXPORT_SYMBOL(cpu_user);
 #endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 294943b..f0cc599 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -71,6 +71,8 @@ ENTRY(cpu_v6_reset)
 *	IRQs are already disabled.
 */
 ENTRY(cpu_v6_do_idle)
+	mov	r1, #0
+	mcr	p15, 0, r1, c7, c10, 4		@ DWB - WFI may enter a low-power mode
 	mcr	p15, 0, r1, c7, c0, 4		@ wait for interrupt
 	mov	pc, lr
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 4d3c0a7..d1ebec4 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -20,9 +20,17 @@
 
 #define TTB_C		(1 << 0)
 #define TTB_S		(1 << 1)
+#define TTB_RGN_NC	(0 << 3)
+#define TTB_RGN_OC_WBWA	(1 << 3)
 #define TTB_RGN_OC_WT	(2 << 3)
 #define TTB_RGN_OC_WB	(3 << 3)
 
+#ifndef CONFIG_SMP
+#define TTB_FLAGS	TTB_C|TTB_RGN_OC_WB		@ mark PTWs cacheable, outer WB
+#else
+#define TTB_FLAGS	TTB_C|TTB_S|TTB_RGN_OC_WBWA	@ mark PTWs cacheable and shared, outer WBWA
+#endif
+
 ENTRY(cpu_v7_proc_init)
 	mov	pc, lr
 ENDPROC(cpu_v7_proc_init)
@@ -55,6 +63,7 @@ ENDPROC(cpu_v7_reset)
 *	IRQs are already disabled.
 */
 ENTRY(cpu_v7_do_idle)
+	dsb					@ WFI may enter a low-power mode
 	wfi
 	mov	pc, lr
 ENDPROC(cpu_v7_do_idle)
@@ -85,7 +94,7 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
+	orr	r0, r0, #TTB_FLAGS
 	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
 	isb
1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -162,6 +171,11 @@ cpu_v7_name:
 *	- cache type register is implemented
 */
__v7_setup:
+#ifdef CONFIG_SMP
+	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
+	orr	r0, r0, #(0x1 << 6)
+	mcr	p15, 0, r0, c1, c0, 1
+#endif
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
 	bl	v7_flush_dcache_all
@@ -174,8 +188,7 @@ __v7_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
-	mcr	p15, 0, r4, c2, c0, 0		@ load TTB0
+	orr	r4, r4, #TTB_FLAGS
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 8f6cf56..33515c2 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -481,3 +481,28 @@ __xsc3_proc_info:
 	.long	xsc3_mc_user_fns
 	.long	xsc3_cache_fns
 	.size	__xsc3_proc_info, . - __xsc3_proc_info
+
+/* Note: PXA935 changed its implementor ID from Intel to Marvell */
+
+	.type	__xsc3_pxa935_proc_info,#object
+__xsc3_pxa935_proc_info:
+	.long	0x56056000
+	.long	0xffffe000
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_BUFFERABLE | \
+		PMD_SECT_CACHEABLE | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__xsc3_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_xsc3_name
+	.long	xsc3_processor_functions
+	.long	v4wbi_tlb_fns
+	.long	xsc3_mc_user_fns
+	.long	xsc3_cache_fns
+	.size	__xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info