Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-v4wb.S      2
-rw-r--r--  arch/arm/mm/cache-v4wt.S      2
-rw-r--r--  arch/arm/mm/cache-v7.S        2
-rw-r--r--  arch/arm/mm/init.c           39
-rw-r--r--  arch/arm/mm/mmap.c            4
-rw-r--r--  arch/arm/mm/proc-arm1020.S    2
-rw-r--r--  arch/arm/mm/proc-arm1020e.S   2
-rw-r--r--  arch/arm/mm/proc-arm1022.S    2
-rw-r--r--  arch/arm/mm/proc-arm1026.S    2
-rw-r--r--  arch/arm/mm/proc-arm720.S     2
-rw-r--r--  arch/arm/mm/proc-arm920.S     4
-rw-r--r--  arch/arm/mm/proc-arm922.S     2
-rw-r--r--  arch/arm/mm/proc-arm925.S     2
-rw-r--r--  arch/arm/mm/proc-arm926.S     2
-rw-r--r--  arch/arm/mm/proc-macros.S     2
-rw-r--r--  arch/arm/mm/proc-sa1100.S     2
-rw-r--r--  arch/arm/mm/proc-v6.S         6
-rw-r--r--  arch/arm/mm/proc-v7.S         4
-rw-r--r--  arch/arm/mm/proc-xsc3.S       2
-rw-r--r--  arch/arm/mm/proc-xscale.S     4
20 files changed, 63 insertions(+), 26 deletions(-)
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index d3644db..f40c696 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -32,7 +32,7 @@
 /*
  * This is the size at which it becomes more efficient to
  * clean the whole cache, rather than using the individual
- * cache line maintainence instructions.
+ * cache line maintenance instructions.
  *
  *  Size  Clean (ticks)  Dirty (ticks)
  *  4096   21  20  21     53  55  54
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 49c2b66..a7b276d 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -34,7 +34,7 @@
 /*
  * This is the size at which it becomes more efficient to
  * clean the whole cache, rather than using the individual
- * cache line maintainence instructions.
+ * cache line maintenance instructions.
  *
  * *** This needs benchmarking
  */
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 6136e68..dc18d81 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -96,7 +96,7 @@ ENDPROC(v7_flush_dcache_all)
  *	Flush the entire cache system.
  *	The data cache flush is now achieved using atomic clean / invalidates
  *	working outwards from L1 cache. This is done using Set/Way based cache
- *	maintainance instructions.
+ *	maintenance instructions.
  *	The instruction cache can still be invalidated back to the point of
  *	unification in a single instruction.
  *
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e5f6fc4..76f82ae 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -201,6 +201,20 @@ static void __init arm_bootmem_init(unsigned long start_pfn,
         }
 }
 
+#ifdef CONFIG_ZONE_DMA
+static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
+        unsigned long dma_size)
+{
+        if (size[0] <= dma_size)
+                return;
+
+        size[ZONE_NORMAL] = size[0] - dma_size;
+        size[ZONE_DMA] = dma_size;
+        hole[ZONE_NORMAL] = hole[0];
+        hole[ZONE_DMA] = 0;
+}
+#endif
+
 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
         unsigned long max_high)
 {
@@ -243,11 +257,18 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #endif
         }
 
+#ifdef ARM_DMA_ZONE_SIZE
+#ifndef CONFIG_ZONE_DMA
+#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations
+#endif
+
         /*
          * Adjust the sizes according to any special requirements for
          * this machine type.
          */
-        arch_adjust_zones(zone_size, zhole_size);
+        arm_adjust_dma_zone(zone_size, zhole_size,
+                ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
+#endif
 
         free_area_init_node(0, zone_size, min, zhole_size);
 }
@@ -392,7 +413,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
          * Convert start_pfn/end_pfn to a struct page pointer.
          */
         start_pg = pfn_to_page(start_pfn - 1) + 1;
-        end_pg = pfn_to_page(end_pfn);
+        end_pg = pfn_to_page(end_pfn - 1) + 1;
 
         /*
          * Convert to physical addresses, and
@@ -426,6 +447,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
 
                 bank_start = bank_pfn_start(bank);
 
+#ifdef CONFIG_SPARSEMEM
+                /*
+                 * Take care not to free memmap entries that don't exist
+                 * due to SPARSEMEM sections which aren't present.
+                 */
+                bank_start = min(bank_start,
+                                 ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
                 /*
                  * If we had a previous bank, and there is a space
                  * between the current bank and the previous, free it.
@@ -440,6 +469,12 @@ static void __init free_unused_memmap(struct meminfo *mi)
                  */
                 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
         }
+
+#ifdef CONFIG_SPARSEMEM
+        if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
+                free_memmap(prev_bank_end,
+                        ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
 }
 
 static void __init free_highpages(void)
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index afe209e..74be05f 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -7,6 +7,7 @@
 #include <linux/shm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/personality.h>
 #include <linux/random.h>
 #include <asm/cputype.h>
 #include <asm/system.h>
@@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                 mm->cached_hole_size = 0;
         }
         /* 8 bits of randomness in 20 address space bits */
-        if (current->flags & PF_RANDOMIZE)
+        if ((current->flags & PF_RANDOMIZE) &&
+            !(current->personality & ADDR_NO_RANDOMIZE))
                 addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 226e3d8..6c4e7fd 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -64,7 +64,7 @@
 /*
  * This is the size at which it becomes more efficient to
  * clean the whole cache, rather than using the individual
- * cache line maintainence instructions.
+ * cache line maintenance instructions.
  */
 #define CACHE_DLIMIT    32768
 
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 86d9c2c..4ce947c 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -64,7 +64,7 @@
 /*
  * This is the size at which it becomes more efficient to
  * clean the whole cache, rather than using the individual
- * cache line maintainence instructions.
+ * cache line maintenance instructions.
  */
 #define CACHE_DLIMIT    32768
 
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 83d3dd3..c8884c5 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -53,7 +53,7 @@
 /*
  * This is the size at which it becomes more efficient to
  * clean the whole cache, rather than using the individual
- * cache line maintainence instructions.
+ * cache line maintenance instructions.
  */
 #define CACHE_DLIMIT    32768
 
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 686043e..4136846 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -53,7 +53,7 @@
 /*
  * This is the size at which it becomes more efficient to
  * clean the whole cache, rather than using the individual
- * cache line maintainence instructions.
+ * cache line maintenance instructions.
  */
 #define CACHE_DLIMIT    32768
 
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 665266d..7a06e59 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -63,7 +63,7 @@ ENTRY(cpu_arm720_proc_fin)
 /*
  * Function: arm720_proc_do_idle(void)
  * Params  : r0 = unused
- * Purpose : put the processer in proper idle mode
+ * Purpose : put the processor in proper idle mode
  */
 ENTRY(cpu_arm720_do_idle)
         mov     pc, lr
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 219980e..bf8a1d1 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -53,7 +53,7 @@
 /*
  * This is the size at which it becomes more efficient to
  * clean the whole cache, rather than using the individual
- * cache line maintainence instructions.
+ * cache line maintenance instructions.
  */
 #define CACHE_DLIMIT    65536
 
@@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl  cpu_arm920_suspend_size
 .equ    cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm920_do_suspend)
         stmfd   sp!, {r4 - r7, lr}
         mrc     p15, 0, r4, c13, c0, 0  @ PID
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 36154b1..95ba1fc 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -54,7 +54,7 @@
 /*
  * This is the size at which it becomes more efficient to
  * clean the whole cache, rather than using the individual
- * cache line maintainence instructions.  (I think this should
+ * cache line maintenance instructions.  (I think this should
  * be 32768).
  */
 #define CACHE_DLIMIT    8192
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 89c5e000..541e477 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -77,7 +77,7 @@
 /*
  * This is the size at which it becomes more efficient to
  * clean the whole cache, rather than using the individual
- * cache line maintainence instructions.
+ * cache line maintenance instructions.
  */
 #define CACHE_DLIMIT    8192
 
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 6a4bdb2..0ed85d9 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl  cpu_arm926_suspend_size
 .equ    cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm926_do_suspend)
         stmfd   sp!, {r4 - r7, lr}
         mrc     p15, 0, r4, c13, c0, 0  @ PID
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index e32fa49..34261f9 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -85,7 +85,7 @@
 
 /*
  * Sanity check the PTE configuration for the code below - which makes
- * certain assumptions about how these bits are layed out.
+ * certain assumptions about how these bits are laid out.
  */
 #ifdef CONFIG_MMU
 #if L_PTE_SHARED != PTE_EXT_SHARED
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 74483d1..184a9c9 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 
 .globl  cpu_sa1100_suspend_size
 .equ    cpu_sa1100_suspend_size, 4*4
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_sa1100_do_suspend)
         stmfd   sp!, {r4 - r7, lr}
         mrc     p15, 0, r4, c3, c0, 0           @ domain ID
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 832b6bd..7c99cb4 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl  cpu_v6_suspend_size
 .equ    cpu_v6_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v6_do_suspend)
         stmfd   sp!, {r4 - r11, lr}
         mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
@@ -132,7 +132,7 @@ ENTRY(cpu_v6_do_suspend)
         mrc     p15, 0, r6, c3, c0, 0   @ Domain ID
         mrc     p15, 0, r7, c2, c0, 0   @ Translation table base 0
         mrc     p15, 0, r8, c2, c0, 1   @ Translation table base 1
-        mrc     p15, 0, r9, c1, c0, 1   @ auxillary control register
+        mrc     p15, 0, r9, c1, c0, 1   @ auxiliary control register
         mrc     p15, 0, r10, c1, c0, 2  @ co-processor access control
         mrc     p15, 0, r11, c1, c0, 0  @ control register
         stmia   r0, {r4 - r11}
@@ -151,7 +151,7 @@ ENTRY(cpu_v6_do_resume)
         mcr     p15, 0, r6, c3, c0, 0   @ Domain ID
         mcr     p15, 0, r7, c2, c0, 0   @ Translation table base 0
         mcr     p15, 0, r8, c2, c0, 1   @ Translation table base 1
-        mcr     p15, 0, r9, c1, c0, 1   @ auxillary control register
+        mcr     p15, 0, r9, c1, c0, 1   @ auxiliary control register
         mcr     p15, 0, r10, c1, c0, 2  @ co-processor access control
         mcr     p15, 0, ip, c2, c0, 2   @ TTB control register
         mcr     p15, 0, ip, c7, c5, 4   @ ISB
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 262fa88..babfba09 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -211,7 +211,7 @@ cpu_v7_name:
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl  cpu_v7_suspend_size
 .equ    cpu_v7_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v7_do_suspend)
         stmfd   sp!, {r4 - r11, lr}
         mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
@@ -237,7 +237,7 @@ ENTRY(cpu_v7_do_resume)
         mcr     p15, 0, r7, c2, c0, 0   @ TTB 0
         mcr     p15, 0, r8, c2, c0, 1   @ TTB 1
         mcr     p15, 0, ip, c2, c0, 2   @ TTB control register
-        mcr     p15, 0, r10, c1, c0, 1  @ Auxillary control register
+        mcr     p15, 0, r10, c1, c0, 1  @ Auxiliary control register
         mcr     p15, 0, r11, c1, c0, 2  @ Co-processor access control
         ldr     r4, =PRRR               @ PRRR
         ldr     r5, =NMRR               @ NMRR
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 63d8b20..5962136 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 .globl  cpu_xsc3_suspend_size
 .equ    cpu_xsc3_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xsc3_do_suspend)
         stmfd   sp!, {r4 - r10, lr}
         mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 086038c..42af976 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -395,7 +395,7 @@ ENTRY(xscale_dma_a0_map_area)
         teq     r2, #DMA_TO_DEVICE
         beq     xscale_dma_clean_range
         b       xscale_dma_flush_range
-ENDPROC(xscsale_dma_a0_map_area)
+ENDPROC(xscale_dma_a0_map_area)
 
 /*
  * dma_unmap_area(start, size, dir)
@@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 .globl  cpu_xscale_suspend_size
 .equ    cpu_xscale_suspend_size, 4 * 7
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xscale_do_suspend)
         stmfd   sp!, {r4 - r10, lr}
         mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
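
For readers following the init.c change: below is a minimal, stand-alone sketch (ordinary user-space C, not kernel code) of the arithmetic the new arm_adjust_dma_zone() helper performs. The zone indices, the 4 KiB page size and the 128 MiB / 32 MiB figures are assumptions chosen for the demo; only the splitting logic mirrors the hunk above.

/* Illustrative only: mirrors the arithmetic of the arm_adjust_dma_zone()
 * helper added in the init.c hunk above. Zone indices and the 4 KiB page
 * size are assumptions for this demo, not kernel definitions. */
#include <stdio.h>

enum { ZONE_DMA = 0, ZONE_NORMAL = 1, NR_ZONES = 2 };
#define PAGE_SHIFT 12   /* assume 4 KiB pages */

static void adjust_dma_zone(unsigned long *size, unsigned long *hole,
                            unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;                         /* everything fits in the DMA zone */

        size[ZONE_NORMAL] = size[0] - dma_size; /* pages above the DMA limit */
        size[ZONE_DMA] = dma_size;              /* low pages reachable by DMA */
        hole[ZONE_NORMAL] = hole[0];            /* holes stay with ZONE_NORMAL */
        hole[ZONE_DMA] = 0;
}

int main(void)
{
        /* e.g. 128 MiB of RAM with no holes and a 32 MiB DMA limit */
        unsigned long size[NR_ZONES] = { (128UL << 20) >> PAGE_SHIFT, 0 };
        unsigned long hole[NR_ZONES] = { 0, 0 };

        adjust_dma_zone(size, hole, (32UL << 20) >> PAGE_SHIFT);
        printf("ZONE_DMA: %lu pages, ZONE_NORMAL: %lu pages\n",
               size[ZONE_DMA], size[ZONE_NORMAL]);      /* 8192 and 24576 */
        return 0;
}

Built with any C compiler, this prints 8192 DMA pages and 24576 normal pages: the lowest ARM_DMA_ZONE_SIZE bytes of managed memory are carved into ZONE_DMA and the remainder becomes ZONE_NORMAL, with any hole accounted to the normal zone, which is what the init.c hunk now does in place of the per-machine arch_adjust_zones() hook.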