Diffstat (limited to 'arch/arm/mm')
38 files changed, 341 insertions, 666 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 101105e..87ec141 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -717,17 +717,6 @@ config TLS_REG_EMUL
 	  a few prototypes like that in existence) and therefore access to
 	  that required register must be emulated.
 
-config HAS_TLS_REG
-	bool
-	depends on !TLS_REG_EMUL
-	default y if SMP || CPU_32v7
-	help
-	  This selects support for the CP15 thread register.
-	  It is defined to be available on some ARMv6 processors (including
-	  all SMP capable ARMv6's) or later processors. User space may
-	  assume directly accessing that register and always obtain the
-	  expected value only on ARMv7 and above.
-
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
 	help
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index e8d34a80..d63b6c4 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -15,7 +15,6 @@ endif
 obj-$(CONFIG_MODULES)		+= proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
-obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
 
 obj-$(CONFIG_CPU_ABRT_NOMMU)	+= abort-nommu.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f98c35..d073b64 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -924,8 +924,20 @@ static int __init alignment_init(void)
 		ai_usermode = UM_FIXUP;
 	}
 
-	hook_fault_code(1, do_alignment, SIGILL, "alignment exception");
-	hook_fault_code(3, do_alignment, SIGILL, "alignment exception");
+	hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
+			"alignment exception");
+
+	/*
+	 * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
+	 * fault, not as alignment error.
+	 *
+	 * TODO: handle ARMv6K properly. Runtime check for 'K' extension is
+	 * needed.
+	 */
+	if (cpu_architecture() <= CPU_ARCH_ARMv6) {
+		hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
+				"alignment exception");
+	}
 
 	return 0;
 }
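The SIGILL-to-SIGBUS change above is visible to user space: a trapped misaligned access (with /proc/cpu/alignment in signal mode) is now reported as a bus error with si_code BUS_ADRALN. A minimal userspace sketch, not part of the patch, that observes the new signal:

	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void on_sigbus(int sig, siginfo_t *info, void *uc)
	{
		/* BUS_ADRALN identifies an alignment fault; si_addr is the bad address */
		if (info->si_code == BUS_ADRALN)
			fprintf(stderr, "alignment fault at %p\n", info->si_addr);	/* demo only; not async-signal-safe */
		_Exit(1);
	}

	int main(void)
	{
		struct sigaction sa = { .sa_sigaction = on_sigbus, .sa_flags = SA_SIGINFO };

		sigaction(SIGBUS, &sa, NULL);
		/* ... perform a deliberately misaligned access here ... */
		return 0;
	}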
diff --git a/arch/arm/mm/discontig.c b/arch/arm/mm/discontig.c
deleted file mode 100644
index c8c0c4b..0000000
--- a/arch/arm/mm/discontig.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * linux/arch/arm/mm/discontig.c
- *
- * Discontiguous memory support.
- *
- * Initial code: Copyright (C) 1999-2000 Nicolas Pitre
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/mmzone.h>
-#include <linux/bootmem.h>
-
-#if MAX_NUMNODES != 4 && MAX_NUMNODES != 16
-# error Fix Me Please
-#endif
-
-/*
- * Our node_data structure for discontiguous memory.
- */
-
-pg_data_t discontig_node_data[MAX_NUMNODES] = {
-  { .bdata = &bootmem_node_data[0] },
-  { .bdata = &bootmem_node_data[1] },
-  { .bdata = &bootmem_node_data[2] },
-  { .bdata = &bootmem_node_data[3] },
-#if MAX_NUMNODES == 16
-  { .bdata = &bootmem_node_data[4] },
-  { .bdata = &bootmem_node_data[5] },
-  { .bdata = &bootmem_node_data[6] },
-  { .bdata = &bootmem_node_data[7] },
-  { .bdata = &bootmem_node_data[8] },
-  { .bdata = &bootmem_node_data[9] },
-  { .bdata = &bootmem_node_data[10] },
-  { .bdata = &bootmem_node_data[11] },
-  { .bdata = &bootmem_node_data[12] },
-  { .bdata = &bootmem_node_data[13] },
-  { .bdata = &bootmem_node_data[14] },
-  { .bdata = &bootmem_node_data[15] },
-#endif
-};
-
-EXPORT_SYMBOL(discontig_node_data);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9e7742f..c704eed 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -183,6 +183,8 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 {
 	struct arm_vmregion *c;
+	size_t align;
+	int bit;
 
 	if (!consistent_pte[0]) {
 		printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -191,9 +193,20 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 	}
 
 	/*
+	 * Align the virtual region allocation - maximum alignment is
+	 * a section size, minimum is a page size. This helps reduce
+	 * fragmentation of the DMA space, and also prevents allocations
+	 * smaller than a section from crossing a section boundary.
+	 */
+	bit = fls(size - 1) + 1;
+	if (bit > SECTION_SHIFT)
+		bit = SECTION_SHIFT;
+	align = 1 << bit;
+
+	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = arm_vmregion_alloc(&consistent_head, size,
+	c = arm_vmregion_alloc(&consistent_head, align, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		pte_t *pte;
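The alignment computed in __dma_alloc_remap() is a power of two covering the allocation, capped at one section (SECTION_SHIFT is 20 on ARM, i.e. 1MiB sections). A standalone sketch mirroring the fls()-based rule in the hunk above, illustrative only:

	#include <linux/bitops.h>	/* fls() */

	static size_t dma_remap_align(size_t size)
	{
		int bit = fls(size - 1) + 1;

		if (bit > SECTION_SHIFT)
			bit = SECTION_SHIFT;	/* never align beyond a 1MiB section */
		return (size_t)1 << bit;
		/* e.g. size = 20KiB -> 64KiB alignment; size >= 512KiB -> 1MiB */
	}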
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index cbfb2ed..23b0b03 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -413,7 +413,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 	pmd_k = pmd_offset(pgd_k, addr);
 	pmd   = pmd_offset(pgd, addr);
 
-	if (pmd_none(*pmd_k))
+	/*
+	 * On ARM one Linux PGD entry contains two hardware entries (see page
+	 * tables layout in pgtable.h). We normally guarantee that we always
+	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
+	 * It can create individual L1 entries, so here we have to check
+	 * pmd_none() for the entry really corresponding to the address, not
+	 * for the first entry of the pair.
+	 */
+	index = (addr >> SECTION_SHIFT) & 1;
+	if (pmd_none(pmd_k[index]))
 		goto bad_area;
 
 	copy_pmd(pmd, pmd_k);
@@ -463,15 +472,10 @@ static struct fsr_info {
 	 * defines these to be "precise" aborts.
 	 */
 	{ do_bad,		SIGSEGV, 0,		"vector exception"		},
-	{ do_bad,		SIGILL,	 BUS_ADRALN,	"alignment exception"		},
+	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		},
 	{ do_bad,		SIGKILL, 0,		"terminal exception"		},
-	{ do_bad,		SIGILL,	 BUS_ADRALN,	"alignment exception"		},
-/* Do we need runtime check ? */
-#if __LINUX_ARM_ARCH__ < 6
+	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		},
 	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	},
-#else
-	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"I-cache maintenance fault"	},
-#endif
 	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault"	},
 	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	},
 	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault"	},
@@ -508,13 +512,15 @@ static struct fsr_info {
 void __init
 hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
-		int sig, const char *name)
+		int sig, int code, const char *name)
 {
-	if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
-		fsr_info[nr].fn   = fn;
-		fsr_info[nr].sig  = sig;
-		fsr_info[nr].name = name;
-	}
+	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
+		BUG();
+
+	fsr_info[nr].fn   = fn;
+	fsr_info[nr].sig  = sig;
+	fsr_info[nr].code = code;
+	fsr_info[nr].name = name;
 }
 
 /*
@@ -594,3 +600,25 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 	arm_notify_die("", regs, &info, ifsr, 0);
 }
 
+static int __init exceptions_init(void)
+{
+	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
+		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
+				"I-cache maintenance fault");
+	}
+
+	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
+		/*
+		 * TODO: Access flag faults introduced in ARMv6K.
+		 * Runtime check for 'K' extension is needed
+		 */
+		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
+				"section access flag fault");
+		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
+				"section access flag fault");
+	}
+
+	return 0;
+}
+
+arch_initcall(exceptions_init);
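The two-hardware-entries-per-pmd rule used in do_translation_fault() can be made concrete: each Linux pmd covers 2MiB backed by a pair of 1MiB hardware section entries, and bit 20 of the faulting address selects the hardware entry the access actually hit. A hypothetical helper showing the same index arithmetic:

	/* Illustrative only: select the hardware L1 entry for 'addr' from the pair. */
	static inline pmd_t *hw_l1_entry(pmd_t *pmd_k, unsigned long addr)
	{
		unsigned int index = (addr >> SECTION_SHIFT) & 1;	/* bit 20 of addr */

		return &pmd_k[index];	/* e.g. addr 0x40123456 -> index 1, second 1MiB half */
	}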
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f6a9994..7185b00 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,6 +17,7 @@
 #include <linux/initrd.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
@@ -79,38 +80,37 @@ struct meminfo meminfo;
 void show_mem(void)
 {
 	int free = 0, total = 0, reserved = 0;
-	int shared = 0, cached = 0, slab = 0, node, i;
+	int shared = 0, cached = 0, slab = 0, i;
 	struct meminfo * mi = &meminfo;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	for_each_online_node(node) {
-		for_each_nodebank (i,mi,node) {
-			struct membank *bank = &mi->bank[i];
-			unsigned int pfn1, pfn2;
-			struct page *page, *end;
-
-			pfn1 = bank_pfn_start(bank);
-			pfn2 = bank_pfn_end(bank);
-
-			page = pfn_to_page(pfn1);
-			end  = pfn_to_page(pfn2 - 1) + 1;
-
-			do {
-				total++;
-				if (PageReserved(page))
-					reserved++;
-				else if (PageSwapCache(page))
-					cached++;
-				else if (PageSlab(page))
-					slab++;
-				else if (!page_count(page))
-					free++;
-				else
-					shared += page_count(page) - 1;
-				page++;
-			} while (page < end);
-		}
+
+	for_each_bank (i, mi) {
+		struct membank *bank = &mi->bank[i];
+		unsigned int pfn1, pfn2;
+		struct page *page, *end;
+
+		pfn1 = bank_pfn_start(bank);
+		pfn2 = bank_pfn_end(bank);
+
+		page = pfn_to_page(pfn1);
+		end  = pfn_to_page(pfn2 - 1) + 1;
+
+		do {
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (PageSlab(page))
+				slab++;
+			else if (!page_count(page))
+				free++;
+			else
+				shared += page_count(page) - 1;
+			page++;
+		} while (page < end);
 	}
 
 	printk("%d pages of RAM\n", total);
@@ -121,7 +121,7 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-static void __init find_node_limits(int node, struct meminfo *mi,
+static void __init find_limits(struct meminfo *mi,
 	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
 {
 	int i;
@@ -129,7 +129,7 @@ static void __init find_node_limits(int node, struct meminfo *mi,
 	*min = -1UL;
 	*max_low = *max_high = 0;
 
-	for_each_nodebank(i, mi, node) {
+	for_each_bank (i, mi) {
 		struct membank *bank = &mi->bank[i];
 		unsigned long start, end;
@@ -147,155 +147,64 @@ static void __init find_node_limits(int node, struct meminfo *mi,
 	}
 }
 
-/*
- * FIXME: We really want to avoid allocating the bootmap bitmap
- * over the top of the initrd. Hopefully, this is located towards
- * the start of a bank, so if we allocate the bootmap bitmap at
- * the end, we won't clash.
- */
-static unsigned int __init
-find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
-{
-	unsigned int start_pfn, i, bootmap_pfn;
-
-	start_pfn   = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
-	bootmap_pfn = 0;
-
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-		unsigned int start, end;
-
-		start = bank_pfn_start(bank);
-		end   = bank_pfn_end(bank);
-
-		if (end < start_pfn)
-			continue;
-
-		if (start < start_pfn)
-			start = start_pfn;
-
-		if (end <= start)
-			continue;
-
-		if (end - start >= bootmap_pages) {
-			bootmap_pfn = start;
-			break;
-		}
-	}
-
-	if (bootmap_pfn == 0)
-		BUG();
-
-	return bootmap_pfn;
-}
-
-static int __init check_initrd(struct meminfo *mi)
-{
-	int initrd_node = -2;
-#ifdef CONFIG_BLK_DEV_INITRD
-	unsigned long end = phys_initrd_start + phys_initrd_size;
-
-	/*
-	 * Make sure that the initrd is within a valid area of
-	 * memory.
-	 */
-	if (phys_initrd_size) {
-		unsigned int i;
-
-		initrd_node = -1;
-
-		for (i = 0; i < mi->nr_banks; i++) {
-			struct membank *bank = &mi->bank[i];
-			if (bank_phys_start(bank) <= phys_initrd_start &&
-			    end <= bank_phys_end(bank))
-				initrd_node = bank->node;
-		}
-	}
-
-	if (initrd_node == -1) {
-		printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
-		       "physical memory - disabling initrd\n",
-		       phys_initrd_start, phys_initrd_size);
-		phys_initrd_start = phys_initrd_size = 0;
-	}
-#endif
-
-	return initrd_node;
-}
-
-static void __init bootmem_init_node(int node, struct meminfo *mi,
+static void __init arm_bootmem_init(struct meminfo *mi,
 	unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long boot_pfn;
 	unsigned int boot_pages;
+	phys_addr_t bitmap;
 	pg_data_t *pgdat;
 	int i;
 
 	/*
-	 * Allocate the bootmem bitmap page.
+	 * Allocate the bootmem bitmap page. This must be in a region
+	 * of memory which has already been mapped.
 	 */
 	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);
+	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
+				__pfn_to_phys(end_pfn));
 
 	/*
-	 * Initialise the bootmem allocator for this node, handing the
+	 * Initialise the bootmem allocator, handing the
 	 * memory banks over to bootmem.
 	 */
-	node_set_online(node);
-	pgdat = NODE_DATA(node);
-	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
+	node_set_online(0);
+	pgdat = NODE_DATA(0);
+	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
 
-	for_each_nodebank(i, mi, node) {
+	for_each_bank(i, mi) {
 		struct membank *bank = &mi->bank[i];
 		if (!bank->highmem)
-			free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
 	}
 
 	/*
-	 * Reserve the bootmem bitmap for this node.
+	 * Reserve the memblock reserved regions in bootmem.
 	 */
-	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
-			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-}
-
-static void __init bootmem_reserve_initrd(int node)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-	pg_data_t *pgdat = NODE_DATA(node);
-	int res;
-
-	res = reserve_bootmem_node(pgdat, phys_initrd_start,
-			     phys_initrd_size, BOOTMEM_EXCLUSIVE);
-
-	if (res == 0) {
-		initrd_start = __phys_to_virt(phys_initrd_start);
-		initrd_end = initrd_start + phys_initrd_size;
-	} else {
-		printk(KERN_ERR
-			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
-			"memory region - disabling initrd\n",
-			phys_initrd_start, phys_initrd_size);
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
+		if (start >= start_pfn &&
+		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
+			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
+				memblock_size_bytes(&memblock.reserved, i),
+				BOOTMEM_DEFAULT);
 	}
-#endif
 }
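arm_bootmem_init() sizes the bitmap with bootmem_bootmap_pages() and now places it through memblock instead of scanning banks by hand. The cost is one bit per page; a rough sketch of the sizing, assuming it mirrors what bootmem_bootmap_pages() computes (its internal alignment details are ignored here):

	static unsigned long bootmap_pages_needed(unsigned long pages)
	{
		unsigned long bytes = DIV_ROUND_UP(pages, 8);	/* one bit per page */

		return DIV_ROUND_UP(bytes, PAGE_SIZE);
		/* e.g. 128MiB of RAM = 32768 pages -> 4KiB of bitmap -> 1 page */
	}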
 
-static void __init bootmem_free_node(int node, struct meminfo *mi)
+static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
+	unsigned long max_low, unsigned long max_high)
 {
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long min, max_low, max_high;
 	int i;
 
-	find_node_limits(node, mi, &min, &max_low, &max_high);
-
 	/*
-	 * initialise the zones within this node.
+	 * initialise the zones.
 	 */
 	memset(zone_size, 0, sizeof(zone_size));
 
 	/*
-	 * The size of this node has already been determined. If we need
-	 * to do anything fancy with the allocation of this memory to the
-	 * zones, now is the time to do it.
+	 * The memory size has already been determined. If we need
+	 * to do anything fancy with the allocation of this memory
+	 * to the zones, now is the time to do it.
 	 */
 	zone_size[0] = max_low - min;
 #ifdef CONFIG_HIGHMEM
@@ -303,11 +212,11 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 #endif
 
 	/*
-	 * For each bank in this node, calculate the size of the holes.
-	 *  holes = node_size - sum(bank_sizes_in_node)
+	 * Calculate the size of the holes.
+	 *  holes = node_size - sum(bank_sizes)
 	 */
 	memcpy(zhole_size, zone_size, sizeof(zhole_size));
-	for_each_nodebank(i, mi, node) {
+	for_each_bank(i, mi) {
 		int idx = 0;
 #ifdef CONFIG_HIGHMEM
 		if (mi->bank[i].highmem)
@@ -320,24 +229,23 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
 	 */
-	arch_adjust_zones(node, zone_size, zhole_size);
+	arch_adjust_zones(zone_size, zhole_size);
 
-	free_area_init_node(node, zone_size, min, zhole_size);
+	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
 #ifndef CONFIG_SPARSEMEM
 int pfn_valid(unsigned long pfn)
 {
-	struct meminfo *mi = &meminfo;
-	unsigned int left = 0, right = mi->nr_banks;
+	struct memblock_region *mem = &memblock.memory;
+	unsigned int left = 0, right = mem->cnt;
 
 	do {
 		unsigned int mid = (right + left) / 2;
-		struct membank *bank = &mi->bank[mid];
-		if (pfn < bank_pfn_start(bank))
+
+		if (pfn < memblock_start_pfn(mem, mid))
 			right = mid;
-		else if (pfn >= bank_pfn_end(bank))
+		else if (pfn >= memblock_end_pfn(mem, mid))
 			left = mid + 1;
 		else
 			return 1;
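pfn_valid() above is a binary search over the sorted memblock memory regions. The same pattern in isolation (struct and names are illustrative, not the memblock API):

	struct range { unsigned long start, end; };	/* [start, end) in pfns, sorted */

	static int range_contains(const struct range *r, unsigned int cnt,
				  unsigned long pfn)
	{
		unsigned int left = 0, right = cnt;

		while (left < right) {
			unsigned int mid = (left + right) / 2;

			if (pfn < r[mid].start)
				right = mid;		/* search lower half */
			else if (pfn >= r[mid].end)
				left = mid + 1;		/* search upper half */
			else
				return 1;		/* pfn inside region mid */
		}
		return 0;
	}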
@@ -346,73 +254,69 @@ int pfn_valid(unsigned long pfn)
 }
 EXPORT_SYMBOL(pfn_valid);
 
-static void arm_memory_present(struct meminfo *mi, int node)
+static void arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(struct meminfo *mi, int node)
+static void arm_memory_present(void)
 {
 	int i;
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
-	}
+	for (i = 0; i < memblock.memory.cnt; i++)
+		memory_present(0, memblock_start_pfn(&memblock.memory, i),
+			       memblock_end_pfn(&memblock.memory, i));
 }
 #endif
 
-void __init bootmem_init(void)
+void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
-	struct meminfo *mi = &meminfo;
-	unsigned long min, max_low, max_high;
-	int node, initrd_node;
+	int i;
 
-	/*
-	 * Locate which node contains the ramdisk image, if any.
-	 */
-	initrd_node = check_initrd(mi);
+	memblock_init();
+	for (i = 0; i < mi->nr_banks; i++)
+		memblock_add(mi->bank[i].start, mi->bank[i].size);
 
-	max_low = max_high = 0;
+	/* Register the kernel text, kernel data and initrd with memblock. */
+#ifdef CONFIG_XIP_KERNEL
+	memblock_reserve(__pa(_data), _end - _data);
+#else
+	memblock_reserve(__pa(_stext), _end - _stext);
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (phys_initrd_size) {
+		memblock_reserve(phys_initrd_start, phys_initrd_size);
 
-	/*
-	 * Run through each node initialising the bootmem allocator.
-	 */
-	for_each_node(node) {
-		unsigned long node_low, node_high;
+		/* Now convert initrd to virtual addresses */
+		initrd_start = __phys_to_virt(phys_initrd_start);
+		initrd_end = initrd_start + phys_initrd_size;
+	}
+#endif
 
-		find_node_limits(node, mi, &min, &node_low, &node_high);
+	arm_mm_memblock_reserve();
 
-		if (node_low > max_low)
-			max_low = node_low;
-		if (node_high > max_high)
-			max_high = node_high;
+	/* reserve any platform specific memblock areas */
+	if (mdesc->reserve)
+		mdesc->reserve();
 
-		/*
-		 * If there is no memory in this node, ignore it.
-		 * (We can't have nodes which have no lowmem)
-		 */
-		if (node_low == 0)
-			continue;
+	memblock_analyze();
+	memblock_dump_all();
+}
 
-		bootmem_init_node(node, mi, min, node_low);
+void __init bootmem_init(void)
+{
+	struct meminfo *mi = &meminfo;
+	unsigned long min, max_low, max_high;
 
-		/*
-		 * Reserve any special node zero regions.
-		 */
-		if (node == 0)
-			reserve_node_zero(NODE_DATA(node));
+	max_low = max_high = 0;
 
-		/*
-		 * If the initrd is in this node, reserve its memory.
-		 */
-		if (node == initrd_node)
-			bootmem_reserve_initrd(node);
+	find_limits(mi, &min, &max_low, &max_high);
 
-		/*
-		 * Sparsemem tries to allocate bootmem in memory_present(),
-		 * so must be done after the fixed reservations
-		 */
-		arm_memory_present(mi, node);
-	}
+	arm_bootmem_init(mi, min, max_low);
+
+	/*
+	 * Sparsemem tries to allocate bootmem in memory_present(),
+	 * so must be done after the fixed reservations
+	 */
+	arm_memory_present();
 
 	/*
 	 * sparse_init() needs the bootmem allocator up and running.
@@ -420,12 +324,11 @@ void __init bootmem_init(void)
 	sparse_init();
 
 	/*
-	 * Now free memory in each node - free_area_init_node needs
+	 * Now free the memory - free_area_init_node needs
 	 *  the sparse mem_map arrays initialized by sparse_init()
 	 *  for memmap_init_zone(), otherwise all PFNs are invalid.
 	 */
-	for_each_node(node)
-		bootmem_free_node(node, mi);
+	arm_bootmem_free(mi, min, max_low, max_high);
 
 	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
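arm_memblock_init() fixes the early-boot ordering: describe all RAM first, then reserve everything already in use, and only then bring bootmem up on top. Condensed sketch of that sequence (2010-era memblock calls as used in this patch; memblock_init() and memblock_analyze() were dropped from later kernels):

	void __init example_memblock_bringup(struct meminfo *mi, struct machine_desc *mdesc)
	{
		int i;

		memblock_init();			/* start with empty region lists */
		for (i = 0; i < mi->nr_banks; i++)	/* all RAM banks */
			memblock_add(mi->bank[i].start, mi->bank[i].size);
		memblock_reserve(__pa(_stext), _end - _stext);	/* kernel image */
		if (mdesc->reserve)
			mdesc->reserve();		/* platform-specific holes */
		memblock_analyze();			/* finalise totals */
		memblock_dump_all();			/* debug dump */
	}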
@@ -460,7 +363,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
 }
 
 static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
 	unsigned long pg, pgend;
@@ -483,40 +386,39 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
 	 * free the section of the memmap array.
 	 */
 	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+		free_bootmem(pg, pgend - pg);
 }
 
 /*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
-static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+static void __init free_unused_memmap(struct meminfo *mi)
 {
 	unsigned long bank_start, prev_bank_end = 0;
 	unsigned int i;
 
 	/*
-	 * [FIXME] This relies on each bank being in address order. This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
+	 * This relies on each bank being in address order.
+	 * The banks are sorted previously in bootmem_init().
	 */
-	for_each_nodebank(i, mi, node) {
+	for_each_bank(i, mi) {
 		struct membank *bank = &mi->bank[i];
 
 		bank_start = bank_pfn_start(bank);
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks. "
-				"Not freeing memmap.\n");
-			break;
-		}
 
 		/*
 		 * If we had a previous bank, and there is a space
 		 * between the current bank and the previous, free it.
 		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
+		if (prev_bank_end && prev_bank_end < bank_start)
+			free_memmap(prev_bank_end, bank_start);
 
-		prev_bank_end = bank_pfn_end(bank);
+		/*
+		 * Align up here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank end aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
 	}
 }
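The ALIGN() in free_unused_memmap() keeps memmap entries valid up to a MAX_ORDER boundary past each bank, since the buddy allocator inspects whole MAX_ORDER blocks. Worked example, assuming MAX_ORDER = 11 so MAX_ORDER_NR_PAGES = 1024 pages (4MiB with 4KiB pages):

	/* A bank ending at pfn 0x2f6a0 keeps its memmap through pfn 0x2f800;
	 * only the gap from there to the next bank is freed. */
	unsigned long prev_bank_end = ALIGN(0x2f6a0UL, 1024);	/* = 0x2f800 */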
@@ -528,21 +430,19 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 void __init mem_init(void)
 {
 	unsigned long reserved_pages, free_pages;
-	int i, node;
+	int i;
+#ifdef CONFIG_HAVE_TCM
+	/* These pointers are filled in on TCM detection */
+	extern u32 dtcm_end;
+	extern u32 itcm_end;
+#endif
 
-#ifndef CONFIG_DISCONTIGMEM
 	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
-#endif
 
 	/* this will put all unused low memory onto the freelists */
-	for_each_online_node(node) {
-		pg_data_t *pgdat = NODE_DATA(node);
+	free_unused_memmap(&meminfo);
 
-		free_unused_memmap_node(node, &meminfo);
-
-		if (pgdat->node_spanned_pages != 0)
-			totalram_pages += free_all_bootmem_node(pgdat);
-	}
+	totalram_pages += free_all_bootmem();
 
 #ifdef CONFIG_SA1111
 	/* now that our DMA memory is actually so designated, we can free it */
@@ -552,39 +452,35 @@ void __init mem_init(void)
 
 #ifdef CONFIG_HIGHMEM
 	/* set highmem page free */
-	for_each_online_node(node) {
-		for_each_nodebank (i, &meminfo, node) {
-			unsigned long start = bank_pfn_start(&meminfo.bank[i]);
-			unsigned long end = bank_pfn_end(&meminfo.bank[i]);
-			if (start >= max_low_pfn + PHYS_PFN_OFFSET)
-				totalhigh_pages += free_area(start, end, NULL);
-		}
+	for_each_bank (i, &meminfo) {
+		unsigned long start = bank_pfn_start(&meminfo.bank[i]);
+		unsigned long end = bank_pfn_end(&meminfo.bank[i]);
+		if (start >= max_low_pfn + PHYS_PFN_OFFSET)
+			totalhigh_pages += free_area(start, end, NULL);
 	}
 	totalram_pages += totalhigh_pages;
 #endif
 
 	reserved_pages = free_pages = 0;
 
-	for_each_online_node(node) {
-		for_each_nodebank(i, &meminfo, node) {
-			struct membank *bank = &meminfo.bank[i];
-			unsigned int pfn1, pfn2;
-			struct page *page, *end;
-
-			pfn1 = bank_pfn_start(bank);
-			pfn2 = bank_pfn_end(bank);
-
-			page = pfn_to_page(pfn1);
-			end  = pfn_to_page(pfn2 - 1) + 1;
-
-			do {
-				if (PageReserved(page))
-					reserved_pages++;
-				else if (!page_count(page))
-					free_pages++;
-				page++;
-			} while (page < end);
-		}
+	for_each_bank(i, &meminfo) {
+		struct membank *bank = &meminfo.bank[i];
+		unsigned int pfn1, pfn2;
+		struct page *page, *end;
+
+		pfn1 = bank_pfn_start(bank);
+		pfn2 = bank_pfn_end(bank);
+
+		page = pfn_to_page(pfn1);
+		end  = pfn_to_page(pfn2 - 1) + 1;
+
+		do {
+			if (PageReserved(page))
+				reserved_pages++;
+			else if (!page_count(page))
+				free_pages++;
+			page++;
+		} while (page < end);
 	}
 
 	/*
@@ -611,6 +507,10 @@ void __init mem_init(void)
 	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
 			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HAVE_TCM
+			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
 			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_MMU
 			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
@@ -627,6 +527,10 @@ void __init mem_init(void)
 			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
 				(PAGE_SIZE)),
+#ifdef CONFIG_HAVE_TCM
+			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
+			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
+#endif
 			MLK(FIXADDR_START, FIXADDR_TOP),
 #ifdef CONFIG_MMU
 			MLM(CONSISTENT_BASE, CONSISTENT_END),
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 28c8b95..ab50627 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -42,78 +42,11 @@
 */
 #define VM_ARM_SECTION_MAPPING	0x80000000
 
-static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
-			  unsigned long phys_addr, const struct mem_type *type)
-{
-	pgprot_t prot = __pgprot(type->prot_pte);
-	pte_t *pte;
-
-	pte = pte_alloc_kernel(pmd, addr);
-	if (!pte)
-		return -ENOMEM;
-
-	do {
-		if (!pte_none(*pte))
-			goto bad;
-
-		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
-		phys_addr += PAGE_SIZE;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	return 0;
-
- bad:
-	printk(KERN_CRIT "remap_area_pte: page already exists\n");
-	BUG();
-}
-
-static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
-				 unsigned long end, unsigned long phys_addr,
-				 const struct mem_type *type)
-{
-	unsigned long next;
-	pmd_t *pmd;
-	int ret = 0;
-
-	pmd = pmd_alloc(&init_mm, pgd, addr);
-	if (!pmd)
-		return -ENOMEM;
-
-	do {
-		next = pmd_addr_end(addr, end);
-		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
-		if (ret)
-			return ret;
-		phys_addr += next - addr;
-	} while (pmd++, addr = next, addr != end);
-	return ret;
-}
-
-static int remap_area_pages(unsigned long start, unsigned long pfn,
-			    size_t size, const struct mem_type *type)
-{
-	unsigned long addr = start;
-	unsigned long next, end = start + size;
-	unsigned long phys_addr = __pfn_to_phys(pfn);
-	pgd_t *pgd;
-	int err = 0;
-
-	BUG_ON(addr >= end);
-	pgd = pgd_offset_k(addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
-		if (err)
-			break;
-		phys_addr += next - addr;
-	} while (pgd++, addr = next, addr != end);
-
-	return err;
-}
-
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
-	return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
+	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+				  __pgprot(mtype->prot_pte));
 }
 EXPORT_SYMBOL(ioremap_page);
@@ -268,6 +201,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
 
+	/*
+	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 */
+	if (WARN_ON(pfn_valid(pfn)))
+		return NULL;
+
 	type = get_mem_type(mtype);
 	if (!type)
 		return NULL;
@@ -294,7 +233,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = remap_area_pages(addr, pfn, size, type);
+		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+					 __pgprot(type->prot_pte));
 
 	if (err) {
 		vunmap((void *)addr);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 815d08e..6630620 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -28,7 +28,5 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 
 #endif
 
-struct pglist_data;
-
 void __init bootmem_init(void);
-void reserve_node_zero(struct pglist_data *pgdat);
+void arm_mm_memblock_reserve(void);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index f5abc51..4f5b396 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -7,6 +7,7 @@
 #include <linux/shm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/random.h>
 
 #include <asm/cputype.h>
 #include <asm/system.h>
@@ -80,6 +81,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		start_addr = addr = TASK_UNMAPPED_BASE;
 		mm->cached_hole_size = 0;
 	}
+	/* 8 bits of randomness in 20 address space bits */
+	if (current->flags & PF_RANDOMIZE)
+		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
 	if (do_align)
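The mmap randomization added above shifts the search base by up to 255 pages, i.e. the "8 bits of randomness in 20 address space bits" give a window of just under 1MiB. A sketch of the arithmetic (illustrative helper, not a kernel API):

	static unsigned long randomized_base(unsigned long base)
	{
		/* 0..255 pages -> offsets of 0..(1MiB - 4KiB) with 4KiB pages */
		return base + ((get_random_int() % (1 << 8)) << PAGE_SHIFT);
	}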
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 2858941..6e1c4f6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -11,13 +11,12 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
+#include <linux/memblock.h>
 #include <linux/sort.h>
 
 #include <asm/cputype.h>
-#include <asm/mach-types.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
 #include <asm/setup.h>
@@ -258,6 +257,19 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_DTCM] = {
+		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG |
+				  L_PTE_DIRTY | L_PTE_WRITE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
+		.domain		= DOMAIN_KERNEL,
+	},
+	[MT_MEMORY_ITCM] = {
+		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				  L_PTE_USER | L_PTE_EXEC,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.domain		= DOMAIN_IO,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -488,18 +500,28 @@ static void __init build_mem_type_table(void)
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
-				  unsigned long end, unsigned long pfn,
-				  const struct mem_type *type)
+static void __init *early_alloc(unsigned long sz)
 {
-	pte_t *pte;
+	void *ptr = __va(memblock_alloc(sz, sz));
+	memset(ptr, 0, sz);
+	return ptr;
+}
 
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+{
 	if (pmd_none(*pmd)) {
-		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
-		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
+		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
+		__pmd_populate(pmd, __pa(pte) | prot);
 	}
+	BUG_ON(pmd_bad(*pmd));
+	return pte_offset_kernel(pmd, addr);
+}
 
-	pte = pte_offset_kernel(pmd, addr);
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, unsigned long pfn,
+				  const struct mem_type *type)
+{
+	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
 	do {
 		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
@@ -668,7 +690,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(io_desc + i);
 }
 
-static unsigned long __initdata vmalloc_reserve = SZ_128M;
+static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
 
 /*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
@@ -677,7 +699,7 @@ static int __init early_vmalloc(char *arg)
 {
-	vmalloc_reserve = memparse(arg, NULL);
+	unsigned long vmalloc_reserve = memparse(arg, NULL);
 
 	if (vmalloc_reserve < SZ_16M) {
 		vmalloc_reserve = SZ_16M;
@@ -692,22 +714,26 @@ static int __init early_vmalloc(char *arg)
 			"vmalloc area is too big, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
+
+	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
 	return 0;
 }
 early_param("vmalloc", early_vmalloc);
 
-#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
+phys_addr_t lowmem_end_addr;
 
 static void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
 
+	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
-		if (__va(bank->start) > VMALLOC_MIN ||
+		if (__va(bank->start) > vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET)
 			highmem = 1;
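early_vmalloc() now folds the reservation straight into vmalloc_min, from which sanity_check_meminfo() derives the lowmem ceiling. Worked example under assumed values (VMALLOC_END = 0xf8000000, PAGE_OFFSET = 0xc0000000, PHYS_OFFSET = 0), with "vmalloc=192M" on the command line:

	void *vmalloc_min = (void *)(0xf8000000UL - (192UL << 20));	/* = 0xec000000 */
	phys_addr_t lowmem_end_addr = __pa((char *)vmalloc_min - 1) + 1;
	/* = 0x2c000000, i.e. 704MiB of directly-mapped lowmem */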
 		 */
-		if (__va(bank->start) < VMALLOC_MIN &&
-		    bank->size > VMALLOC_MIN - __va(bank->start)) {
+		if (__va(bank->start) < vmalloc_min &&
+		    bank->size > vmalloc_min - __va(bank->start)) {
 			if (meminfo.nr_banks >= NR_BANKS) {
 				printk(KERN_CRIT "NR_BANKS too low, "
 				       "ignoring high memory\n");
@@ -727,12 +753,12 @@ static void __init sanity_check_meminfo(void)
 					(meminfo.nr_banks - i) * sizeof(*bank));
 				meminfo.nr_banks++;
 				i++;
-				bank[1].size -= VMALLOC_MIN - __va(bank->start);
-				bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+				bank[1].size -= vmalloc_min - __va(bank->start);
+				bank[1].start = __pa(vmalloc_min - 1) + 1;
 				bank[1].highmem = highmem = 1;
 				j++;
 			}
-			bank->size = VMALLOC_MIN - __va(bank->start);
+			bank->size = vmalloc_min - __va(bank->start);
 		}
 #else
 		bank->highmem = highmem;
@@ -741,7 +767,7 @@ static void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would entirely overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start) >= VMALLOC_MIN ||
+		if (__va(bank->start) >= vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET) {
 			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
 			       "(vmalloc region overlap).\n",
@@ -753,9 +779,9 @@ static void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would partially overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start + bank->size) > VMALLOC_MIN ||
+		if (__va(bank->start + bank->size) > vmalloc_min ||
 		    __va(bank->start + bank->size) < __va(bank->start)) {
-			unsigned long newsize = VMALLOC_MIN - __va(bank->start);
+			unsigned long newsize = vmalloc_min - __va(bank->start);
 			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
 			       "to -%.8lx (vmalloc region overlap).\n",
 			       bank->start, bank->start + bank->size - 1,
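The bank-splitting path above is easiest to see with numbers. Under the same assumed layout (PAGE_OFFSET 0xc0000000, vmalloc_min 0xec000000, PHYS_OFFSET 0), a single 768MiB bank at physical address 0 is divided so only the first 704MiB stays lowmem:

	/* bank->start = 0, bank->size = 768MiB; __va(0) = 0xc0000000 < vmalloc_min */
	bank[1].size  = (768UL << 20) - (704UL << 20);		/* 64MiB spills into highmem */
	bank[1].start = __pa((char *)vmalloc_min - 1) + 1;	/* 0x2c000000 */
	bank[0].size  = 704UL << 20;	/* vmalloc_min - __va(bank->start) */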
@@ -827,101 +853,23 @@ static inline void prepare_page_table(void)
 }
 
 /*
- * Reserve the various regions of node 0
+ * Reserve the special regions of memory
 */
-void __init reserve_node_zero(pg_data_t *pgdat)
+void __init arm_mm_memblock_reserve(void)
 {
-	unsigned long res_size = 0;
-
-	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
-			BOOTMEM_DEFAULT);
-#else
-	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
-			BOOTMEM_DEFAULT);
-#endif
-
 	/*
 	 * Reserve the page tables.  These are already in use,
 	 * and can only be in node 0.
 	 */
-	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
-			     PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);
-
-	/*
-	 * Hmm... This should go elsewhere, but we really really need to
-	 * stop things allocating the low memory; ideally we need a better
-	 * implementation of GFP_DMA which does not assume that DMA-able
-	 * memory starts at zero.
-	 */
-	if (machine_is_integrator() || machine_is_cintegrator())
-		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
-
-	/*
-	 * These should likewise go elsewhere.  They pre-reserve the
-	 * screen memory region at the start of main system memory.
-	 */
-	if (machine_is_edb7211())
-		res_size = 0x00020000;
-	if (machine_is_p720t())
-		res_size = 0x00014000;
-
-	/* H1940, RX3715 and RX1950 need to reserve this for suspend */
-
-	if (machine_is_h1940() || machine_is_rx3715()
-		|| machine_is_rx1950()) {
-		reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
-			BOOTMEM_DEFAULT);
-		reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
-			BOOTMEM_DEFAULT);
-	}
-
-	if (machine_is_palmld() || machine_is_palmtx()) {
-		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-	}
-
-	if (machine_is_treo680() || machine_is_centro()) {
-		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-		reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-	}
-
-	if (machine_is_palmt5())
-		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-
-	/*
-	 * U300 - This platform family can share physical memory
-	 * between two ARM cpus, one running Linux and the other
-	 * running another OS.
-	 */
-	if (machine_is_u300()) {
-#ifdef CONFIG_MACH_U300_SINGLE_RAM
-#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&	\
-	CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
-		res_size = 0x00100000;
-#endif
-#endif
-	}
+	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
 
 #ifdef CONFIG_SA1111
 	/*
 	 * Because of the SA1111 DMA bug, we want to preserve our
 	 * precious DMA-able memory...
 	 */
-	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
 #endif
-	if (res_size)
-		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
-				BOOTMEM_DEFAULT);
 }
 
 /*
@@ -940,7 +888,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -1011,11 +959,8 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 static void __init kmap_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
-	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
-	BUG_ON(!pmd_none(*pmd) || !pte);
-	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
-	pkmap_page_table = pte + PTRS_PER_PTE;
+	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+		PKMAP_BASE, _PAGE_KERNEL_TABLE);
 #endif
 }
@@ -1066,17 +1011,16 @@ void __init paging_init(struct machine_desc *mdesc)
 	sanity_check_meminfo();
 	prepare_page_table();
 	map_lowmem();
-	bootmem_init();
 	devicemaps_init(mdesc);
 	kmap_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 
-	/*
-	 * allocate the zero page.  Note that this always succeeds and
-	 * returns a zeroed result.
-	 */
-	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
+	/* allocate the zero page. */
+	zero_page = early_alloc(PAGE_SIZE);
+
+	bootmem_init();
+
 	empty_zero_page = virt_to_page(zero_page);
 	__flush_dcache_page(NULL, empty_zero_page);
 }
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 33b3273..687d023 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -6,8 +6,8 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
-#include <linux/bootmem.h>
 #include <linux/io.h>
+#include <linux/memblock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -17,30 +17,14 @@
 
 #include "mm.h"
 
-/*
- * Reserve the various regions of node 0
- */
-void __init reserve_node_zero(pg_data_t *pgdat)
+void __init arm_mm_memblock_reserve(void)
 {
 	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
-			BOOTMEM_DEFAULT);
-#else
-	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
-			BOOTMEM_DEFAULT);
-#endif
-
-	/*
 	 * Register the exception vector page.
 	 * some architectures which the DRAM is the exception vector to trap,
 	 * alloc_page breaks with error, although it is not NULL, but "0."
 	 */
-	reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE,
-			BOOTMEM_DEFAULT);
+	memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
 }
 
 /*
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 72507c6..203a4e9 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -79,15 +79,11 @@ ENTRY(cpu_arm1020_proc_init)
 * cpu_arm1020_proc_fin()
 */
 ENTRY(cpu_arm1020_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	arm1020_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm1020_reset(loc)
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index d278298..1a511e7 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -79,15 +79,11 @@ ENTRY(cpu_arm1020e_proc_init)
 * cpu_arm1020e_proc_fin()
 */
 ENTRY(cpu_arm1020e_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	arm1020e_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm1020e_reset(loc)
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index ce13e4a..1ffa4eb 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -68,15 +68,11 @@ ENTRY(cpu_arm1022_proc_init)
 * cpu_arm1022_proc_fin()
 */
 ENTRY(cpu_arm1022_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	arm1022_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm1022_reset(loc)
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 636672a..5697c34 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -68,15 +68,11 @@ ENTRY(cpu_arm1026_proc_init)
 * cpu_arm1026_proc_fin()
 */
 ENTRY(cpu_arm1026_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	arm1026_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm1026_reset(loc)
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 795dc61..64e0b32 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -184,8 +184,6 @@ ENTRY(cpu_arm7_proc_init)
 
 ENTRY(cpu_arm6_proc_fin)
 ENTRY(cpu_arm7_proc_fin)
-		mov	r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-		msr	cpsr_c, r0
 		mov	r0, #0x31			@ ....S..DP...M
 		mcr	p15, 0, r0, c1, c0, 0		@ disable caches
 		mov	pc, lr
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 0b62de2..9d96824 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -54,15 +54,11 @@ ENTRY(cpu_arm720_proc_init)
 	mov	pc, lr
 
 ENTRY(cpu_arm720_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
 	mrc	p15, 0, r0, c1, c0, 0
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	mcr	p15, 0, r1, c7, c7, 0		@ invalidate cache
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * Function: arm720_proc_do_idle(void)
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 01860cd..6c1a9ab 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -36,15 +36,11 @@ ENTRY(cpu_arm740_switch_mm)
 * cpu_arm740_proc_fin()
 */
 ENTRY(cpu_arm740_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
 	mrc	p15, 0, r0, c1, c0, 0
 	bic	r0, r0, #0x3f000000		@ bank/f/lock/s
 	bic	r0, r0, #0x0000000c		@ w-buffer/cache
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	mcr	p15, 0, r0, c7, c0, 0		@ invalidate cache
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm740_reset(loc)
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 1201b98..6a850db 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -36,8 +36,6 @@ ENTRY(cpu_arm7tdmi_switch_mm)
 * cpu_arm7tdmi_proc_fin()
 */
 ENTRY(cpu_arm7tdmi_proc_fin)
-		mov	r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-		msr	cpsr_c, r0
 		mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 8be8199..86f80aa 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -69,19 +69,11 @@ ENTRY(cpu_arm920_proc_init)
 * cpu_arm920_proc_fin()
 */
 ENTRY(cpu_arm920_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	bl	arm920_flush_kern_cache_all
-#else
-	bl	v4wt_flush_kern_cache_all
-#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm920_reset(loc)
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index c0ff8e4..f76ce9b 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -71,19 +71,11 @@ ENTRY(cpu_arm922_proc_init)
 * cpu_arm922_proc_fin()
 */
 ENTRY(cpu_arm922_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	bl	arm922_flush_kern_cache_all
-#else
-	bl	v4wt_flush_kern_cache_all
-#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm922_reset(loc)
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 3c6cffe..657bd3f 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -92,15 +92,11 @@ ENTRY(cpu_arm925_proc_init)
 * cpu_arm925_proc_fin()
 */
 ENTRY(cpu_arm925_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	arm925_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm925_reset(loc)
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 75b707c..73f1f3c 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -61,15 +61,11 @@ ENTRY(cpu_arm926_proc_init)
 * cpu_arm926_proc_fin()
 */
 ENTRY(cpu_arm926_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	arm926_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm926_reset(loc)
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 1af1657..fffb061 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -37,15 +37,11 @@ ENTRY(cpu_arm940_switch_mm)
 * cpu_arm940_proc_fin()
 */
 ENTRY(cpu_arm940_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	arm940_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x00001000		@ i-cache
 	bic	r0, r0, #0x00000004		@ d-cache
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm940_reset(loc)
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 1664b6a..249a605 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -44,15 +44,11 @@ ENTRY(cpu_arm946_switch_mm)
 * cpu_arm946_proc_fin()
 */
 ENTRY(cpu_arm946_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	arm946_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x00001000		@ i-cache
 	bic	r0, r0, #0x00000004		@ d-cache
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_arm946_reset(loc)
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 28545c2..db47566 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -36,8 +36,6 @@ ENTRY(cpu_arm9tdmi_switch_mm)
 * cpu_arm9tdmi_proc_fin()
 */
 ENTRY(cpu_arm9tdmi_proc_fin)
-		mov	r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-		msr	cpsr_c, r0
 		mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 08f5ac2..7803fdf 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -39,17 +39,13 @@ ENTRY(cpu_fa526_proc_init)
 * cpu_fa526_proc_fin()
 */
 ENTRY(cpu_fa526_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	fa_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
 	nop
 	nop
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_fa526_reset(loc)
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 53e6323..b304d01 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -75,11 +75,6 @@ ENTRY(cpu_feroceon_proc_init)
 * cpu_feroceon_proc_fin()
 */
 ENTRY(cpu_feroceon_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	feroceon_flush_kern_cache_all
-
 #if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
 	mov	r0, #0
@@ -91,7 +86,7 @@ ENTRY(cpu_feroceon_proc_fin)
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_feroceon_reset(loc)
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index caa3115..5f6892f 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -51,15 +51,11 @@ ENTRY(cpu_mohawk_proc_init)
 * cpu_mohawk_proc_fin()
 */
 ENTRY(cpu_mohawk_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	mohawk_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1800			@ ...iz...........
 	bic	r0, r0, #0x0006			@ .............ca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_mohawk_reset(loc)
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 7b706b3..a201eb0 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -44,17 +44,13 @@ ENTRY(cpu_sa110_proc_init)
 * cpu_sa110_proc_fin()
 */
 ENTRY(cpu_sa110_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	v4wb_flush_kern_cache_all	@ clean caches
-1:	mov	r0, #0
+	mov	r0, #0
 	mcr	p15, 0, r0, c15, c2, 2		@ Disable clock switching
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_sa110_reset(loc)
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 5c47760..7ddc480 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -55,16 +55,12 @@ ENTRY(cpu_sa1100_proc_init)
 *  - Clean and turn off caches.
 */
 ENTRY(cpu_sa1100_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	v4wb_flush_kern_cache_all
 	mcr	p15, 0, ip, c15, c2, 2		@ Disable clock switching
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x000e			@ ............wca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_sa1100_reset(loc)
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 7a5337e..22aac85 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -42,14 +42,11 @@ ENTRY(cpu_v6_proc_init)
 	mov	pc, lr
 
 ENTRY(cpu_v6_proc_fin)
-	stmfd	sp!, {lr}
-	cpsid	if				@ disable interrupts
-	bl	v6_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x0006			@ .............ca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
 * cpu_v6_reset(loc)
@@ -239,7 +236,8 @@ __v6_proc_info:
 	b	__v6_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
+	/* See also feat_v6_fixup() for HWCAP_TLS */
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA|HWCAP_TLS
 	.long	cpu_v6_name
 	.long	v6_processor_functions
 	.long	v6wbi_tlb_fns
@@ -262,7 +260,7 @@ __pj4_v6_proc_info:
 	b	__v6_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
 	.long	cpu_pj4_name
 	.long	v6_processor_functions
 	.long	v6wbi_tlb_fns
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7aaf88a..6a8506d 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -45,14 +45,11 @@ ENTRY(cpu_v7_proc_init)
 ENDPROC(cpu_v7_proc_init)
 
 ENTRY(cpu_v7_proc_fin)
-	stmfd	sp!, {lr}
-	cpsid	if				@ disable interrupts
-	bl	v7_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x0006			@ .............ca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 ENDPROC(cpu_v7_proc_fin)
 
 /*
@@ -344,7 +341,7 @@ __v7_proc_info:
 	b	__v7_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
 	.long	cpu_v7_name
 	.long	v7_processor_functions
 	.long	v7wbi_tlb_fns
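__v6_proc_info and __v7_proc_info now advertise HWCAP_TLS (with feat_v6_fixup() clearing it again where the register is absent, per the comment above). With the hwcap set, user space may read the thread pointer straight from the CP15 TPIDRURO register instead of calling the kuser helper at 0xffff0fe0. Minimal userspace sketch (C with inline assembly):

	static inline unsigned long read_tls(void)
	{
		unsigned long tp;

		/* TPIDRURO: user read-only thread ID register (ARMv6K/ARMv7) */
		__asm__("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
		return tp;
	}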
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index e5797f1..361a51e 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -90,15 +90,11 @@ ENTRY(cpu_xsc3_proc_init)
 * cpu_xsc3_proc_fin()
 */
 ENTRY(cpu_xsc3_proc_fin)
-	str	lr, [sp, #-4]!
-	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
-	msr	cpsr_c, r0
-	bl	xsc3_flush_kern_cache_all	@ clean caches
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1800			@ ...IZ...........
 	bic	r0, r0, #0x0006			@ .............CA.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldr	pc, [sp], #4
+	mov	pc, lr
 
 /*
 * cpu_xsc3_reset(loc)
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 63037e2..1407597 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -124,15 +124,11 @@ ENTRY(cpu_xscale_proc_init)
 * cpu_xscale_proc_fin()
 */
 ENTRY(cpu_xscale_proc_fin)
-	str	lr, [sp, #-4]!
-	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
-	msr	cpsr_c, r0
-	bl	xscale_flush_kern_cache_all	@ clean caches
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1800			@ ...IZ...........
 	bic	r0, r0, #0x0006			@ .............CA.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldr	pc, [sp], #4
+	mov	pc, lr
 
 /*
 * cpu_xscale_reset(loc)
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 19e09bdb..935993e 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -35,7 +35,8 @@
 */
 
 struct arm_vmregion *
-arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp)
+arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
+		   size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
@@ -58,7 +59,7 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp)
 			goto nospc;
 		if ((addr + size) <= c->vm_start)
 			goto found;
-		addr = c->vm_end;
+		addr = ALIGN(c->vm_end, align);
 		if (addr > end)
 			goto nospc;
 	}
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 6b2cdbd..15e9f04 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -21,7 +21,7 @@ struct arm_vmregion {
 	int		vm_active;
 };
 
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t);
 struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
 struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
 void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
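arm_vmregion_alloc() gained the align parameter used by the dma-mapping.c change earlier in this series; the only behavioural difference is that the candidate address now steps to the next aligned slot after each busy region instead of to its raw end. Worked example:

	/* With align = 1MiB, a busy region ending at 0xffa42000 pushes the
	 * next candidate to the following section boundary: */
	unsigned long addr = ALIGN(0xffa42000UL, 0x100000UL);	/* = 0xffb00000 */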