From 8455e6ec70f33b0e8c3ffd47067e00481f09f454 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 20 Aug 2013 11:47:41 +0100
Subject: arm64: perf: fix group validation when using enable_on_exec

This is a port of cb2d8b342aa0 ("ARM: 7698/1: perf: fix group validation when using enable_on_exec") to arm64, which fixes the event validation checking so that events in the OFF state are still considered when enable_on_exec is true.

Cc:
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 12e6ccb..2a1e916 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -325,7 +325,10 @@ validate_event(struct pmu_hw_events *hw_events,
 	if (is_software_event(event))
 		return 1;

-	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;

 	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
-- cgit v0.10.2

From 178cd9ce377232518ec17ff2ecab2e80fa60784c Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 20 Aug 2013 11:47:42 +0100
Subject: arm64: perf: fix ARMv8 EVTYPE_MASK to include NSH bit

This is a port of f2fe09b055e2 ("ARM: 7663/1: perf: fix ARMv7 EVTYPE_MASK to include NSH bit") to arm64, which fixes the broken evtype mask to include the NSH bit, allowing profiling at EL2.

Cc:
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 2a1e916..cea1594 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -784,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define ARMV8_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define ARMV8_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
 #define ARMV8_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

 /*
-- cgit v0.10.2

From 4cfb36136480c029a29dbf63a623506e6ed7282b Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Tue, 9 Jul 2013 14:18:12 +0100
Subject: arm64: add support for kernel mode NEON

Add <asm/neon.h> containing kernel_neon_begin/kernel_neon_end function declarations and corresponding definitions in fpsimd.c. These are needed to wrap uses of NEON in kernel mode. The names are identical to the ones used in arm/ so code using intrinsics or vectorized by GCC can be shared between arm and arm64.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9737e97..ae323a4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -96,6 +96,9 @@ config SWIOTLB
 config IOMMU_HELPER
 	def_bool SWIOTLB

+config KERNEL_MODE_NEON
+	def_bool y
+
 source "init/Kconfig"

 source "kernel/Kconfig.freezer"
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
new file mode 100644
index 0000000..b0cc58a9
--- /dev/null
+++ b/arch/arm64/include/asm/neon.h
@@ -0,0 +1,14 @@
+/*
+ * linux/arch/arm64/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define cpu_has_neon()	(1)
+
+void kernel_neon_begin(void);
+void kernel_neon_end(void);
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index e8b8357..1f2e4d5 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -83,6 +84,33 @@ void fpsimd_flush_thread(void)
 	fpsimd_load_state(&current->thread.fpsimd_state);
 }

+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+	/* Avoid using the NEON in interrupt context */
+	BUG_ON(in_interrupt());
+	preempt_disable();
+
+	if (current->mm)
+		fpsimd_save_state(&current->thread.fpsimd_state);
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+	if (current->mm)
+		fpsimd_load_state(&current->thread.fpsimd_state);
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
 /*
  * FP/SIMD support code initialisation.
  */
-- cgit v0.10.2

From 360b35a874f130305715b1b854b67dc40826fc91 Mon Sep 17 00:00:00 2001
From: Chen Gang
Date: Wed, 21 Aug 2013 10:50:17 +0100
Subject: ARM64: include: asm: include "asm/types.h" in "pgtable-2level-types.h" and "pgtable-3level-types.h"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We need to include "asm/types.h", just as arm does; otherwise the build fails with errors such as:

In file included from arch/arm64/include/asm/page.h:37:0,
                 from drivers/staging/lustre/include/linux/lnet/linux/lib-lnet.h:42,
                 from drivers/staging/lustre/include/linux/lnet/lib-lnet.h:44,
                 from drivers/staging/lustre/lnet/lnet/api-ni.c:38:
arch/arm64/include/asm/pgtable-2level-types.h:19:1: error: unknown type name ‘u64’
arch/arm64/include/asm/pgtable-2level-types.h:20:1: error: unknown type name ‘u64’

Signed-off-by: Chen Gang
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h
index 3c3ca7d..5f101e6 100644
--- a/arch/arm64/include/asm/pgtable-2level-types.h
+++ b/arch/arm64/include/asm/pgtable-2level-types.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PGTABLE_2LEVEL_TYPES_H
 #define __ASM_PGTABLE_2LEVEL_TYPES_H

+#include <asm/types.h>
+
 typedef u64 pteval_t;
 typedef u64 pgdval_t;
 typedef pgdval_t pmdval_t;
diff --git a/arch/arm64/include/asm/pgtable-3level-types.h b/arch/arm64/include/asm/pgtable-3level-types.h
index 4489615..4e94424 100644
--- a/arch/arm64/include/asm/pgtable-3level-types.h
+++ b/arch/arm64/include/asm/pgtable-3level-types.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PGTABLE_3LEVEL_TYPES_H
 #define __ASM_PGTABLE_3LEVEL_TYPES_H

+#include <asm/types.h>
+
 typedef u64 pteval_t;
 typedef u64 pmdval_t;
 typedef u64 pgdval_t;
-- cgit v0.10.2

From 4370eec05a887b0cd4392cd5dc5b2713174745c0 Mon Sep 17 00:00:00 2001
From: Roy Franz
Date: Thu, 15 Aug 2013 00:10:00 +0100
Subject: arm64: Expand arm64 image header

Expand the arm64 image header to allow for co-existence with the PE/COFF header required by the EFI stub. The PE/COFF format requires the "MZ" header to be at offset 0, and the offset to the PE/COFF header to be at offset 0x3c. The image header is expanded to allow 2 instructions at the beginning to accommodate a benign instruction at offset 0 that includes the "MZ" header, a magic number, and the offset to the PE/COFF header.
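As a usage sketch for the kernel-mode NEON interface added a couple of patches above: a driver or crypto routine brackets its NEON code with kernel_neon_begin()/kernel_neon_end() and must not do so from interrupt context. The xor_blocks_neon() helper below is a made-up name standing in for code written with NEON intrinsics or vectorized by GCC; this is an illustration, not part of the series.

#include <asm/neon.h>

/* Hypothetical NEON routine, e.g. built from arm_neon.h intrinsics or
 * auto-vectorized by GCC; not provided by the patches above. */
void xor_blocks_neon(unsigned long bytes, unsigned long *p1, unsigned long *p2);

static void xor_blocks(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	/* kernel_neon_begin() disables preemption and saves the task's user
	 * FPSIMD state if there is one; it BUG()s in interrupt context. */
	kernel_neon_begin();
	xor_blocks_neon(bytes, p1, p2);
	kernel_neon_end();
}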
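For the expanded image header just described, a boot loader can recognise an arm64 Image by checking the magic word at byte offset 56 and then honouring text_offset; the full layout appears in the booting.txt hunk below. The stand-alone parser that follows is an illustrative sketch with made-up names, not code from this series, and it assumes a little-endian host.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ARM64_TEXT_OFFSET_OFF	8	/* u64 text_offset */
#define ARM64_MAGIC_OFF		56	/* u32 magic, "ARM\x64" */
#define ARM64_IMAGE_MAGIC	0x644d5241u

static bool arm64_image_parse(const uint8_t *image, size_t size,
			      uint64_t *text_offset)
{
	uint32_t magic;

	if (size < 64)			/* the header is 64 bytes */
		return false;
	memcpy(&magic, image + ARM64_MAGIC_OFF, sizeof(magic));
	if (magic != ARM64_IMAGE_MAGIC)
		return false;
	memcpy(text_offset, image + ARM64_TEXT_OFFSET_OFF, sizeof(*text_offset));
	return true;
}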
Signed-off-by: Roy Franz
Signed-off-by: Catalin Marinas

diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 9c4d388..5273c4d 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -68,13 +68,23 @@ Image target is available instead.

 Requirement: MANDATORY

-The decompressed kernel image contains a 32-byte header as follows:
+The decompressed kernel image contains a 64-byte header as follows:

-  u32 magic	= 0x14000008;	/* branch to stext, little-endian */
-  u32 res0	= 0;		/* reserved */
+  u32 code0;			/* Executable code */
+  u32 code1;			/* Executable code */
   u64 text_offset;		/* Image load offset */
+  u64 res0	= 0;		/* reserved */
   u64 res1	= 0;		/* reserved */
   u64 res2	= 0;		/* reserved */
+  u64 res3	= 0;		/* reserved */
+  u64 res4	= 0;		/* reserved */
+  u32 magic	= 0x644d5241;	/* Magic number, little endian, "ARM\x64" */
+  u32 res5	= 0;		/* reserved */
+
+
+Header notes:
+
+- code0/code1 are responsible for branching to stext.

 The image must be placed at the specified offset (currently 0x80000)
 from the start of the system RAM and called there. The start of the
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 53dcae4..7090c12 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -112,6 +112,14 @@
 	.quad	TEXT_OFFSET		// Image load offset from start of RAM
 	.quad	0			// reserved
 	.quad	0			// reserved
+	.quad	0			// reserved
+	.quad	0			// reserved
+	.quad	0			// reserved
+	.byte	0x41			// Magic number, "ARM\x64"
+	.byte	0x52
+	.byte	0x4d
+	.byte	0x64
+	.word	0			// reserved

ENTRY(stext)
	mov	x21, x0			// x21=FDT
-- cgit v0.10.2

From 2600e130b3c90c8d6c13229d3d3a14dcb898a87b Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Thu, 22 Aug 2013 11:47:37 +0100
Subject: arm64: Enable interrupts in the EL0 undef handler

do_undefinstr() has to be called with interrupts enabled since it may read the instruction from the user address space, which could lead to a data abort and a subsequent might_sleep() warning in do_page_fault() if interrupts were still disabled.

Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 6ad781b..57640df 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -476,6 +476,8 @@ el0_undef:
 	 * Undefined instruction
 	 */
 	mov	x0, sp
+	// enable interrupts before calling the main handler
+	enable_irq
 	b	do_undefinstr
el0_dbg:
 	/*
-- cgit v0.10.2

From c80b7ee8520606f77fbc8ced870c96659053269e Mon Sep 17 00:00:00 2001
From: Mark Salter
Date: Fri, 23 Aug 2013 16:16:42 +0100
Subject: arm64: move elf notes into readonly segment

The current vmlinux.lds.S places the notes sections between the end of rw data and start of bss. This means that _edata doesn't really point to the end of data. Since notes are read-only, this patch moves them to the read-only segment so that _edata does point to the end of initialized rw data.

Signed-off-by: Mark Salter
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f5e5574..f8ab9d8 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -71,6 +71,7 @@ SECTIONS
 	RO_DATA(PAGE_SIZE)
 	EXCEPTION_TABLE(8)
+	NOTES
 	_etext = .;		/* End of text and rodata section */

 	. = ALIGN(PAGE_SIZE);
@@ -122,8 +123,6 @@ SECTIONS
 	}
 	_edata_loc = __data_loc + SIZEOF(.data);

-	NOTES
-
 	BSS_SECTION(0, 0, 0)
 	_end = .;
-- cgit v0.10.2

From e25208f77c2dad5a9f2ab3d3df61252a90b71afa Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 23 Aug 2013 18:04:44 +0100
Subject: arm64: Fix mapping of memory banks not ending on a PMD_SIZE boundary

The map_mem() function limits the current memblock limit to PGDIR_SIZE (the initial swapper_pg_dir mapping) to avoid create_mapping() allocating memory from unmapped areas. However, if the first block is within PGDIR_SIZE and does not end on a PMD_SIZE boundary, when the 4K page configuration is enabled, create_mapping() will try to allocate a pte page. Such a page may be returned by memblock_alloc() from the end of such a bank (or any subsequent bank within PGDIR_SIZE) which is not mapped yet.

The patch limits the current memblock limit to the aligned end of the first bank and gradually increases it as more memory is mapped. It also ensures that the start of the first bank is aligned to PMD_SIZE to avoid pte page allocation for this mapping.

Signed-off-by: Catalin Marinas
Reported-by: "Leizhen (ThunderTown, Euler)"
Tested-by: "Leizhen (ThunderTown, Euler)"

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a8d1059..f557ebb 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -296,6 +296,7 @@ void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
 static void __init map_mem(void)
 {
 	struct memblock_region *reg;
+	phys_addr_t limit;

 	/*
 	 * Temporarily limit the memblock range. We need to do this as
@@ -303,9 +304,11 @@ static void __init map_mem(void)
 	 * memory addressable from the initial direct kernel mapping.
 	 *
 	 * The initial direct kernel mapping, located at swapper_pg_dir,
-	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (aligned).
+	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+	 * aligned to 2MB as per Documentation/arm64/booting.txt).
 	 */
-	memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);
+	limit = PHYS_OFFSET + PGDIR_SIZE;
+	memblock_set_current_limit(limit);

 	/* map all the memory banks */
 	for_each_memblock(memory, reg) {
@@ -315,6 +318,22 @@ static void __init map_mem(void)
 		if (start >= end)
 			break;

+#ifndef CONFIG_ARM64_64K_PAGES
+		/*
+		 * For the first memory bank align the start address and
+		 * current memblock limit to prevent create_mapping() from
+		 * allocating pte page tables from unmapped memory.
+		 * When 64K pages are enabled, the pte page table for the
+		 * first PGDIR_SIZE is already present in swapper_pg_dir.
+		 */
+		if (start < limit)
+			start = ALIGN(start, PMD_SIZE);
+		if (end < limit) {
+			limit = end & PMD_MASK;
+			memblock_set_current_limit(limit);
+		}
+#endif
+
 		create_mapping(start, __phys_to_virt(start), end - start);
 	}
-- cgit v0.10.2

From 326b16db9f69fd0d279be873c6c00f88c0a4aad5 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 30 Aug 2013 18:06:48 +0100
Subject: arm64: delay: don't bother reporting bogomips in /proc/cpuinfo

We always use a timer-backed delay loop for arm64, so don't bother reporting a bogomips value which appears to confuse some people.
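The alignment arithmetic in the map_mem() hunk above can be made concrete with a small stand-alone program; the bank addresses below are hypothetical and PMD_SIZE is the 2MB section size used with 4K pages, so this is an illustration rather than kernel code.

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE	0x200000ULL	/* 2MB sections with 4K pages */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Hypothetical first memory bank that neither starts nor ends
	 * on a 2MB boundary. */
	uint64_t start = 0x80080000ULL;
	uint64_t end   = 0x80b00000ULL;

	/* map_mem() rounds the bank start up to the next PMD boundary... */
	printf("aligned start: %#llx\n",
	       (unsigned long long)ALIGN(start, PMD_SIZE));
	/* ...and lowers the memblock limit to the last full 2MB section. */
	printf("new limit:     %#llx\n",
	       (unsigned long long)(end & PMD_MASK));
	return 0;
}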
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index add6ea6..bca4c1c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -328,9 +328,6 @@ static int c_show(struct seq_file *m, void *v)
 #ifdef CONFIG_SMP
 		seq_printf(m, "processor\t: %d\n", i);
 #endif
-		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
-			   loops_per_jiffy / (500000UL/HZ),
-			   loops_per_jiffy / (5000UL/HZ) % 100);
 	}

 	/* dump out the processor features */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index fee5cce..78db90d 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -223,11 +223,7 @@ asmlinkage void secondary_start_kernel(void)

 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	unsigned long bogosum = loops_per_jiffy * num_online_cpus();
-
-	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
-		num_online_cpus(), bogosum / (500000/HZ),
-		(bogosum / (5000/HZ)) % 100);
+	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
 }

 void __init smp_prepare_boot_cpu(void)
-- cgit v0.10.2

From f3a1d7d53dccf51959aec16b574617cc6bfeca09 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Mon, 2 Sep 2013 16:33:54 +0100
Subject: arm64: Remove unused cpu_name ascii in arch/arm64/mm/proc.S

This string has been moved to arch/arm64/kernel/cputable.c.

Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a82ae88..f84fcf7 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -95,10 +95,6 @@ ENTRY(cpu_do_switch_mm)
 	ret
ENDPROC(cpu_do_switch_mm)

-cpu_name:
-	.ascii	"AArch64 Processor"
-	.align
-
 	.section ".text.init", #alloc, #execinstr

/*
-- cgit v0.10.2

From 909e3ee4119f87b85c6e1b8534b2287ed1ea3ca2 Mon Sep 17 00:00:00 2001
From: Dan Aloni
Date: Wed, 28 Aug 2013 14:24:53 +0100
Subject: Move the EM_ARM and EM_AARCH64 definitions to uapi/linux/elf-em.h

Signed-off-by: Dan Aloni
Signed-off-by: Catalin Marinas

diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 56211f2..f4b46d3 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -19,8 +19,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 typedef struct user_fp elf_fpregset_t;

-#define EM_ARM	40
-
 #define EF_ARM_EABI_MASK	0xff000000
 #define EF_ARM_EABI_UNKNOWN	0x00000000
 #define EF_ARM_EABI_VER1	0x01000000
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index fe32c0e..e7fa87f 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -33,8 +33,6 @@ typedef unsigned long elf_greg_t;
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 typedef struct user_fpsimd_state elf_fpregset_t;

-#define EM_AARCH64	183
-
 /*
  * AArch64 static relocation types.
 */
@@ -151,7 +149,6 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk

 #ifdef CONFIG_COMPAT
-#define EM_ARM	40
 #define COMPAT_ELF_PLATFORM	("v8l")

 #define COMPAT_ELF_ET_DYN_BASE	(randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index 8e2b7ba..59c17a2 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -22,6 +22,7 @@
 #define EM_PPC		20	/* PowerPC */
 #define EM_PPC64	21	/* PowerPC64 */
 #define EM_SPU		23	/* Cell BE SPU */
+#define EM_ARM		40	/* ARM 32 bit */
 #define EM_SH		42	/* SuperH */
 #define EM_SPARCV9	43	/* SPARC v9 64-bit */
 #define EM_IA_64	50	/* HP/Intel IA-64 */
@@ -34,6 +35,7 @@
 #define EM_MN10300	89	/* Panasonic/MEI MN10300, AM33 */
 #define EM_BLACKFIN	106	/* ADI Blackfin Processor */
 #define EM_TI_C6000	140	/* TI C6X DSPs */
+#define EM_AARCH64	183	/* ARM 64 bit */
 #define EM_FRV		0x5441	/* Fujitsu FR-V */
 #define EM_AVR32	0x18ad	/* Atmel AVR32 */
-- cgit v0.10.2

From d50240a5f6ceaf690a77b0fccb17be51cfa151c2 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 12 Jun 2013 16:28:04 +0100
Subject: arm64: mm: permit use of tagged pointers at EL0

TCR.TBI0 can be used to cause hardware address translation to ignore the top byte of userspace virtual addresses. Whilst not especially useful in standard C programs, this can be used by JITs to `tag' pointers with various pieces of metadata.

This patch enables this bit for AArch64 Linux, and adds a new file to Documentation/arm64/ which describes some potential caveats when using tagged virtual addresses.

Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas

diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
new file mode 100644
index 0000000..264e984
--- /dev/null
+++ b/Documentation/arm64/tagged-pointers.txt
@@ -0,0 +1,34 @@
+		Tagged virtual addresses in AArch64 Linux
+		=========================================
+
+Author: Will Deacon
+Date  : 12 June 2013
+
+This document briefly describes the provision of tagged virtual
+addresses in the AArch64 translation system and their potential uses
+in AArch64 Linux.
+
+The kernel configures the translation tables so that translations made
+via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
+the virtual address ignored by the translation hardware. This frees up
+this byte for application use, with the following caveats:
+
+	(1) The kernel requires that all user addresses passed to EL1
+	    are tagged with tag 0x00. This means that any syscall
+	    parameters containing user virtual addresses *must* have
+	    their top byte cleared before trapping to the kernel.
+
+	(2) Tags are not guaranteed to be preserved when delivering
+	    signals. This means that signal handlers in applications
+	    making use of tags cannot rely on the tag information for
+	    user virtual addresses being maintained for fields inside
+	    siginfo_t. One exception to this rule is for signals raised
+	    in response to debug exceptions, where the tag information
+	    will be preserved.
+
+	(3) Special care should be taken when using tagged pointers,
+	    since it is likely that C compilers will not hazard two
+	    addresses differing only in the upper bits.
+
+The architecture prevents the use of a tagged PC, so the upper byte will
+be set to a sign-extension of bit 55 on exception return.
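To illustrate the userspace contract spelled out in tagged-pointers.txt above: an application may pack metadata into bits 63:56 of a pointer and dereference it directly, but it has to strip the tag before the address is passed to the kernel (caveat 1). The helper names below are invented, and the program only behaves as described on AArch64 Linux with TBI0 enabled as this patch arranges.

#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#define TAG_SHIFT	56
#define TAG_MASK	(0xffULL << TAG_SHIFT)

/* Hypothetical helpers: set and clear a one-byte tag in bits 63:56. */
static void *tag_ptr(void *p, uint8_t tag)
{
	return (void *)(((uintptr_t)p & ~TAG_MASK) |
			((uintptr_t)tag << TAG_SHIFT));
}

static void *untag_ptr(void *p)
{
	return (void *)((uintptr_t)p & ~TAG_MASK);
}

int main(void)
{
	char *buf = malloc(32);
	char *tagged = tag_ptr(buf, 0x5a);	/* JIT-style metadata */

	/* Loads and stores through the tagged pointer translate normally
	 * because the hardware ignores the top byte (TCR_TBI0). */
	tagged[0] = 'x';

	/* Caveat 1: the tag must be cleared before the address reaches EL1. */
	write(STDOUT_FILENO, untag_ptr(tagged), 1);

	free(buf);
	return 0;
}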
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index e182a35..d57e668 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -122,5 +122,6 @@
 #define TCR_TG1_64K	(UL(1) << 30)
 #define TCR_IPS_40BIT	(UL(2) << 32)
 #define TCR_ASID16	(UL(1) << 36)
+#define TCR_TBI0	(UL(1) << 37)

 #endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 57640df..3881fd1 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -423,6 +423,7 @@ el0_da:
 	 * Data abort handling
 	 */
 	mrs	x0, far_el1
+	bic	x0, x0, #(0xff << 56)
 	disable_step x1
 	isb
 	enable_dbg
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index f84fcf7..b1b31bb 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -147,7 +147,7 @@ ENTRY(__cpu_setup)
 	 * both user and kernel.
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
-		TCR_ASID16 | (1 << 31)
+		TCR_ASID16 | TCR_TBI0 | (1 << 31)
#ifdef CONFIG_ARM64_64K_PAGES
 	orr	x10, x10, TCR_TG0_64K
 	orr	x10, x10, TCR_TG1_64K
-- cgit v0.10.2

From 4d5e0b1527dd330940e8b7180b8d7016fc900352 Mon Sep 17 00:00:00 2001
From: Mark Salter
Date: Wed, 4 Sep 2013 15:10:02 +0100
Subject: Documentation/arm64: clarify requirements for DTB placement

The current description of DTB placement requirements does not quite match the kernel code in head.S: __vet_fdt and __create_page_tables. This patch tweaks the text to match the actual requirements placed on it by the code.

Signed-off-by: Mark Salter
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas

diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 5273c4d..98df4a0 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -45,9 +45,9 @@ sees fit.)

 Requirement: MANDATORY

-The device tree blob (dtb) must be no bigger than 2 megabytes in size
-and placed at a 2-megabyte boundary within the first 512 megabytes from
-the start of the kernel image. This is to allow the kernel to map the
+The device tree blob (dtb) must be placed on an 8-byte boundary within
+the first 512 megabytes from the start of the kernel image and must not
+cross a 2-megabyte boundary. This is to allow the kernel to map the
 blob using a single section mapping in the initial page tables.
-- cgit v0.10.2
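The revised DTB placement rules lend themselves to a quick sanity check in a boot loader or image-packaging script. The helper below is a stand-alone sketch of one reading of those rules (8-byte alignment, within 512MB of the kernel image start, no 2MB boundary crossing); the authoritative check at boot is __vet_fdt in arch/arm64/kernel/head.S mentioned above, and this function is not code from the series.

#include <stdbool.h>
#include <stdint.h>

#define SZ_2M	0x200000ULL
#define SZ_512M	0x20000000ULL

static bool dtb_placement_ok(uint64_t kernel_start, uint64_t dtb_start,
			     uint64_t dtb_size)
{
	if (dtb_start & 7)				/* 8-byte boundary */
		return false;
	if (dtb_start < kernel_start ||
	    dtb_start - kernel_start >= SZ_512M)	/* first 512MB */
		return false;
	if ((dtb_start & ~(SZ_2M - 1)) !=
	    ((dtb_start + dtb_size - 1) & ~(SZ_2M - 1)))
		return false;				/* crosses a 2MB boundary */
	return true;
}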