Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/asm-offsets.c      3
-rw-r--r--  arch/arm/kernel/entry-armv.S     277
-rw-r--r--  arch/arm/kernel/entry-header.S    19
-rw-r--r--  arch/arm/kernel/head-nommu.S       8
-rw-r--r--  arch/arm/kernel/head.S             8
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c   12
-rw-r--r--  arch/arm/kernel/irq.c             51
-rw-r--r--  arch/arm/kernel/module.c          13
-rw-r--r--  arch/arm/kernel/perf_event.c      10
-rw-r--r--  arch/arm/kernel/pmu.c             87
-rw-r--r--  arch/arm/kernel/setup.c          101
-rw-r--r--  arch/arm/kernel/sleep.S           84
-rw-r--r--  arch/arm/kernel/smp.c             17
-rw-r--r--  arch/arm/kernel/smp_scu.c          2
-rw-r--r--  arch/arm/kernel/smp_twd.c          2
-rw-r--r--  arch/arm/kernel/tcm.c             68
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S    126
17 files changed, 480 insertions, 408 deletions
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 927522c..16baba2 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -59,6 +59,9 @@ int main(void)
   DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
   DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
+#ifdef CONFIG_SMP
+  DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));
+#endif
 #ifdef CONFIG_ARM_THUMBEE
   DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
 #endif
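The VFP_CPU constant added above follows the usual asm-offsets pattern: a C file is compiled to assembly at build time and the offsetof() results are scraped out, so hand-written .S code can address C structure fields without hard-coding their layout. A standalone sketch of the idea (the struct here is a stand-in, not the kernel's thread_info, and the kernel emits the constants via asm() markers rather than printf):

	#include <stddef.h>
	#include <stdio.h>

	struct demo_info {                      /* stand-in for thread_info */
		unsigned long flags;
		unsigned long tp_value;
		unsigned int  cpu;
	};

	#define DEFINE(sym, val) printf("#define %s %zu\n", #sym, (size_t)(val))

	int main(void)
	{
		DEFINE(TI_TP_VALUE, offsetof(struct demo_info, tp_value));
		DEFINE(TI_CPU,      offsetof(struct demo_info, cpu));
		return 0;
	}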
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 90c62cd..fa02a22 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -29,21 +29,53 @@
 #include <asm/entry-macro-multi.S>
 
 /*
- * Interrupt handling.  Preserves r7, r8, r9
+ * Interrupt handling.
  */
 	.macro	irq_handler
 #ifdef CONFIG_MULTI_IRQ_HANDLER
-	ldr	r5, =handle_arch_irq
+	ldr	r1, =handle_arch_irq
 	mov	r0, sp
-	ldr	r5, [r5]
+	ldr	r1, [r1]
 	adr	lr, BSYM(9997f)
-	teq	r5, #0
-	movne	pc, r5
+	teq	r1, #0
+	movne	pc, r1
 #endif
 	arch_irq_handler_default
 9997:
 	.endm
 
+	.macro	pabt_helper
+	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+#ifdef MULTI_PABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
+#else
+	bl	CPU_PABORT_HANDLER
+#endif
+	.endm
+
+	.macro	dabt_helper
+
+	@
+	@ Call the processor-specific abort handler:
+	@
+	@  r2 - pt_regs
+	@  r4 - aborted context pc
+	@  r5 - aborted context psr
+	@
+	@ The abort handler must return the aborted address in r0, and
+	@ the fault status register in r1.  r9 must be preserved.
+	@
+#ifdef MULTI_DABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
+#else
+	bl	CPU_DABORT_HANDLER
+#endif
+	.endm
+
 #ifdef CONFIG_KPROBES
 	.section	.kprobes.text,"ax",%progbits
 #else
@@ -126,106 +158,74 @@ ENDPROC(__und_invalid)
 SPFIX(	subeq	sp, sp, #4	)
 	stmia	sp, {r1 - r12}
 
-	ldmia	r0, {r1 - r3}
-	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
-	mov	r4, #-1			@  ""  ""      ""       ""
-	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
-SPFIX(	addeq	r0, r0, #4	)
-	str	r1, [sp, #-4]!		@ save the "real" r0 copied
+	ldmia	r0, {r3 - r5}
+	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
+	mov	r6, #-1			@  ""  ""      ""       ""
+	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+SPFIX(	addeq	r2, r2, #4	)
+	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
 
-	mov	r1, lr
+	mov	r3, lr
 
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
-	@  r0 - sp_svc
-	@  r1 - lr_svc
-	@  r2 - lr_<exception>, already fixed up for correct return/restart
-	@  r3 - spsr_<exception>
-	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+	@  r2 - sp_svc
+	@  r3 - lr_svc
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 	@
-	stmia	r5, {r0 - r4}
+	stmia	r7, {r2 - r6}
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 	.endm
 
 	.align	5
 __dabt_svc:
 	svc_entry
-
-	@
-	@ get ready to re-enable interrupts if appropriate
-	@
-	mrs	r9, cpsr
-	tst	r3, #PSR_I_BIT
-	biceq	r9, r9, #PSR_I_BIT
-
-	@
-	@ Call the processor-specific abort handler:
-	@
-	@  r2 - aborted context pc
-	@  r3 - aborted context cpsr
-	@
-	@ The abort handler must return the aborted address in r0, and
-	@ the fault status register in r1.  r9 must be preserved.
-	@
-#ifdef MULTI_DABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
-	bl	CPU_DABORT_HANDLER
-#endif
-
-	@
-	@ set desired IRQ state, then call main handler
-	@
-	debug_entry r1
-	msr	cpsr_c, r9
 	mov	r2, sp
-	bl	do_DataAbort
+	dabt_helper
 
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
 	disable_irq_notrace
 
-	@
-	@ restore SPSR and restart the instruction
-	@
-	ldr	r2, [sp, #S_PSR]
-	svc_exit r2				@ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)
 
 	.align	5
__irq_svc:
 	svc_entry
+	irq_handler
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	add	r7, r8, #1			@ increment it
-	str	r7, [tsk, #TI_PREEMPT]
-#endif
-
-	irq_handler
-#ifdef CONFIG_PREEMPT
-	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
 	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	teq	r8, #0				@ if preempt count != 0
 	movne	r0, #0				@ force flags to 0
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 #endif
-	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
+
 #ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r4, #PSR_I_BIT
-	bleq	trace_hardirqs_on
+	@ The parent context IRQs must have been enabled to get here in
+	@ the first place, so there's no point checking the PSR I bit.
+	bl	trace_hardirqs_on
 #endif
-	svc_exit r4				@ return from exception
+	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)
@@ -251,7 +251,6 @@ __und_svc:
 #else
 	svc_entry
 #endif
-
 	@
 	@ call emulation code, which returns using r9 if it has emulated
 	@ the instruction, or the more conventional lr if we are to treat
@@ -260,15 +259,16 @@ __und_svc:
 	@  r0 - instruction
 	@
 #ifndef CONFIG_THUMB2_KERNEL
-	ldr	r0, [r2, #-4]
+	ldr	r0, [r4, #-4]
 #else
-	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
+	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
 	and	r9, r0, #0xf800
 	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
-	ldrhhs	r9, [r2]			@ bottom 16 bits
+	ldrhhs	r9, [r4]			@ bottom 16 bits
 	orrhs	r0, r9, r0, lsl #16
 #endif
 	adr	r9, BSYM(1f)
+	mov	r2, r4
 	bl	call_fpe
 
 	mov	r0, sp				@ struct pt_regs *regs
@@ -282,45 +282,35 @@ __und_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
-	svc_exit r2				@ return from exception
+	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)
 
 	.align	5
__pabt_svc:
 	svc_entry
-
-	@
-	@ re-enable interrupts if appropriate
-	@
-	mrs	r9, cpsr
-	tst	r3, #PSR_I_BIT
-	biceq	r9, r9, #PSR_I_BIT
-
-	mov	r0, r2			@ pass address of aborted instruction.
-#ifdef MULTI_PABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
-	bl	CPU_PABORT_HANDLER
-#endif
-	debug_entry r1
-	msr	cpsr_c, r9			@ Maybe enable interrupts
 	mov	r2, sp				@ regs
-	bl	do_PrefetchAbort		@ call abort handler
+	pabt_helper
 
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
 	disable_irq_notrace
 
-	@
-	@ restore SPSR and restart the instruction
-	@
-	ldr	r2, [sp, #S_PSR]
-	svc_exit r2				@ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)
@@ -351,23 +341,23 @@ ENDPROC(__pabt_svc)
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)
 
-	ldmia	r0, {r1 - r3}
+	ldmia	r0, {r3 - r5}
 	add	r0, sp, #S_PC		@ here for interlock avoidance
-	mov	r4, #-1			@  ""  ""     ""        ""
+	mov	r6, #-1			@  ""  ""     ""        ""
 
-	str	r1, [sp]		@ save the "real" r0 copied
+	str	r3, [sp]		@ save the "real" r0 copied
 					@ from the exception stack
 
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
-	@  r2 - lr_<exception>, already fixed up for correct return/restart
-	@  r3 - spsr_<exception>
-	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 	@
 	@ Also, separately save sp_usr and lr_usr
 	@
-	stmia	r0, {r2 - r4}
+	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
@@ -380,6 +370,10 @@ ENDPROC(__pabt_svc)
 	@ Clear FP to mark the first stack frame
 	@
 	zero_fp
+
+#ifdef CONFIG_IRQSOFF_TRACER
+	bl	trace_hardirqs_off
+#endif
 	.endm
 
 	.macro	kuser_cmpxchg_check
@@ -391,7 +385,7 @@ ENDPROC(__pabt_svc)
 	@ if it was interrupted in a critical region.  Here we
 	@ perform a quick test inline since it should be false
 	@ 99.9999% of the time.  The rest is done out of line.
-	cmp	r2, #TASK_SIZE
+	cmp	r4, #TASK_SIZE
 	blhs	kuser_cmpxchg_fixup
 #endif
 #endif
@@ -401,32 +395,9 @@ ENDPROC(__pabt_svc)
__dabt_usr:
 	usr_entry
 	kuser_cmpxchg_check
-
-	@
-	@ Call the processor-specific abort handler:
-	@
-	@  r2 - aborted context pc
-	@  r3 - aborted context cpsr
-	@
-	@ The abort handler must return the aborted address in r0, and
-	@ the fault status register in r1.
-	@
-#ifdef MULTI_DABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
-	bl	CPU_DABORT_HANDLER
-#endif
-
-	@
-	@ IRQs on, then call the main handler
-	@
-	debug_entry r1
-	enable_irq
 	mov	r2, sp
-	adr	lr, BSYM(ret_from_exception)
-	b	do_DataAbort
+	dabt_helper
+	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)
@@ -434,28 +405,8 @@ ENDPROC(__dabt_usr)
__irq_usr:
 	usr_entry
 	kuser_cmpxchg_check
-
-#ifdef CONFIG_IRQSOFF_TRACER
-	bl	trace_hardirqs_off
-#endif
-
-	get_thread_info tsk
-#ifdef CONFIG_PREEMPT
-	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	add	r7, r8, #1			@ increment it
-	str	r7, [tsk, #TI_PREEMPT]
-#endif
-
 	irq_handler
-#ifdef CONFIG_PREEMPT
-	ldr	r0, [tsk, #TI_PREEMPT]
-	str	r8, [tsk, #TI_PREEMPT]
-	teq	r0, r7
-ARM(	strne	r0, [r0, -r0]	)
-THUMB(	movne	r0, #0		)
-THUMB(	strne	r0, [r0]	)
-#endif
-
+	get_thread_info tsk
 	mov	why, #0
 	b	ret_to_user_from_irq
 UNWIND(.fnend		)
@@ -467,6 +418,9 @@ ENDPROC(__irq_usr)
__und_usr:
 	usr_entry
 
+	mov	r2, r4
+	mov	r3, r5
+
 	@
 	@ fall through to the emulation code, which returns using r9 if
 	@ it has emulated the instruction, or the more conventional lr
@@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown)
 	.align	5
__pabt_usr:
 	usr_entry
-
-	mov	r0, r2			@ pass address of aborted instruction.
-#ifdef MULTI_PABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
-	bl	CPU_PABORT_HANDLER
-#endif
-	debug_entry r1
-	enable_irq				@ Enable interrupts
 	mov	r2, sp				@ regs
-	bl	do_PrefetchAbort		@ call abort handler
+	pabt_helper
 UNWIND(.fnend		)
 	/* fall through */
/*
@@ -927,13 +870,13 @@ __kuser_cmpxchg:				@ 0xffff0fc0
 	.text
kuser_cmpxchg_fixup:
 	@ Called from kuser_cmpxchg_check macro.
-	@ r2 = address of interrupted insn (must be preserved).
+	@ r4 = address of interrupted insn (must be preserved).
 	@ sp = saved regs. r7 and r8 are clobbered.
 	@ 1b = first critical insn, 2b = last critical insn.
-	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 	mov	r7, #0xffff0fff
 	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
-	subs	r8, r2, r7
+	subs	r8, r4, r7
 	rsbcss	r8, r8, #(2b - 1b)
 	strcs	r7, [sp, #S_PC]
 	mov	pc, lr

diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 051166c..4d6ad83 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -165,25 +165,6 @@
 	.endm
 #endif	/* !CONFIG_THUMB2_KERNEL */
 
-	@
-	@ Debug exceptions are taken as prefetch or data aborts.
-	@ We must disable preemption during the handler so that
-	@ we can access the debug registers safely.
-	@
-	.macro	debug_entry, fsr
-#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
-	ldr	r4, =0x40f		@ mask out fsr.fs
-	and	r5, r4, \fsr
-	cmp	r5, #2			@ debug exception
-	bne	1f
-	get_thread_info r10
-	ldr	r6, [r10, #TI_PREEMPT]	@ get preempt count
-	add	r11, r6, #1		@ increment it
-	str	r11, [r10, #TI_PREEMPT]
-1:
-#endif
-	.endm
-
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
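A recurring pattern in the entry-armv.S changes above is the pair of PSR_I_BIT tests before svc_exit. Because svc_entry now calls trace_hardirqs_off unconditionally, the exit path has to tell the IRQ-state tracer which state is about to be restored. In rough C terms (trace_hardirqs_on/off are the real lockdep annotations; saved_psr stands for the spsr value kept in r5):

	if (!(saved_psr & PSR_I_BIT))
		trace_hardirqs_on();    /* returning with IRQs enabled */
	else
		trace_hardirqs_off();   /* returning with IRQs disabled */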
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6b1e0ad..d46f259 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,8 +32,16 @@
  * numbers for r1.
  *
  */
+	.arm
+
 	__HEAD
ENTRY(stext)
+
+ THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
+ THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
+ THUMB(	.thumb			)	@ switch to Thumb now.
+ THUMB(1:			)
+
 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
 #ifndef CONFIG_CPU_CP15

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 278c1b0..742b610 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -71,8 +71,16 @@
  * crap here - that's what the boot loader (or in extreme, well justified
  * circumstances, zImage) is for.
  */
+	.arm
+
 	__HEAD
ENTRY(stext)
+
+ THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
+ THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
+ THUMB(	.thumb			)	@ switch to Thumb now.
+ THUMB(1:			)
+
 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
 	mrc	p15, 0, r9, c0, c0		@ get processor id

diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 87acc25..a927ca1 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -796,7 +796,7 @@ unlock:
 
 /*
  * Called from either the Data Abort Handler [watchpoint] or the
- * Prefetch Abort Handler [breakpoint] with preemption disabled.
+ * Prefetch Abort Handler [breakpoint] with interrupts disabled.
  */
 static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 				 struct pt_regs *regs)
@@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 	int ret = 0;
 	u32 dscr;
 
-	/* We must be called with preemption disabled. */
-	WARN_ON(preemptible());
+	preempt_disable();
+
+	if (interrupts_enabled(regs))
+		local_irq_enable();
 
 	/* We only handle watchpoints and hardware breakpoints. */
 	ARM_DBG_READ(c1, 0, dscr);
@@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 		ret = 1; /* Unhandled fault. */
 	}
 
-	/*
-	 * Re-enable preemption after it was disabled in the
-	 * low-level exception handling code.
-	 */
 	preempt_enable();
 
 	return ret;
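With debug_entry gone from the assembly paths, hw_breakpoint_pending() now manages preemption and the IRQ state itself. The resulting shape of the handler, schematically (interrupts_enabled() tests the I bit in the interrupted context's saved PSR; the middle call is a placeholder for the existing watchpoint/breakpoint dispatch, not a real function):

	preempt_disable();                  /* debug registers are per-CPU */
	if (interrupts_enabled(regs))
		local_irq_enable();         /* mirror the interrupted context */

	ret = dispatch_debug_fault(addr, fsr, regs);    /* placeholder */

	preempt_enable();
	return ret;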
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 83bbad0..0f928a1 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -131,54 +131,63 @@ int __init arch_probe_nr_irqs(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static bool migrate_one_irq(struct irq_data *d)
+static bool migrate_one_irq(struct irq_desc *desc)
 {
-	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = d->affinity;
+	struct irq_chip *c;
 	bool ret = false;
 
-	if (cpu >= nr_cpu_ids) {
-		cpu = cpumask_any(cpu_online_mask);
+	/*
+	 * If this is a per-CPU interrupt, or the affinity does not
+	 * include this CPU, then we have nothing to do.
+	 */
+	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+		return false;
+
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
 		ret = true;
 	}
 
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
-
-	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+	c = irq_data_get_irq_chip(d);
+	if (c->irq_set_affinity)
+		c->irq_set_affinity(d, affinity, true);
+	else
+		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 
 	return ret;
 }
 
 /*
- * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
- * the affinity settings do not allow other CPUs, force them onto any
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
  * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
 */
void migrate_irqs(void)
{
-	unsigned int i, cpu = smp_processor_id();
+	unsigned int i;
	struct irq_desc *desc;
	unsigned long flags;
 
	local_irq_save(flags);
 
	for_each_irq_desc(i, desc) {
-		struct irq_data *d = &desc->irq_data;
		bool affinity_broken = false;
 
-		raw_spin_lock(&desc->lock);
-		do {
-			if (desc->action == NULL)
-				break;
-
-			if (d->node != cpu)
-				break;
+		if (!desc)
+			continue;
 
-			affinity_broken = migrate_one_irq(d);
-		} while (0);
+		raw_spin_lock(&desc->lock);
+		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);
 
		if (affinity_broken && printk_ratelimit())
-			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i,
+				smp_processor_id());
	}
 
	local_irq_restore(flags);

diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index fee7c36..016d6a0 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 				offset -= 0x02000000;
 			offset += sym->st_value - loc;
 
-			/* only Thumb addresses allowed (no interworking) */
-			if (!(offset & 1) ||
+			/*
+			 * For function symbols, only Thumb addresses are
+			 * allowed (no interworking).
+			 *
+			 * For non-function symbols, the destination
+			 * has no specific ARM/Thumb disposition, so
+			 * the branch is resolved under the assumption
+			 * that interworking is not required.
+			 */
+			if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+				!(offset & 1)) ||
 			    offset <= (s32)0xff000000 ||
 			    offset >= (s32)0x01000000) {
 				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
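The module.c change keys the Thumb-bit test on the ELF symbol type. A minimal userspace illustration of the same check, using the standard <elf.h> definitions (the function name is ours, not the kernel's):

	#include <elf.h>
	#include <stdint.h>

	/* Returns nonzero if a Thumb BL/BLX relocation must be rejected. */
	static int thm_call_out_of_range(const Elf32_Sym *sym, int32_t offset)
	{
		/* Only function symbols carry a Thumb bit in bit 0; a
		 * cleared bit would mean an ARM destination, which needs
		 * interworking that the module loader does not perform. */
		if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC && !(offset & 1))
			return 1;
		/* BL/BLX reach is roughly +/-16MB from the call site. */
		return offset <= (int32_t)0xff000000 ||
		       offset >= (int32_t)0x01000000;
	}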
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d53c0ab..8d85078 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -435,7 +435,7 @@ armpmu_reserve_hardware(void)
 			if (irq >= 0)
 				free_irq(irq, NULL);
 		}
-		release_pmu(pmu_device);
+		release_pmu(ARM_PMU_DEVICE_CPU);
 		pmu_device = NULL;
 	}
 
@@ -454,7 +454,7 @@ armpmu_release_hardware(void)
 	}
 	armpmu->stop();
 
-	release_pmu(pmu_device);
+	release_pmu(ARM_PMU_DEVICE_CPU);
 	pmu_device = NULL;
 }
 
@@ -583,7 +583,7 @@ static int armpmu_event_init(struct perf_event *event)
 static void armpmu_enable(struct pmu *pmu)
 {
 	/* Enable all of the perf events on hardware. */
-	int idx;
+	int idx, enabled = 0;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	if (!armpmu)
@@ -596,9 +596,11 @@ static void armpmu_enable(struct pmu *pmu)
 			continue;
 
 		armpmu->enable(&event->hw, idx);
+		enabled = 1;
 	}
 
-	armpmu->start();
+	if (enabled)
+		armpmu->start();
 }
 
 static void armpmu_disable(struct pmu *pmu)
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c79eec..2b70709 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #include <asm/pmu.h>
@@ -25,36 +26,88 @@ static volatile long pmu_lock;
 
 static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
 
-static int __devinit pmu_device_probe(struct platform_device *pdev)
+static int __devinit pmu_register(struct platform_device *pdev,
+					enum arm_pmu_type type)
 {
-
-	if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
+	if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
 		pr_warning("received registration request for unknown "
-				"device %d\n", pdev->id);
+				"device %d\n", type);
 		return -EINVAL;
 	}
 
-	if (pmu_devices[pdev->id])
-		pr_warning("registering new PMU device type %d overwrites "
-				"previous registration!\n", pdev->id);
-	else
-		pr_info("registered new PMU device of type %d\n",
-				pdev->id);
+	if (pmu_devices[type]) {
+		pr_warning("rejecting duplicate registration of PMU device "
+			"type %d.", type);
+		return -ENOSPC;
+	}
 
-	pmu_devices[pdev->id] = pdev;
+	pr_info("registered new PMU device of type %d\n", type);
+	pmu_devices[type] = pdev;
 	return 0;
 }
 
-static struct platform_driver pmu_driver = {
+#define OF_MATCH_PMU(_name, _type) {	\
+	.compatible = _name,		\
+	.data = (void *)_type,		\
+}
+
+#define OF_MATCH_CPU(name)	OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
+
+static struct of_device_id armpmu_of_device_ids[] = {
+	OF_MATCH_CPU("arm,cortex-a9-pmu"),
+	OF_MATCH_CPU("arm,cortex-a8-pmu"),
+	OF_MATCH_CPU("arm,arm1136-pmu"),
+	OF_MATCH_CPU("arm,arm1176-pmu"),
+	{},
+};
+
+#define PLAT_MATCH_PMU(_name, _type) {	\
+	.name		= _name,	\
+	.driver_data	= _type,	\
+}
+
+#define PLAT_MATCH_CPU(_name)	PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+	PLAT_MATCH_CPU("arm-pmu"),
+	{},
+};
+
+enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
+{
+	const struct of_device_id	*of_id;
+	const struct platform_device_id *pdev_id;
+
+	/* provided by of_device_id table */
+	if (pdev->dev.of_node) {
+		of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
+		BUG_ON(!of_id);
+		return (enum arm_pmu_type)of_id->data;
+	}
+
+	/* Provided by platform_device_id table */
+	pdev_id = platform_get_device_id(pdev);
+	BUG_ON(!pdev_id);
+	return pdev_id->driver_data;
+}
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+	return pmu_register(pdev, armpmu_device_type(pdev));
+}
+
+static struct platform_driver armpmu_driver = {
 	.driver		= {
 		.name	= "arm-pmu",
+		.of_match_table = armpmu_of_device_ids,
 	},
-	.probe		= pmu_device_probe,
+	.probe		= armpmu_device_probe,
+	.id_table	= armpmu_plat_device_ids,
 };
 
 static int __init register_pmu_driver(void)
 {
-	return platform_driver_register(&pmu_driver);
+	return platform_driver_register(&armpmu_driver);
 }
 device_initcall(register_pmu_driver);
@@ -77,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device)
 EXPORT_SYMBOL_GPL(reserve_pmu);
 
 int
-release_pmu(struct platform_device *pdev)
+release_pmu(enum arm_pmu_type device)
 {
-	if (WARN_ON(pdev != pmu_devices[pdev->id]))
+	if (WARN_ON(!pmu_devices[device]))
 		return -EINVAL;
-	clear_bit_unlock(pdev->id, &pmu_lock);
+	clear_bit_unlock(device, &pmu_lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(release_pmu);
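The new match tables let the same driver bind either through the device tree or through a legacy platform device. A hypothetical board file registering the CPU PMU via the platform_device_id path might look like this (the IRQ number is invented for the example):

	static struct resource pmu_resource = {
		.start = 100,                   /* board-specific PMU IRQ */
		.end   = 100,
		.flags = IORESOURCE_IRQ,
	};

	static struct platform_device pmu_device = {
		.name          = "arm-pmu",     /* matches armpmu_plat_device_ids */
		.id            = -1,
		.resource      = &pmu_resource,
		.num_resources = 1,
	};

	/* platform_device_register(&pmu_device) then probes through
	 * armpmu_device_probe() and resolves to ARM_PMU_DEVICE_CPU; a DT
	 * node with compatible = "arm,cortex-a9-pmu" takes the
	 * of_device_id route to the same result. */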
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ed11fb0..9c3278f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -73,6 +73,7 @@ __setup("fpe=", fpe_setup);
 #endif
 
 extern void paging_init(struct machine_desc *desc);
+extern void sanity_check_meminfo(void);
 extern void reboot_setup(char *str);
 
 unsigned int processor_id;
@@ -342,54 +343,6 @@ static void __init feat_v6_fixup(void)
 		elf_hwcap &= ~HWCAP_TLS;
 }
 
-static void __init setup_processor(void)
-{
-	struct proc_info_list *list;
-
-	/*
-	 * locate processor in the list of supported processor
-	 * types.  The linker builds this table for us from the
-	 * entries in arch/arm/mm/proc-*.S
-	 */
-	list = lookup_processor_type(read_cpuid_id());
-	if (!list) {
-		printk("CPU configuration botched (ID %08x), unable "
-		       "to continue.\n", read_cpuid_id());
-		while (1);
-	}
-
-	cpu_name = list->cpu_name;
-
-#ifdef MULTI_CPU
-	processor = *list->proc;
-#endif
-#ifdef MULTI_TLB
-	cpu_tlb = *list->tlb;
-#endif
-#ifdef MULTI_USER
-	cpu_user = *list->user;
-#endif
-#ifdef MULTI_CACHE
-	cpu_cache = *list->cache;
-#endif
-
-	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
-	       proc_arch[cpu_architecture()], cr_alignment);
-
-	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
-	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
-	elf_hwcap = list->elf_hwcap;
-#ifndef CONFIG_ARM_THUMB
-	elf_hwcap &= ~HWCAP_THUMB;
-#endif
-
-	feat_v6_fixup();
-
-	cacheid_init();
-	cpu_proc_init();
-}
-
 /*
  * cpu_init - initialise one CPU.
  *
@@ -405,6 +358,8 @@ void cpu_init(void)
 		BUG();
 	}
 
+	cpu_proc_init();
+
 	/*
 	 * Define the placement constraint for the inline asm directive below.
 	 * In Thumb-2, msr with an immediate value is not allowed.
@@ -441,6 +396,54 @@ void cpu_init(void)
 	    : "r14");
 }
 
+static void __init setup_processor(void)
+{
+	struct proc_info_list *list;
+
+	/*
+	 * locate processor in the list of supported processor
+	 * types.  The linker builds this table for us from the
+	 * entries in arch/arm/mm/proc-*.S
+	 */
+	list = lookup_processor_type(read_cpuid_id());
+	if (!list) {
+		printk("CPU configuration botched (ID %08x), unable "
+		       "to continue.\n", read_cpuid_id());
+		while (1);
+	}
+
+	cpu_name = list->cpu_name;
+
+#ifdef MULTI_CPU
+	processor = *list->proc;
+#endif
+#ifdef MULTI_TLB
+	cpu_tlb = *list->tlb;
+#endif
+#ifdef MULTI_USER
+	cpu_user = *list->user;
+#endif
+#ifdef MULTI_CACHE
+	cpu_cache = *list->cache;
+#endif
+
+	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+	       proc_arch[cpu_architecture()], cr_alignment);
+
+	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
+	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
+	elf_hwcap = list->elf_hwcap;
+#ifndef CONFIG_ARM_THUMB
+	elf_hwcap &= ~HWCAP_THUMB;
+#endif
+
+	feat_v6_fixup();
+
+	cacheid_init();
+	cpu_init();
+}
+
 void __init dump_machine_table(void)
 {
 	struct machine_desc *p;
@@ -900,6 +903,7 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 
 	paging_init(mdesc);
@@ -913,7 +917,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 	reserve_crashkernel();
 
-	cpu_init();
 	tcm_init();
 
 #ifdef CONFIG_MULTI_IRQ_HANDLER
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 6398ead..dc902f2 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -10,64 +10,61 @@
/*
 * Save CPU state for a suspend
 *  r1 = v:p offset
- *  r3 = virtual return function
- * Note: sp is decremented to allocate space for CPU state on stack
- * r0-r3,r9,r10,lr corrupted
+ *  r2 = suspend function arg0
+ *  r3 = suspend function
 */
-ENTRY(cpu_suspend)
-	mov	r9, lr
+ENTRY(__cpu_suspend)
+	stmfd	sp!, {r4 - r11, lr}
 #ifdef MULTI_CPU
 	ldr	r10, =processor
-	mov	r2, sp			@ current virtual SP
-	ldr	r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+	ldr	r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
 	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
-	sub	sp, sp, r0		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer
+#else
+	ldr	r5, =cpu_suspend_size
+	ldr	ip, =cpu_do_resume
+#endif
+	mov	r6, sp			@ current virtual SP
+	sub	sp, sp, r5		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer to CPU save block
 	add	ip, ip, r1		@ convert resume fn to phys
-	stmfd	sp!, {r1, r2, r3, ip}	@ save v:p, virt SP, retfn, phys resume fn
-	ldr	r3, =sleep_save_sp
-	add	r2, sp, r1		@ convert SP to phys
+	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
+	ldr	r5, =sleep_save_sp
+	add	r6, sp, r1		@ convert SP to phys
+	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
 	ALT_UP(mov lr, #0)
 	and	lr, lr, #15
-	str	r2, [r3, lr, lsl #2]	@ save phys SP
+	str	r6, [r5, lr, lsl #2]	@ save phys SP
 #else
-	str	r2, [r3]		@ save phys SP
+	str	r6, [r5]		@ save phys SP
 #endif
+#ifdef MULTI_CPU
 	mov	lr, pc
 	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
 #else
-	mov	r2, sp			@ current virtual SP
-	ldr	r0, =cpu_suspend_size
-	sub	sp, sp, r0		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer
-	stmfd	sp!, {r1, r2, r3}	@ save v:p, virt SP, return fn
-	ldr	r3, =sleep_save_sp
-	add	r2, sp, r1		@ convert SP to phys
-#ifdef CONFIG_SMP
-	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
-	ALT_UP(mov lr, #0)
-	and	lr, lr, #15
-	str	r2, [r3, lr, lsl #2]	@ save phys SP
-#else
-	str	r2, [r3]		@ save phys SP
-#endif
 	bl	cpu_do_suspend
 #endif
 
 	@ flush data cache
 #ifdef MULTI_CACHE
 	ldr	r10, =cpu_cache
-	mov	lr, r9
+	mov	lr, pc
 	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
 #else
-	mov	lr, r9
-	b	__cpuc_flush_kern_all
+	bl	__cpuc_flush_kern_all
 #endif
-ENDPROC(cpu_suspend)
+	adr	lr, BSYM(cpu_suspend_abort)
+	ldmfd	sp!, {r0, pc}		@ call suspend fn
+ENDPROC(__cpu_suspend)
 	.ltorg
 
+cpu_suspend_abort:
+	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
+	mov	sp, r2
+	ldmfd	sp!, {r4 - r11, pc}
+ENDPROC(cpu_suspend_abort)
+
/*
 * r0 = control register value
 * r1 = v:p offset (preserved by cpu_do_resume)
@@ -97,7 +94,9 @@ ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
 	str	r5, [r2, r4, lsl #2]	@ restore old mapping
 	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
-	mov	pc, lr
+	bl	cpu_init		@ restore the und/abt/irq banked regs
+	mov	r0, #0			@ return zero on success
+	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
 
/*
@@ -120,20 +119,11 @@ ENTRY(cpu_resume)
 	ldr	r0, sleep_save_sp	@ stack phys addr
 #endif
 	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
-#ifdef MULTI_CPU
-	@ load v:p, stack, return fn, resume fn
-  ARM(	ldmia	r0!, {r1, sp, lr, pc}	)
-THUMB(	ldmia	r0!, {r1, r2, r3, r4}	)
+	@ load v:p, stack, resume fn
+  ARM(	ldmia	r0!, {r1, sp, pc}	)
+THUMB(	ldmia	r0!, {r1, r2, r3}	)
 THUMB(	mov	sp, r2			)
-THUMB(	mov	lr, r3			)
-THUMB(	bx	r4			)
-#else
-	@ load v:p, stack, return fn
-  ARM(	ldmia	r0!, {r1, sp, lr}	)
-THUMB(	ldmia	r0!, {r1, r2, lr}	)
-THUMB(	mov	sp, r2			)
-	b	cpu_do_resume
-#endif
+THUMB(	bx	r3			)
ENDPROC(cpu_resume)
 
sleep_save_sp:

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 344e52b..167e3cb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	smp_store_cpu_info(cpu);
 
 	/*
-	 * OK, now it's safe to let the boot CPU continue
+	 * OK, now it's safe to let the boot CPU continue.  Wait for
+	 * the CPU migration code to notice that the CPU is online
+	 * before we continue.
 	 */
 	set_cpu_online(cpu, true);
+	while (!cpu_active(cpu))
+		cpu_relax();
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -361,8 +365,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	 */
 	if (max_cpus > ncores)
 		max_cpus = ncores;
-
-	if (max_cpus > 1) {
+	if (ncores > 1 && max_cpus) {
 		/*
 		 * Enable the local timer or broadcast device for the
 		 * boot CPU, but only if we have more than one CPU.
@@ -370,6 +373,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		percpu_timer_setup();
 
 		/*
+		 * Initialise the present map, which describes the set of CPUs
+		 * actually populated at the present time. A platform should
+		 * re-initialize the map in platform_smp_prepare_cpus() if
+		 * present != possible (e.g. physical hotplug).
+		 */
+		init_cpu_present(&cpu_possible_map);
+
+		/*
 		 * Initialise the SCU if there are more than one CPU
 		 * and let them know where to start.
 		 */
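The cpu_active() spin added to secondary_start_kernel() closes a window where the boot CPU could see the secondary online and start queueing work for it before the scheduler's migration code had marked it active. The secondary's side of the handshake, schematically (set_cpu_online, cpu_active and cpu_relax are the real kernel primitives; see kernel/cpu.c for the boot CPU's counterpart):

	/* secondary CPU, after per-CPU initialisation: */
	set_cpu_online(cpu, true);      /* lets the boot CPU proceed   */
	while (!cpu_active(cpu))        /* wait until the scheduler is */
		cpu_relax();            /* willing to migrate to us    */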
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index a1e757c..79ed5e7 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -20,6 +20,7 @@
 #define SCU_INVALIDATE		0x0c
 #define SCU_FPGA_REVISION	0x10
 
+#ifdef CONFIG_SMP
/*
 * Get the number of CPU cores from the SCU configuration
 */
@@ -50,6 +51,7 @@ void __init scu_enable(void __iomem *scu_base)
 	 */
 	flush_cache_all();
 }
+#endif
 
/*
 * Set the executing CPUs power mode as defined.  This will be in

diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 60636f4..2c277d4 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -115,7 +115,7 @@ static void __cpuinit twd_calibrate_rate(void)
 		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
 
 		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
-			(twd_timer_rate / 1000000) % 100);
+			(twd_timer_rate / 10000) % 100);
 	}
 }
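The smp_twd.c change is a plain arithmetic fix: to print a Hz rate as MHz with two decimal places, the fractional field needs the rate in 10 kHz units, not the MHz integer part again. A standalone check:

	#include <stdio.h>

	int main(void)
	{
		unsigned long rate = 123456789;   /* ~123.45 MHz */

		/* old: prints "123.23MHz." (the 123 is reused mod 100) */
		printf("%lu.%02luMHz.\n", rate / 1000000,
			(rate / 1000000) % 100);
		/* new: prints "123.45MHz." */
		printf("%lu.%02luMHz.\n", rate / 1000000,
			(rate / 10000) % 100);
		return 0;
	}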
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index f5cf660..30e302d 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -19,6 +19,8 @@
 #include "tcm.h"
 
 static struct gen_pool *tcm_pool;
+static bool dtcm_present;
+static bool itcm_present;
 
 /* TCM section definitions from the linker */
 extern char __itcm_start, __sitcm_text, __eitcm_text;
@@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len)
 }
 EXPORT_SYMBOL(tcm_free);
 
+bool tcm_dtcm_present(void)
+{
+	return dtcm_present;
+}
+EXPORT_SYMBOL(tcm_dtcm_present);
+
+bool tcm_itcm_present(void)
+{
+	return itcm_present;
+}
+EXPORT_SYMBOL(tcm_itcm_present);
+
 static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
 				  u32 *offset)
 {
@@ -134,6 +148,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
 			(tcm_region & 1) ? "" : "not ");
 	}
 
+	/* Not much fun you can do with a size 0 bank */
+	if (tcm_size == 0)
+		return 0;
+
 	/* Force move the TCM bank to where we want it, enable */
 	tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1;
 
@@ -165,12 +183,20 @@ void __init tcm_init(void)
 	u32 tcm_status = read_cpuid_tcmstatus();
 	u8 dtcm_banks = (tcm_status >> 16) & 0x03;
 	u8 itcm_banks = (tcm_status & 0x03);
+	size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
+	size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
 	char *start;
 	char *end;
 	char *ram;
 	int ret;
 	int i;
 
+	/* Values greater than 2 for D/ITCM banks are "reserved" */
+	if (dtcm_banks > 2)
+		dtcm_banks = 0;
+	if (itcm_banks > 2)
+		itcm_banks = 0;
+
 	/* Setup DTCM if present */
 	if (dtcm_banks > 0) {
 		for (i = 0; i < dtcm_banks; i++) {
@@ -178,6 +204,13 @@ void __init tcm_init(void)
 			if (ret)
 				return;
 		}
+		/* This means you compiled more code than fits into DTCM */
+		if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
+			pr_info("CPU DTCM: %u bytes of code compiled to "
+				"DTCM but only %lu bytes of DTCM present\n",
+				dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
+			goto no_dtcm;
+		}
 		dtcm_res.end = dtcm_end - 1;
 		request_resource(&iomem_resource, &dtcm_res);
 		dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
@@ -186,12 +219,16 @@ void __init tcm_init(void)
 		start = &__sdtcm_data;
 		end   = &__edtcm_data;
 		ram   = &__dtcm_start;
-		/* This means you compiled more code than fits into DTCM */
-		BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET));
-		memcpy(start, ram, (end-start));
-		pr_debug("CPU DTCM: copied data from %p - %p\n", start, end);
+		memcpy(start, ram, dtcm_code_sz);
+		pr_debug("CPU DTCM: copied data from %p - %p\n",
+			 start, end);
+		dtcm_present = true;
+	} else if (dtcm_code_sz) {
+		pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no "
+			"DTCM banks present in CPU\n", dtcm_code_sz);
 	}
 
+no_dtcm:
 	/* Setup ITCM if present */
 	if (itcm_banks > 0) {
 		for (i = 0; i < itcm_banks; i++) {
@@ -199,6 +236,13 @@ void __init tcm_init(void)
 			if (ret)
 				return;
 		}
+		/* This means you compiled more code than fits into ITCM */
+		if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
+			pr_info("CPU ITCM: %u bytes of code compiled to "
+				"ITCM but only %lu bytes of ITCM present\n",
+				itcm_code_sz, (itcm_end - ITCM_OFFSET));
+			return;
+		}
 		itcm_res.end = itcm_end - 1;
 		request_resource(&iomem_resource, &itcm_res);
 		itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
@@ -207,10 +251,13 @@ void __init tcm_init(void)
 		start = &__sitcm_text;
 		end   = &__eitcm_text;
 		ram   = &__itcm_start;
-		/* This means you compiled more code than fits into ITCM */
-		BUG_ON((end - start) > (itcm_end - ITCM_OFFSET));
-		memcpy(start, ram, (end-start));
-		pr_debug("CPU ITCM: copied code from %p - %p\n", start, end);
+		memcpy(start, ram, itcm_code_sz);
+		pr_debug("CPU ITCM: copied code from %p - %p\n",
+			 start, end);
+		itcm_present = true;
+	} else if (itcm_code_sz) {
+		pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no "
+			"ITCM banks present in CPU\n", itcm_code_sz);
 	}
 }
 
@@ -221,7 +268,6 @@ void __init tcm_init(void)
 */
static int __init setup_tcm_pool(void)
{
-	u32 tcm_status = read_cpuid_tcmstatus();
	u32 dtcm_pool_start = (u32) &__edtcm_data;
	u32 itcm_pool_start = (u32) &__eitcm_text;
	int ret;
@@ -236,7 +282,7 @@ static int __init setup_tcm_pool(void)
	pr_debug("Setting up TCM memory pool\n");
 
	/* Add the rest of DTCM to the TCM pool */
-	if (tcm_status & (0x03 << 16)) {
+	if (dtcm_present) {
		if (dtcm_pool_start < dtcm_end) {
			ret = gen_pool_add(tcm_pool, dtcm_pool_start,
					   dtcm_end - dtcm_pool_start, -1);
@@ -253,7 +299,7 @@ static int __init setup_tcm_pool(void)
	}
 
	/* Add the rest of ITCM to the TCM pool */
-	if (tcm_status & 0x03) {
+	if (itcm_present) {
		if (itcm_pool_start < itcm_end) {
			ret = gen_pool_add(tcm_pool, itcm_pool_start,
					   itcm_end - itcm_pool_start, -1);
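The new tcm_dtcm_present()/tcm_itcm_present() accessors give drivers a way to probe for TCM before committing to it. A sketch of caller-side use, assuming the existing tcm_alloc()/tcm_free() API from this file and an ordinary-RAM fallback:

	#include <linux/slab.h>
	#include <asm/tcm.h>

	void *buf = NULL;

	if (tcm_dtcm_present())                 /* bank really exists */
		buf = tcm_alloc(64);
	if (!buf)
		buf = kmalloc(64, GFP_KERNEL);  /* fall back to SDRAM */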
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index e5287f2..bf977f8 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -38,57 +38,6 @@ jiffies = jiffies_64 + 4;
 
 SECTIONS
 {
-#ifdef CONFIG_XIP_KERNEL
-	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
-#else
-	. = PAGE_OFFSET + TEXT_OFFSET;
-#endif
-
-	.init : {			/* Init code and data		*/
-		_stext = .;
-		_sinittext = .;
-			HEAD_TEXT
-			INIT_TEXT
-			ARM_EXIT_KEEP(EXIT_TEXT)
-		_einittext = .;
-		ARM_CPU_DISCARD(PROC_INFO)
-		__arch_info_begin = .;
-			*(.arch.info.init)
-		__arch_info_end = .;
-		__tagtable_begin = .;
-			*(.taglist.init)
-		__tagtable_end = .;
-#ifdef CONFIG_SMP_ON_UP
-		__smpalt_begin = .;
-			*(.alt.smp.init)
-		__smpalt_end = .;
-#endif
-
-		__pv_table_begin = .;
-			*(.pv_table)
-		__pv_table_end = .;
-
-		INIT_SETUP(16)
-
-		INIT_CALLS
-		CON_INITCALL
-		SECURITY_INITCALL
-		INIT_RAM_FS
-
-#ifndef CONFIG_XIP_KERNEL
-		__init_begin = _stext;
-		INIT_DATA
-		ARM_EXIT_KEEP(EXIT_DATA)
-#endif
-	}
-
-	PERCPU_SECTION(32)
-
-#ifndef CONFIG_XIP_KERNEL
-	. = ALIGN(PAGE_SIZE);
-	__init_end = .;
-#endif
-
 	/*
 	 * unwind exit sections must be discarded before the rest of the
 	 * unwind sections get included.
@@ -106,10 +55,22 @@ SECTIONS
 		*(.fixup)
 		*(__ex_table)
 #endif
+#ifndef CONFIG_SMP_ON_UP
+		*(.alt.smp.init)
+#endif
 	}
 
+#ifdef CONFIG_XIP_KERNEL
+	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
+#else
+	. = PAGE_OFFSET + TEXT_OFFSET;
+#endif
+	.head.text : {
+		_text = .;
+		HEAD_TEXT
+	}
+
 	.text : {			/* Real text segment		*/
-		_text = .;		/* Text and read-only data	*/
+		_stext = .;		/* Text and read-only data	*/
 			__exception_text_start = .;
 			*(.exception.text)
 			__exception_text_end = .;
@@ -122,8 +83,6 @@ SECTIONS
 			*(.fixup)
 #endif
 		*(.gnu.warning)
-		*(.rodata)
-		*(.rodata.*)
 		*(.glue_7)
 		*(.glue_7t)
 		. = ALIGN(4);
@@ -152,10 +111,63 @@ SECTIONS
 
 	_etext = .;			/* End of text and rodata section */
 
+#ifndef CONFIG_XIP_KERNEL
+	. = ALIGN(PAGE_SIZE);
+	__init_begin = .;
+#endif
+
+	INIT_TEXT_SECTION(8)
+	.exit.text : {
+		ARM_EXIT_KEEP(EXIT_TEXT)
+	}
+	.init.proc.info : {
+		ARM_CPU_DISCARD(PROC_INFO)
+	}
+	.init.arch.info : {
+		__arch_info_begin = .;
+		*(.arch.info.init)
+		__arch_info_end = .;
+	}
+	.init.tagtable : {
+		__tagtable_begin = .;
+		*(.taglist.init)
+		__tagtable_end = .;
+	}
+#ifdef CONFIG_SMP_ON_UP
+	.init.smpalt : {
+		__smpalt_begin = .;
+		*(.alt.smp.init)
+		__smpalt_end = .;
+	}
+#endif
+	.init.pv_table : {
+		__pv_table_begin = .;
+		*(.pv_table)
+		__pv_table_end = .;
+	}
+	.init.data : {
+#ifndef CONFIG_XIP_KERNEL
+		INIT_DATA
+#endif
+		INIT_SETUP(16)
+		INIT_CALLS
+		CON_INITCALL
+		SECURITY_INITCALL
+		INIT_RAM_FS
+	}
+#ifndef CONFIG_XIP_KERNEL
+	.exit.data : {
+		ARM_EXIT_KEEP(EXIT_DATA)
+	}
+#endif
+
+	PERCPU_SECTION(32)
+
 #ifdef CONFIG_XIP_KERNEL
 	__data_loc = ALIGN(4);		/* location in binary */
 	. = PAGE_OFFSET + TEXT_OFFSET;
 #else
+	__init_end = .;
 	. = ALIGN(THREAD_SIZE);
 	__data_loc = .;
 #endif
@@ -270,12 +282,6 @@ SECTIONS
 
 	/* Default discards */
 	DISCARDS
-
-#ifndef CONFIG_SMP_ON_UP
-	/DISCARD/ : {
-		*(.alt.smp.init)
-	}
-#endif
 }
 
 /*