Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--	arch/arm/kernel/smp.c	139
1 file changed, 132 insertions, 7 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ebd8ad2..d100eac 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -19,14 +19,15 @@
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
-#include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
+#include <linux/cpufreq.h>
 
 #include <linux/atomic.h>
+#include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
@@ -42,6 +43,7 @@
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
 #include <asm/smp_plat.h>
+#include <asm/mach/arch.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -50,8 +52,15 @@
  */
 struct secondary_data secondary_data;
 
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+
 enum ipi_msg_type {
-	IPI_TIMER = 2,
+	IPI_WAKEUP,
+	IPI_TIMER,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
@@ -60,6 +69,14 @@ enum ipi_msg_type {
 
 static DECLARE_COMPLETION(cpu_running);
 
+static struct smp_operations smp_ops;
+
+void __init smp_set_ops(struct smp_operations *ops)
+{
+	if (ops)
+		smp_ops = *ops;
+};
+
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -100,13 +117,64 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 	return ret;
 }
 
+/* platform specific SMP operations */
+void __init smp_init_cpus(void)
+{
+	if (smp_ops.smp_init_cpus)
+		smp_ops.smp_init_cpus();
+}
+
+static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+	if (smp_ops.smp_prepare_cpus)
+		smp_ops.smp_prepare_cpus(max_cpus);
+}
+
+static void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+	if (smp_ops.smp_secondary_init)
+		smp_ops.smp_secondary_init(cpu);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	if (smp_ops.smp_boot_secondary)
+		return smp_ops.smp_boot_secondary(cpu, idle);
+	return -ENOSYS;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
+static int platform_cpu_kill(unsigned int cpu)
+{
+	if (smp_ops.cpu_kill)
+		return smp_ops.cpu_kill(cpu);
+	return 1;
+}
+
+static void platform_cpu_die(unsigned int cpu)
+{
+	if (smp_ops.cpu_die)
+		smp_ops.cpu_die(cpu);
+}
+
+static int platform_cpu_disable(unsigned int cpu)
+{
+	if (smp_ops.cpu_disable)
+		return smp_ops.cpu_disable(cpu);
+
+	/*
+	 * By default, allow disabling all CPUs except the first one,
+	 * since this is special on a lot of platforms, e.g. because
+	 * of clock tick interrupts.
+	 */
+	return cpu == 0 ? -EPERM : 0;
+}
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpu_disable(void)
+int __cpuinit __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
 	int ret;
@@ -149,7 +217,7 @@ static DECLARE_COMPLETION(cpu_died);
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpu_die(unsigned int cpu)
+void __cpuinit __cpu_die(unsigned int cpu)
 {
 	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
 		pr_err("CPU%u: cpu didn't die\n", cpu);
@@ -347,7 +415,8 @@ void arch_send_call_function_single_ipi(int cpu)
 }
 
 static const char *ipi_types[NR_IPI] = {
-#define S(x,s)	[x - IPI_TIMER] = s
+#define S(x,s)	[x] = s
+	S(IPI_WAKEUP, "CPU wakeup interrupts"),
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 	S(IPI_CALL_FUNC, "Function call interrupts"),
@@ -500,10 +569,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	unsigned int cpu = smp_processor_id();
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
-		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
+	if (ipinr < NR_IPI)
+		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
 
 	switch (ipinr) {
+	case IPI_WAKEUP:
+		break;
+
 	case IPI_TIMER:
 		irq_enter();
 		ipi_timer();
@@ -584,3 +656,56 @@ int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+			    unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	int cpu = freq->cpu;
+
+	if (freq->flags & CPUFREQ_CONST_LOOPS)
+		return NOTIFY_OK;
+
+	if (!per_cpu(l_p_j_ref, cpu)) {
+		per_cpu(l_p_j_ref, cpu) =
+			per_cpu(cpu_data, cpu).loops_per_jiffy;
+		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+		if (!global_l_p_j_ref) {
+			global_l_p_j_ref = loops_per_jiffy;
+			global_l_p_j_ref_freq = freq->old;
+		}
+	}
+
+	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+						global_l_p_j_ref_freq,
+						freq->new);
+		per_cpu(cpu_data, cpu).loops_per_jiffy =
+			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+				      per_cpu(l_p_j_ref_freq, cpu),
+				      freq->new);
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+	.notifier_call  = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+	return cpufreq_register_notifier(&cpufreq_notifier,
+					 CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif
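The core of the change is the smp_operations dispatch table: the old build-time platform hooks (boot_secondary(), platform_secondary_init(), and friends) become runtime-registered callbacks, and each wrapper falls back to a sane default when a hook is left NULL. A minimal sketch of how a platform could populate and register the table follows; the "myplat" names are hypothetical, and the only fields assumed are the ones the wrappers in this diff dereference (struct smp_operations itself presumably lives in <asm/smp.h>, which the diff starts including):

#include <linux/init.h>
#include <linux/sched.h>
#include <asm/smp.h>

/* Hypothetical platform hooks, for illustration only. */
static void __init myplat_smp_init_cpus(void)
{
	/* e.g. probe the core count and call set_cpu_possible() per CPU */
}

static int __cpuinit myplat_boot_secondary(unsigned int cpu,
					   struct task_struct *idle)
{
	/* e.g. release @cpu from the boot holding pen (see next sketch) */
	return 0;
}

static struct smp_operations myplat_smp_ops __initdata = {
	.smp_init_cpus		= myplat_smp_init_cpus,
	.smp_boot_secondary	= myplat_boot_secondary,
	/* hooks left NULL fall back to the defaults in smp.c */
};

void __init myplat_init_early(void)
{
	smp_set_ops(&myplat_smp_ops);
}

Note that smp_set_ops() copies the structure by value, which is why the platform's table can safely live in __initdata. The newly included <asm/mach/arch.h> hints that the ops are normally meant to be carried by the machine descriptor and handed to smp_set_ops() by core code during early setup; the direct call above is just the simplest way to exercise the API.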
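The pen_release variable moved into smp.c implements the "holding pen" protocol many ARM platforms use: parked secondaries spin (typically in WFE) comparing pen_release against their own CPU number, and the boot CPU nominates one core at a time. The boot-side half of that handshake conventionally looks like the sketch below; this is the common pattern, not code from this diff, and the function name, one-second timeout, and simplified barriers are all illustrative (production code also cleans pen_release out of the cache so the not-yet-coherent secondary sees it):

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <asm/smp_plat.h>

extern volatile int pen_release;	/* defined in smp.c by this patch */

static int __cpuinit myplat_boot_secondary(unsigned int cpu,
					   struct task_struct *idle)
{
	unsigned long timeout;

	/* Nominate @cpu as the next core allowed out of the pen. */
	pen_release = cpu;
	smp_wmb();		/* simplified: real code also flushes the cache line */

	dsb_sev();		/* wake every core parked in WFE; only @cpu matches */

	/* The secondary acknowledges by writing -1 back to pen_release. */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;
	}

	return pen_release != -1 ? -ENOSYS : 0;
}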
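The CONFIG_CPU_FREQ block keeps loops_per_jiffy honest across frequency changes: per-CPU and global reference values are latched on the first transition, and every later transition rescales the delay-loop counters linearly. The asymmetric condition is deliberate: lpj is raised on PRECHANGE when speeding up and lowered on POSTCHANGE when slowing down, so udelay() never runs with a counter too small for the current clock. cpufreq_scale() is, in effect, old * mult / div; the standalone snippet below reproduces that arithmetic with made-up calibration numbers (they are not from real output):

#include <stdio.h>

/* Mirrors the old * mult / div arithmetic of cpufreq_scale(). */
static unsigned long scale(unsigned long old, unsigned int div,
			   unsigned int mult)
{
	return (unsigned long)(((unsigned long long)old * mult) / div);
}

int main(void)
{
	unsigned long ref_lpj = 2490368;	/* loops_per_jiffy at calibration */
	unsigned int ref_khz = 500000;		/* ...captured at this frequency */
	unsigned int new_khz = 1000000;		/* frequency after the switch */

	/* prints 4980736: doubling the clock doubles loops_per_jiffy */
	printf("%lu\n", scale(ref_lpj, ref_khz, new_khz));
	return 0;
}

The CPUFREQ_CONST_LOOPS early-out covers cores whose delay loop runs from a constant-rate timer rather than the CPU clock, where no rescaling is needed.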