author    | Konrad Eisele <konrad@gaisler.com> | 2009-08-31 22:08:13 (GMT)
committer | David S. Miller <davem@davemloft.net> | 2009-11-02 12:19:42 (GMT)
commit    | 8401707ff645521e9f21cbb8fe3b138f60e85680 (patch)
tree      | 114287cf273b57f96d0e132cd2274c7afe60b120 /arch/sparc/kernel/leon_smp.c
parent    | b6727b12dd2ffb4a890eb5b13a298230c29ba45d (diff)
download  | linux-8401707ff645521e9f21cbb8fe3b138f60e85680.tar.xz
sparc,leon: Sparc-Leon SMP support
Support SMP for a Sparc-Leon multiprocessor system.
Add Leon specific SMP code to arch/sparc/kernel/leon_smp.c.
Signed-off-by: Konrad Eisele <konrad@gaisler.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/leon_smp.c')
-rw-r--r-- | arch/sparc/kernel/leon_smp.c | 468
1 file changed, 468 insertions, 0 deletions
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
new file mode 100644
index 0000000..05c0dad
--- /dev/null
+++ b/arch/sparc/kernel/leon_smp.c
@@ -0,0 +1,468 @@
+/* leon_smp.c: Sparc-Leon SMP support.
+ *
+ * based on sun4m_smp.c
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
+ * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
+ */
+
+#include <asm/head.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/profile.h>
+#include <linux/pm.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+#include <asm/irq_regs.h>
+
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/cpudata.h>
+#include <asm/asi.h>
+#include <asm/leon.h>
+#include <asm/leon_amba.h>
+
+#ifdef CONFIG_SPARC_LEON
+
+#include "irq.h"
+
+extern ctxd_t *srmmu_ctx_table_phys;
+static int smp_processors_ready;
+extern volatile unsigned long cpu_callin_map[NR_CPUS];
+extern unsigned char boot_cpu_id;
+extern cpumask_t smp_commenced_mask;
+void __init leon_configure_cache_smp(void);
+
+static inline unsigned long do_swap(volatile unsigned long *ptr,
+				    unsigned long val)
+{
+	__asm__ __volatile__("swapa [%1] %2, %0\n\t" : "=&r"(val)
+			     : "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
+			     : "memory");
+	return val;
+}
+
+static void smp_setup_percpu_timer(void);
+
+void __cpuinit leon_callin(void)
+{
+	int cpuid = hard_smpleon_processor_id();
+
+	local_flush_cache_all();
+	local_flush_tlb_all();
+	leon_configure_cache_smp();
+
+	/* Get our local ticker going. */
+	smp_setup_percpu_timer();
+
+	calibrate_delay();
+	smp_store_cpu_info(cpuid);
+
+	local_flush_cache_all();
+	local_flush_tlb_all();
+
+	/*
+	 * Unblock the master CPU _only_ when the scheduler state
+	 * of all secondary CPUs will be up-to-date, so after
+	 * the SMP initialization the master will be just allowed
+	 * to call the scheduler code.
+	 * Allow master to continue.
+	 */
+	do_swap(&cpu_callin_map[cpuid], 1);
+
+	local_flush_cache_all();
+	local_flush_tlb_all();
+
+	cpu_probe();
+
+	/* Fix idle thread fields. */
+	__asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
+			     : "memory" /* paranoid */);
+
+	/* Attach to the address space of init_task. */
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+
+	while (!cpu_isset(cpuid, smp_commenced_mask))
+		mb();
+
+	local_irq_enable();
+	cpu_set(cpuid, cpu_online_map);
+}
+
+/*
+ * Cycle through the processors asking the PROM to start each one.
+ */
+
+extern struct linux_prom_registers smp_penguin_ctable;
+
+void __init leon_configure_cache_smp(void)
+{
+	unsigned long cfg = sparc_leon3_get_dcachecfg();
+	int me = smp_processor_id();
+
+	if (ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg) > 4) {
+		printk(KERN_INFO "Note: SMP with snooping only works on 4k cache, found %dk(0x%x) on cpu %d, disabling caches\n",
+		       (unsigned int)ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg),
+		       (unsigned int)cfg, (unsigned int)me);
+		sparc_leon3_disable_cache();
+	} else {
+		if (cfg & ASI_LEON3_SYSCTRL_CFG_SNOOPING) {
+			sparc_leon3_enable_snooping();
+		} else {
+			printk(KERN_INFO "Note: You have to enable snooping in the vhdl model cpu %d, disabling caches\n",
+			       me);
+			sparc_leon3_disable_cache();
+		}
+	}
+
+	local_flush_cache_all();
+	local_flush_tlb_all();
+}
+
+void leon_smp_setbroadcast(unsigned int mask)
+{
+	int broadcast =
+	    ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
+	      LEON3_IRQMPSTATUS_BROADCAST) & 1);
+	if (!broadcast) {
+		prom_printf("######## !!!! The irqmp-ctrl must have broadcast enabled, smp wont work !!!!! ####### nr cpus: %d\n",
+			    leon_smp_nrcpus());
+		if (leon_smp_nrcpus() > 1) {
+			BUG();
+		} else {
+			prom_printf("continue anyway\n");
+			return;
+		}
+	}
+	LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask);
+}
+
+unsigned int leon_smp_getbroadcast(void)
+{
+	unsigned int mask;
+	mask = LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast));
+	return mask;
+}
+
+int leon_smp_nrcpus(void)
+{
+	int nrcpu =
+	    ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
+	      LEON3_IRQMPSTATUS_CPUNR) & 0xf) + 1;
+	return nrcpu;
+}
+
+void __init leon_boot_cpus(void)
+{
+	int nrcpu = leon_smp_nrcpus();
+	int me = smp_processor_id();
+
+	printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x \n", (unsigned int)me,
+	       (unsigned int)nrcpu, (unsigned int)NR_CPUS,
+	       (unsigned int)&(leon3_irqctrl_regs->mpstatus));
+
+	leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me);
+	leon_enable_irq_cpu(LEON3_IRQ_TICKER, me);
+	leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, me);
+
+	leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
+
+	leon_configure_cache_smp();
+	smp_setup_percpu_timer();
+	local_flush_cache_all();
+
+}
+
+int __cpuinit leon_boot_one_cpu(int i)
+{
+
+	struct task_struct *p;
+	int timeout;
+
+	/* Cook up an idler for this guy. */
+	p = fork_idle(i);
+
+	current_set[i] = task_thread_info(p);
+
+	/* See trampoline.S:leon_smp_cpu_startup for details...
+	 * Initialize the contexts table
+	 * Since the call to prom_startcpu() trashes the structure,
+	 * we need to re-initialize it for each cpu
+	 */
+	smp_penguin_ctable.which_io = 0;
+	smp_penguin_ctable.phys_addr = (unsigned int)srmmu_ctx_table_phys;
+	smp_penguin_ctable.reg_size = 0;
+
+	/* whirrr, whirrr, whirrrrrrrrr... */
+	printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i,
+	       (unsigned int)&leon3_irqctrl_regs->mpstatus);
+	local_flush_cache_all();
+
+	LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i);
+
+	/* wheee... it's going... */
+	for (timeout = 0; timeout < 10000; timeout++) {
+		if (cpu_callin_map[i])
+			break;
+		udelay(200);
+	}
+	printk(KERN_INFO "Started CPU %d \n", (unsigned int)i);
+
+	if (!(cpu_callin_map[i])) {
+		printk(KERN_ERR "Processor %d is stuck.\n", i);
+		return -ENODEV;
+	} else {
+		leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i);
+		leon_enable_irq_cpu(LEON3_IRQ_TICKER, i);
+		leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, i);
+	}
+
+	local_flush_cache_all();
+	return 0;
+}
+
+void __init leon_smp_done(void)
+{
+
+	int i, first;
+	int *prev;
+
+	/* setup cpu list for irq rotation */
+	first = 0;
+	prev = &first;
+	for (i = 0; i < NR_CPUS; i++) {
+		if (cpu_online(i)) {
+			*prev = i;
+			prev = &cpu_data(i).next;
+		}
+	}
+	*prev = first;
+	local_flush_cache_all();
+
+	/* Free unneeded trap tables */
+	if (!cpu_isset(1, cpu_present_map)) {
+		ClearPageReserved(virt_to_page(trapbase_cpu1));
+		init_page_count(virt_to_page(trapbase_cpu1));
+		free_page((unsigned long)trapbase_cpu1);
+		totalram_pages++;
+		num_physpages++;
+	}
+	if (!cpu_isset(2, cpu_present_map)) {
+		ClearPageReserved(virt_to_page(trapbase_cpu2));
+		init_page_count(virt_to_page(trapbase_cpu2));
+		free_page((unsigned long)trapbase_cpu2);
+		totalram_pages++;
+		num_physpages++;
+	}
+	if (!cpu_isset(3, cpu_present_map)) {
+		ClearPageReserved(virt_to_page(trapbase_cpu3));
+		init_page_count(virt_to_page(trapbase_cpu3));
+		free_page((unsigned long)trapbase_cpu3);
+		totalram_pages++;
+		num_physpages++;
+	}
+	/* Ok, they are spinning and ready to go. */
+	smp_processors_ready = 1;
+
+}
+
+void leon_irq_rotate(int cpu)
+{
+}
+
+static struct smp_funcall {
+	smpfunc_t func;
+	unsigned long arg1;
+	unsigned long arg2;
+	unsigned long arg3;
+	unsigned long arg4;
+	unsigned long arg5;
+	unsigned long processors_in[NR_CPUS];	/* Set when ipi entered. */
+	unsigned long processors_out[NR_CPUS];	/* Set when ipi exited. */
+} ccall_info;
+
+static DEFINE_SPINLOCK(cross_call_lock);
+
+/* Cross calls must be serialized, at least currently. */
+static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+			    unsigned long arg2, unsigned long arg3,
+			    unsigned long arg4)
+{
+	if (smp_processors_ready) {
+		register int high = NR_CPUS - 1;
+		unsigned long flags;
+
+		spin_lock_irqsave(&cross_call_lock, flags);
+
+		{
+			/* If you make changes here, make sure gcc generates proper code... */
+			register smpfunc_t f asm("i0") = func;
+			register unsigned long a1 asm("i1") = arg1;
+			register unsigned long a2 asm("i2") = arg2;
+			register unsigned long a3 asm("i3") = arg3;
+			register unsigned long a4 asm("i4") = arg4;
+			register unsigned long a5 asm("i5") = 0;
+
+			__asm__ __volatile__("std %0, [%6]\n\t"
+					     "std %2, [%6 + 8]\n\t"
+					     "std %4, [%6 + 16]\n\t" : :
+					     "r"(f), "r"(a1), "r"(a2), "r"(a3),
+					     "r"(a4), "r"(a5),
+					     "r"(&ccall_info.func));
+		}
+
+		/* Init receive/complete mapping, plus fire the IPI's off. */
+		{
+			register int i;
+
+			cpu_clear(smp_processor_id(), mask);
+			cpus_and(mask, cpu_online_map, mask);
+			for (i = 0; i <= high; i++) {
+				if (cpu_isset(i, mask)) {
+					ccall_info.processors_in[i] = 0;
+					ccall_info.processors_out[i] = 0;
+					set_cpu_int(i, LEON3_IRQ_CROSS_CALL);
+
+				}
+			}
+		}
+
+		{
+			register int i;
+
+			i = 0;
+			do {
+				if (!cpu_isset(i, mask))
+					continue;
+
+				while (!ccall_info.processors_in[i])
+					barrier();
+			} while (++i <= high);
+
+			i = 0;
+			do {
+				if (!cpu_isset(i, mask))
+					continue;
+
+				while (!ccall_info.processors_out[i])
+					barrier();
+			} while (++i <= high);
+		}
+
+		spin_unlock_irqrestore(&cross_call_lock, flags);
+	}
+}
+
+/* Running cross calls. */
+void leon_cross_call_irq(void)
+{
+	int i = smp_processor_id();
+
+	ccall_info.processors_in[i] = 1;
+	ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
+			ccall_info.arg4, ccall_info.arg5);
+	ccall_info.processors_out[i] = 1;
+}
+
+void leon_percpu_timer_interrupt(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+	int cpu = smp_processor_id();
+
+	old_regs = set_irq_regs(regs);
+
+	leon_clear_profile_irq(cpu);
+
+	profile_tick(CPU_PROFILING);
+
+	if (!--prof_counter(cpu)) {
+		int user = user_mode(regs);
+
+		irq_enter();
+		update_process_times(user);
+		irq_exit();
+
+		prof_counter(cpu) = prof_multiplier(cpu);
+	}
+	set_irq_regs(old_regs);
+}
+
+static void __init smp_setup_percpu_timer(void)
+{
+	int cpu = smp_processor_id();
+
+	prof_counter(cpu) = prof_multiplier(cpu) = 1;
+}
+
+void __init leon_blackbox_id(unsigned *addr)
+{
+	int rd = *addr & 0x3e000000;
+	int rs1 = rd >> 11;
+
+	/* patch places where ___b_hard_smp_processor_id appears */
+	addr[0] = 0x81444000 | rd;	/* rd %asr17, reg */
+	addr[1] = 0x8130201c | rd | rs1;	/* srl reg, 0x1c, reg */
+	addr[2] = 0x01000000;	/* nop */
+}
+
+void __init leon_blackbox_current(unsigned *addr)
+{
+	int rd = *addr & 0x3e000000;
+	int rs1 = rd >> 11;
+
+	/* patch LOAD_CURRENT macro where ___b_load_current appears */
+	addr[0] = 0x81444000 | rd;	/* rd %asr17, reg */
+	addr[2] = 0x8130201c | rd | rs1;	/* srl reg, 0x1c, reg */
+	addr[4] = 0x81282002 | rd | rs1;	/* sll reg, 0x2, reg */
+
+}
+
+/*
+ * CPU idle callback function
+ * See .../arch/sparc/kernel/process.c
+ */
+void pmc_leon_idle(void)
+{
+	__asm__ volatile ("mov %g0, %asr19");
+}
+
+void __init leon_init_smp(void)
+{
+	/* Patch ipi15 trap table */
+	t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m);
+
+	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, leon_blackbox_id);
+	BTFIXUPSET_BLACKBOX(load_current, leon_blackbox_current);
+	BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id,
+			BTFIXUPCALL_NORM);
+
+#ifndef PMC_NO_IDLE
+	/* Assign power management IDLE handler */
+	pm_idle = pmc_leon_idle;
+	printk(KERN_INFO "leon: power management initialized\n");
+#endif
+
+}
+
+#endif /* CONFIG_SPARC_LEON */