Diffstat (limited to 'arch/arm/kernel')
30 files changed, 533 insertions, 843 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 5bbec7b..5f3338e 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL) += debug.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o +obj-$(CONFIG_ARM_PSCI) += psci.o extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index c8ef207..d957a51 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -9,516 +9,53 @@ * published by the Free Software Foundation. */ #include <linux/init.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/device.h> -#include <linux/smp.h> -#include <linux/cpu.h> -#include <linux/jiffies.h> -#include <linux/clockchips.h> -#include <linux/interrupt.h> -#include <linux/of_irq.h> -#include <linux/io.h> +#include <linux/types.h> +#include <linux/errno.h> -#include <asm/cputype.h> #include <asm/delay.h> -#include <asm/localtimer.h> -#include <asm/arch_timer.h> -#include <asm/system_info.h> #include <asm/sched_clock.h> -static unsigned long arch_timer_rate; +#include <clocksource/arm_arch_timer.h> -enum ppi_nr { - PHYS_SECURE_PPI, - PHYS_NONSECURE_PPI, - VIRT_PPI, - HYP_PPI, - MAX_TIMER_PPI -}; - -static int arch_timer_ppi[MAX_TIMER_PPI]; - -static struct clock_event_device __percpu **arch_timer_evt; -static struct delay_timer arch_delay_timer; - -static bool arch_timer_use_virtual = true; - -/* - * Architected system timer support. - */ - -#define ARCH_TIMER_CTRL_ENABLE (1 << 0) -#define ARCH_TIMER_CTRL_IT_MASK (1 << 1) -#define ARCH_TIMER_CTRL_IT_STAT (1 << 2) - -#define ARCH_TIMER_REG_CTRL 0 -#define ARCH_TIMER_REG_FREQ 1 -#define ARCH_TIMER_REG_TVAL 2 - -#define ARCH_TIMER_PHYS_ACCESS 0 -#define ARCH_TIMER_VIRT_ACCESS 1 - -/* - * These register accessors are marked inline so the compiler can - * nicely work out which register we want, and chuck away the rest of - * the code. At least it does so with a recent GCC (4.6.3). 
- */ -static inline void arch_timer_reg_write(const int access, const int reg, u32 val) -{ - if (access == ARCH_TIMER_PHYS_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); - break; - } - } - - if (access == ARCH_TIMER_VIRT_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val)); - break; - } - } - - isb(); -} - -static inline u32 arch_timer_reg_read(const int access, const int reg) -{ - u32 val = 0; - - if (access == ARCH_TIMER_PHYS_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); - break; - case ARCH_TIMER_REG_FREQ: - asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); - break; - } - } - - if (access == ARCH_TIMER_VIRT_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val)); - break; - } - } - - return val; -} - -static inline cycle_t arch_timer_counter_read(const int access) -{ - cycle_t cval = 0; - - if (access == ARCH_TIMER_PHYS_ACCESS) - asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); - - if (access == ARCH_TIMER_VIRT_ACCESS) - asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); - - return cval; -} - -static inline cycle_t arch_counter_get_cntpct(void) -{ - return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS); -} - -static inline cycle_t arch_counter_get_cntvct(void) -{ - return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS); -} - -static irqreturn_t inline timer_handler(const int access, - struct clock_event_device *evt) -{ - unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); - if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { - ctrl |= ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); - evt->event_handler(evt); - return IRQ_HANDLED; - } - - return IRQ_NONE; -} - -static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) -{ - struct clock_event_device *evt = *(struct clock_event_device **)dev_id; - - return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); -} - -static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) -{ - struct clock_event_device *evt = *(struct clock_event_device **)dev_id; - - return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); -} - -static inline void timer_set_mode(const int access, int mode) -{ - unsigned long ctrl; - switch (mode) { - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); - ctrl &= ~ARCH_TIMER_CTRL_ENABLE; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); - break; - default: - break; - } -} - -static void arch_timer_set_mode_virt(enum clock_event_mode mode, - struct clock_event_device *clk) -{ - timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode); -} - -static void arch_timer_set_mode_phys(enum clock_event_mode mode, - struct clock_event_device *clk) -{ - timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode); -} - -static inline void set_next_event(const int access, unsigned long evt) -{ - unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); - ctrl |= 
ARCH_TIMER_CTRL_ENABLE; - ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt); - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); -} - -static int arch_timer_set_next_event_virt(unsigned long evt, - struct clock_event_device *unused) -{ - set_next_event(ARCH_TIMER_VIRT_ACCESS, evt); - return 0; -} - -static int arch_timer_set_next_event_phys(unsigned long evt, - struct clock_event_device *unused) -{ - set_next_event(ARCH_TIMER_PHYS_ACCESS, evt); - return 0; -} - -static int __cpuinit arch_timer_setup(struct clock_event_device *clk) -{ - clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; - clk->name = "arch_sys_timer"; - clk->rating = 450; - if (arch_timer_use_virtual) { - clk->irq = arch_timer_ppi[VIRT_PPI]; - clk->set_mode = arch_timer_set_mode_virt; - clk->set_next_event = arch_timer_set_next_event_virt; - } else { - clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; - clk->set_mode = arch_timer_set_mode_phys; - clk->set_next_event = arch_timer_set_next_event_phys; - } - - clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); - - clockevents_config_and_register(clk, arch_timer_rate, - 0xf, 0x7fffffff); - - *__this_cpu_ptr(arch_timer_evt) = clk; - - if (arch_timer_use_virtual) - enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); - else { - enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0); - if (arch_timer_ppi[PHYS_NONSECURE_PPI]) - enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); - } - - return 0; -} - -/* Is the optional system timer available? */ -static int local_timer_is_architected(void) -{ - return (cpu_architecture() >= CPU_ARCH_ARMv7) && - ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1; -} - -static int arch_timer_available(void) -{ - unsigned long freq; - - if (!local_timer_is_architected()) - return -ENXIO; - - if (arch_timer_rate == 0) { - freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS, - ARCH_TIMER_REG_FREQ); - - /* Check the timer frequency. */ - if (freq == 0) { - pr_warn("Architected timer frequency not available\n"); - return -EINVAL; - } - - arch_timer_rate = freq; - } - - pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n", - arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100, - arch_timer_use_virtual ? "virt" : "phys"); - return 0; -} - -static u32 notrace arch_counter_get_cntpct32(void) -{ - cycle_t cnt = arch_counter_get_cntpct(); - - /* - * The sched_clock infrastructure only knows about counters - * with at most 32bits. Forget about the upper 24 bits for the - * time being... - */ - return (u32)cnt; -} - -static u32 notrace arch_counter_get_cntvct32(void) -{ - cycle_t cnt = arch_counter_get_cntvct(); - - /* - * The sched_clock infrastructure only knows about counters - * with at most 32bits. Forget about the upper 24 bits for the - * time being... - */ - return (u32)cnt; -} - -static cycle_t arch_counter_read(struct clocksource *cs) -{ - /* - * Always use the physical counter for the clocksource. - * CNTHCTL.PL1PCTEN must be set to 1. - */ - return arch_counter_get_cntpct(); -} - -static unsigned long arch_timer_read_current_timer(void) +static unsigned long arch_timer_read_counter_long(void) { - return arch_counter_get_cntpct(); + return arch_timer_read_counter(); } -static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) +static u32 arch_timer_read_counter_u32(void) { - /* - * Always use the physical counter for the clocksource. - * CNTHCTL.PL1PCTEN must be set to 1. 
- */ - return arch_counter_get_cntpct(); + return arch_timer_read_counter(); } -static struct clocksource clocksource_counter = { - .name = "arch_sys_counter", - .rating = 400, - .read = arch_counter_read, - .mask = CLOCKSOURCE_MASK(56), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; - -static struct cyclecounter cyclecounter = { - .read = arch_counter_read_cc, - .mask = CLOCKSOURCE_MASK(56), -}; - -static struct timecounter timecounter; - -struct timecounter *arch_timer_get_timecounter(void) -{ - return &timecounter; -} - -static void __cpuinit arch_timer_stop(struct clock_event_device *clk) -{ - pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", - clk->irq, smp_processor_id()); - - if (arch_timer_use_virtual) - disable_percpu_irq(arch_timer_ppi[VIRT_PPI]); - else { - disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]); - if (arch_timer_ppi[PHYS_NONSECURE_PPI]) - disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); - } - - clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); -} - -static struct local_timer_ops arch_timer_ops __cpuinitdata = { - .setup = arch_timer_setup, - .stop = arch_timer_stop, -}; - -static struct clock_event_device arch_timer_global_evt; +static struct delay_timer arch_delay_timer; -static int __init arch_timer_register(void) +static void __init arch_timer_delay_timer_register(void) { - int err; - int ppi; - - err = arch_timer_available(); - if (err) - goto out; - - arch_timer_evt = alloc_percpu(struct clock_event_device *); - if (!arch_timer_evt) { - err = -ENOMEM; - goto out; - } - - clocksource_register_hz(&clocksource_counter, arch_timer_rate); - cyclecounter.mult = clocksource_counter.mult; - cyclecounter.shift = clocksource_counter.shift; - timecounter_init(&timecounter, &cyclecounter, - arch_counter_get_cntpct()); - - if (arch_timer_use_virtual) { - ppi = arch_timer_ppi[VIRT_PPI]; - err = request_percpu_irq(ppi, arch_timer_handler_virt, - "arch_timer", arch_timer_evt); - } else { - ppi = arch_timer_ppi[PHYS_SECURE_PPI]; - err = request_percpu_irq(ppi, arch_timer_handler_phys, - "arch_timer", arch_timer_evt); - if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) { - ppi = arch_timer_ppi[PHYS_NONSECURE_PPI]; - err = request_percpu_irq(ppi, arch_timer_handler_phys, - "arch_timer", arch_timer_evt); - if (err) - free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], - arch_timer_evt); - } - } - - if (err) { - pr_err("arch_timer: can't register interrupt %d (%d)\n", - ppi, err); - goto out_free; - } - - err = local_timer_register(&arch_timer_ops); - if (err) { - /* - * We couldn't register as a local timer (could be - * because we're on a UP platform, or because some - * other local timer is already present...). Try as a - * global timer instead. - */ - arch_timer_global_evt.cpumask = cpumask_of(0); - err = arch_timer_setup(&arch_timer_global_evt); - } - if (err) - goto out_free_irq; - /* Use the architected timer for the delay loop. 
*/ - arch_delay_timer.read_current_timer = &arch_timer_read_current_timer; - arch_delay_timer.freq = arch_timer_rate; + arch_delay_timer.read_current_timer = arch_timer_read_counter_long; + arch_delay_timer.freq = arch_timer_get_rate(); register_current_timer_delay(&arch_delay_timer); - return 0; - -out_free_irq: - if (arch_timer_use_virtual) - free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt); - else { - free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], - arch_timer_evt); - if (arch_timer_ppi[PHYS_NONSECURE_PPI]) - free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], - arch_timer_evt); - } - -out_free: - free_percpu(arch_timer_evt); -out: - return err; } -static const struct of_device_id arch_timer_of_match[] __initconst = { - { .compatible = "arm,armv7-timer", }, - {}, -}; - int __init arch_timer_of_register(void) { - struct device_node *np; - u32 freq; - int i; - - np = of_find_matching_node(NULL, arch_timer_of_match); - if (!np) { - pr_err("arch_timer: can't find DT node\n"); - return -ENODEV; - } - - /* Try to determine the frequency from the device tree or CNTFRQ */ - if (!of_property_read_u32(np, "clock-frequency", &freq)) - arch_timer_rate = freq; - - for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) - arch_timer_ppi[i] = irq_of_parse_and_map(np, i); + int ret; - /* - * If no interrupt provided for virtual timer, we'll have to - * stick to the physical timer. It'd better be accessible... - */ - if (!arch_timer_ppi[VIRT_PPI]) { - arch_timer_use_virtual = false; + ret = arch_timer_init(); + if (ret) + return ret; - if (!arch_timer_ppi[PHYS_SECURE_PPI] || - !arch_timer_ppi[PHYS_NONSECURE_PPI]) { - pr_warn("arch_timer: No interrupt available, giving up\n"); - return -EINVAL; - } - } + arch_timer_delay_timer_register(); - return arch_timer_register(); + return 0; } int __init arch_timer_sched_clock_init(void) { - u32 (*cnt32)(void); - int err; - - err = arch_timer_available(); - if (err) - return err; - - if (arch_timer_use_virtual) - cnt32 = arch_counter_get_cntvct32; - else - cnt32 = arch_counter_get_cntpct32; + if (arch_timer_get_rate() == 0) + return -ENXIO; - setup_sched_clock(cnt32, 32, arch_timer_rate); + setup_sched_clock(arch_timer_read_counter_u32, + 32, arch_timer_get_rate()); return 0; } diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index c985b48..5ce738b 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -13,6 +13,9 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/dma-mapping.h> +#ifdef CONFIG_KVM_ARM_HOST +#include <linux/kvm_host.h> +#endif #include <asm/cacheflush.h> #include <asm/glue-df.h> #include <asm/glue-pf.h> @@ -146,5 +149,45 @@ int main(void) DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); +#ifdef CONFIG_KVM_ARM_HOST + DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); + DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); + DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); + DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); + DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host)); + DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); + DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); + DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); + DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); + DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); + DEFINE(VCPU_IRQ_REGS, offsetof(struct 
kvm_vcpu, arch.regs.irq_regs)); + DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); + DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); + DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); + DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); + DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); + DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); + DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); + DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); +#ifdef CONFIG_KVM_ARM_VGIC + DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); + DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); + DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); + DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); + DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); + DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); + DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); + DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); + DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); +#ifdef CONFIG_KVM_ARM_TIMER + DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); + DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); + DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); + DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); +#endif + DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); +#endif + DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); +#endif return 0; } diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 9b72261..a1f73b5 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -78,7 +78,7 @@ void pcibios_report_status(u_int status_mask, int warn) * Bug 3 is responsible for the sound DMA grinding to a halt. We now * live with bug 2. */ -static void __devinit pci_fixup_83c553(struct pci_dev *dev) +static void pci_fixup_83c553(struct pci_dev *dev) { /* * Set memory region to start at address 0, and enable IO @@ -130,7 +130,7 @@ static void __devinit pci_fixup_83c553(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553); -static void __devinit pci_fixup_unassign(struct pci_dev *dev) +static void pci_fixup_unassign(struct pci_dev *dev) { dev->resource[0].end -= dev->resource[0].start; dev->resource[0].start = 0; @@ -142,7 +142,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, * if it is the host bridge by marking it as such. These resources are of * no consequence to the PCI layer (they are handled elsewhere). */ -static void __devinit pci_fixup_dec21285(struct pci_dev *dev) +static void pci_fixup_dec21285(struct pci_dev *dev) { int i; @@ -161,7 +161,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_d /* * PCI IDE controllers use non-standard I/O port decoding, respect it. 
*/ -static void __devinit pci_fixup_ide_bases(struct pci_dev *dev) +static void pci_fixup_ide_bases(struct pci_dev *dev) { struct resource *r; int i; @@ -182,7 +182,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); /* * Put the DEC21142 to sleep */ -static void __devinit pci_fixup_dec21142(struct pci_dev *dev) +static void pci_fixup_dec21142(struct pci_dev *dev) { pci_write_config_dword(dev, 0x40, 0x80000000); } @@ -204,7 +204,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_d * functional. However, The CY82C693U _does not work_ in bus * master mode without locking the PCI bus solid. */ -static void __devinit pci_fixup_cy82c693(struct pci_dev *dev) +static void pci_fixup_cy82c693(struct pci_dev *dev) { if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) { u32 base0, base1; @@ -254,7 +254,7 @@ static void __devinit pci_fixup_cy82c693(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693); -static void __devinit pci_fixup_it8152(struct pci_dev *dev) +static void pci_fixup_it8152(struct pci_dev *dev) { int i; /* fixup for ITE 8152 devices */ @@ -361,9 +361,7 @@ void pcibios_fixup_bus(struct pci_bus *bus) printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n", bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis"); } -#ifdef CONFIG_HOTPLUG EXPORT_SYMBOL(pcibios_fixup_bus); -#endif /* * Swizzle the device pin each time we cross a bridge. If a platform does @@ -380,7 +378,7 @@ EXPORT_SYMBOL(pcibios_fixup_bus); * PCI standard swizzle is implemented on plug-in cards and Cardbus based * PCI extenders, so it can not be ignored. */ -static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin) +static u8 pcibios_swizzle(struct pci_dev *dev, u8 *pin) { struct pci_sys_data *sys = dev->sysdata; int slot, oldpin = *pin; @@ -415,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) return irq; } -static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) +static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) { int ret; struct pci_host_bridge_window *window; @@ -447,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) return 0; } -static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) +static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head) { struct pci_sys_data *sys = NULL; int ret; @@ -466,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) sys->map_irq = hw->map_irq; INIT_LIST_HEAD(&sys->resources); + if (hw->private_data) + sys->private_data = hw->private_data[nr]; + ret = hw->setup(nr, sys); if (ret > 0) { @@ -495,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) } } -void __init pci_common_init(struct hw_pci *hw) +void pci_common_init(struct hw_pci *hw) { struct pci_sys_data *sys; LIST_HEAD(head); diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 5935b6a..0cc5761 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -195,7 +195,7 @@ CALL(sys_getcwd) CALL(sys_capget) /* 185 */ CALL(sys_capset) - CALL(sys_sigaltstack_wrapper) + CALL(sys_sigaltstack) CALL(sys_sendfile) CALL(sys_ni_syscall) /* getpmsg */ CALL(sys_ni_syscall) /* putpmsg */ @@ -388,6 +388,7 @@ CALL(sys_process_vm_readv) CALL(sys_process_vm_writev) CALL(sys_ni_syscall) /* reserved for sys_kcmp */ + CALL(sys_finit_module) #ifndef syscalls_counted 
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index 6809200..14f7c3b 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -100,12 +100,14 @@ ENTRY(printch) b 1b ENDPROC(printch) +#ifdef CONFIG_MMU ENTRY(debug_ll_addr) addruart r2, r3, ip str r2, [r0] str r3, [r1] mov pc, lr ENDPROC(debug_ll_addr) +#endif #else diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index a6c301e..3248cde 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -514,11 +514,6 @@ sys_rt_sigreturn_wrapper: b sys_rt_sigreturn ENDPROC(sys_rt_sigreturn_wrapper) -sys_sigaltstack_wrapper: - ldr r2, [sp, #S_OFF + S_SP] - b do_sigaltstack -ENDPROC(sys_sigaltstack_wrapper) - sys_statfs64_wrapper: teq r1, #88 moveq r1, #84 diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c index 36d20bd..9b6de8c 100644 --- a/arch/arm/kernel/etm.c +++ b/arch/arm/kernel/etm.c @@ -339,7 +339,7 @@ static struct miscdevice etb_miscdev = { .fops = &etb_fops, }; -static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id) +static int etb_probe(struct amba_device *dev, const struct amba_id *id) { struct tracectx *t = &tracer; int ret = 0; @@ -531,7 +531,7 @@ static ssize_t trace_mode_store(struct kobject *kobj, static struct kobj_attribute trace_mode_attr = __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store); -static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id) +static int etm_probe(struct amba_device *dev, const struct amba_id *id) { struct tracectx *t = &tracer; int ret = 0; diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 4eee351..486a15a 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -246,6 +246,7 @@ __create_page_tables: /* * Then map boot params address in r2 if specified. + * We map 2 sections in case the ATAGs/DTB crosses a section boundary. */ mov r0, r2, lsr #SECTION_SHIFT movs r0, r0, lsl #SECTION_SHIFT @@ -253,6 +254,8 @@ __create_page_tables: addne r3, r3, #PAGE_OFFSET addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) orrne r6, r7, r0 + strne r6, [r3], #1 << PMD_ORDER + addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] #ifdef CONFIG_DEBUG_LL @@ -331,7 +334,7 @@ ENTRY(secondary_startup) * as it has already been validated by the primary processor. */ #ifdef CONFIG_ARM_VIRT_EXT - bl __hyp_stub_install + bl __hyp_stub_install_secondary #endif safe_svcmode_maskall r9 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 5ff2e77..5eae53e 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -28,6 +28,7 @@ #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/smp.h> +#include <linux/cpu_pm.h> #include <asm/cacheflush.h> #include <asm/cputype.h> @@ -35,6 +36,7 @@ #include <asm/hw_breakpoint.h> #include <asm/kdebug.h> #include <asm/traps.h> +#include <asm/hardware/coresight.h> /* Breakpoint currently in use for each BRP. */ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); @@ -49,6 +51,9 @@ static int core_num_wrps; /* Debug architecture version. */ static u8 debug_arch; +/* Does debug architecture support OS Save and Restore? */ +static bool has_ossr; + /* Maximum supported watchpoint length. */ static u8 max_watchpoint_len; @@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = { .fn = debug_reg_trap, }; +/* Does this core support OS Save and Restore? 
*/ +static bool core_has_os_save_restore(void) +{ + u32 oslsr; + + switch (get_debug_arch()) { + case ARM_DEBUG_ARCH_V7_1: + return true; + case ARM_DEBUG_ARCH_V7_ECP14: + ARM_DBG_READ(c1, c1, 4, oslsr); + if (oslsr & ARM_OSLSR_OSLM0) + return true; + default: + return false; + } +} + static void reset_ctrl_regs(void *unused) { int i, raw_num_brps, err = 0, cpu = smp_processor_id(); @@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused) if ((val & 0x1) == 0) err = -EPERM; - /* - * Check whether we implement OS save and restore. - */ - ARM_DBG_READ(c1, c1, 4, val); - if ((val & 0x9) == 0) + if (!has_ossr) goto clear_vcr; break; case ARM_DEBUG_ARCH_V7_1: @@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused) /* * Unconditionally clear the OS lock by writing a value - * other than 0xC5ACCE55 to the access register. + * other than CS_LAR_KEY to the access register. */ - ARM_DBG_WRITE(c1, c0, 4, 0); + ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY); isb(); /* @@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { .notifier_call = dbg_reset_notify, }; +#ifdef CONFIG_CPU_PM +static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action, + void *v) +{ + if (action == CPU_PM_EXIT) + reset_ctrl_regs(NULL); + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { + .notifier_call = dbg_cpu_pm_notify, +}; + +static void __init pm_init(void) +{ + cpu_pm_register_notifier(&dbg_cpu_pm_nb); +} +#else +static inline void pm_init(void) +{ +} +#endif + static int __init arch_hw_breakpoint_init(void) { debug_arch = get_debug_arch(); @@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void) return 0; } + has_ossr = core_has_os_save_restore(); + /* Determine how many BRPs/WRPs are available. */ core_num_brps = get_num_brps(); core_num_wrps = get_num_wrps(); @@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void) hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, "breakpoint debug exception"); - /* Register hotplug notifier. */ + /* Register hotplug and PM notifiers. */ register_cpu_notifier(&dbg_reset_nb); + pm_init(); return 0; } arch_initcall(arch_hw_breakpoint_init); diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index 65b2417..1315c4c 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary) * immediately. */ compare_cpu_mode_with_primary r4, r5, r6, r7 - bxne lr + movne pc, lr /* * Once we have given up on one CPU, we do not try to install the @@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary) */ cmp r4, #HYP_MODE - bxne lr @ give up if the CPU is not in HYP mode + movne pc, lr @ give up if the CPU is not in HYP mode /* * Configure HSCTLR to set correct exception endianness/instruction set @@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary) * Eventually, CPU-specific code might be needed -- assume not for now * * This code relies on the "eret" instruction to synchronize the - * various coprocessor accesses. + * various coprocessor accesses. This is done when we switch to SVC + * (see safe_svcmode_maskall). */ @ Now install the hypervisor stub: adr r7, __hyp_stub_vectors @@ -155,14 +156,7 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE 1: #endif - bic r7, r4, #MODE_MASK - orr r7, r7, #SVC_MODE -THUMB( orr r7, r7, #PSR_T_BIT ) - msr spsr_cxsf, r7 @ This is SPSR_hyp. 
- - __MSR_ELR_HYP(14) @ msr elr_hyp, lr - __ERET @ return, switching to SVC mode - @ The boot CPU mode is left in r4. + bx lr @ The boot CPU mode is left in r4. ENDPROC(__hyp_stub_install_secondary) __hyp_stub_do_trap: @@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors) @ fall through ENTRY(__hyp_set_vectors) __HVC(0) - bx lr + mov pc, lr ENDPROC(__hyp_set_vectors) #ifndef ZIMAGE diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 8961650..8e4ef4c 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -117,6 +117,16 @@ void __init init_IRQ(void) machine_desc->init_irq(); } +#ifdef CONFIG_MULTI_IRQ_HANDLER +void __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) +{ + if (handle_arch_irq) + return; + + handle_arch_irq = handle_irq; +} +#endif + #ifdef CONFIG_SPARSE_IRQ int __init arch_probe_nr_irqs(void) { diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 4dd41fc..170e9f3 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -395,7 +395,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; - struct hlist_node *node, *tmp; + struct hlist_node *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; @@ -415,7 +415,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) * real return address, and all the rest will point to * kretprobe_trampoline */ - hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; @@ -442,7 +442,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) kretprobe_assert(ri, orig_ret_address, trampoline_address); kretprobe_hash_unlock(current, &flags); - hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index f9e8657..31e0eb3 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -149,12 +149,6 @@ again: static void armpmu_read(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - - /* Don't read disabled counters! 
*/ - if (hwc->idx < 0) - return; - armpmu_event_update(event); } @@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - WARN_ON(idx < 0); - armpmu_stop(event, PERF_EF_UPDATE); hw_events->events[idx] = NULL; clear_bit(idx, hw_events->used_mask); @@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; - int mapping, err; + int mapping; mapping = armpmu->map_event(event); @@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event) local64_set(&hwc->period_left, hwc->sample_period); } - err = 0; if (event->group_leader != event) { - err = validate_group(event); - if (err) + if (validate_group(event) != 0) return -EINVAL; } - return err; + return 0; } static int armpmu_event_init(struct perf_event *event) diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 9a4f630..1f2740e 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -132,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) return 0; } -static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu) +static void cpu_pmu_init(struct arm_pmu *cpu_pmu) { int cpu; for_each_possible_cpu(cpu) { @@ -147,7 +147,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->free_irq = cpu_pmu_free_irq; /* Ensure the PMU has sane values out of reset. */ - if (cpu_pmu && cpu_pmu->reset) + if (cpu_pmu->reset) on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); } @@ -178,7 +178,7 @@ static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = { /* * PMU platform driver and devicetree bindings. */ -static struct of_device_id __devinitdata cpu_pmu_of_device_ids[] = { +static struct of_device_id cpu_pmu_of_device_ids[] = { {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init}, {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init}, @@ -190,7 +190,7 @@ static struct of_device_id __devinitdata cpu_pmu_of_device_ids[] = { {}, }; -static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = { +static struct platform_device_id cpu_pmu_plat_device_ids[] = { {.name = "arm-pmu"}, {}, }; @@ -198,51 +198,49 @@ static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = { /* * CPU PMU identification and probing. */ -static int __devinit probe_current_pmu(struct arm_pmu *pmu) +static int probe_current_pmu(struct arm_pmu *pmu) { int cpu = get_cpu(); - unsigned long cpuid = read_cpuid_id(); - unsigned long implementor = (cpuid & 0xFF000000) >> 24; - unsigned long part_number = (cpuid & 0xFFF0); + unsigned long implementor = read_cpuid_implementor(); + unsigned long part_number = read_cpuid_part_number(); int ret = -ENODEV; pr_info("probing PMU on CPU %d\n", cpu); /* ARM Ltd CPUs. 
*/ - if (0x41 == implementor) { + if (implementor == ARM_CPU_IMP_ARM) { switch (part_number) { - case 0xB360: /* ARM1136 */ - case 0xB560: /* ARM1156 */ - case 0xB760: /* ARM1176 */ + case ARM_CPU_PART_ARM1136: + case ARM_CPU_PART_ARM1156: + case ARM_CPU_PART_ARM1176: ret = armv6pmu_init(pmu); break; - case 0xB020: /* ARM11mpcore */ + case ARM_CPU_PART_ARM11MPCORE: ret = armv6mpcore_pmu_init(pmu); break; - case 0xC080: /* Cortex-A8 */ + case ARM_CPU_PART_CORTEX_A8: ret = armv7_a8_pmu_init(pmu); break; - case 0xC090: /* Cortex-A9 */ + case ARM_CPU_PART_CORTEX_A9: ret = armv7_a9_pmu_init(pmu); break; - case 0xC050: /* Cortex-A5 */ + case ARM_CPU_PART_CORTEX_A5: ret = armv7_a5_pmu_init(pmu); break; - case 0xC0F0: /* Cortex-A15 */ + case ARM_CPU_PART_CORTEX_A15: ret = armv7_a15_pmu_init(pmu); break; - case 0xC070: /* Cortex-A7 */ + case ARM_CPU_PART_CORTEX_A7: ret = armv7_a7_pmu_init(pmu); break; } /* Intel CPUs [xscale]. */ - } else if (0x69 == implementor) { - part_number = (cpuid >> 13) & 0x7; - switch (part_number) { - case 1: + } else if (implementor == ARM_CPU_IMP_INTEL) { + switch (xscale_cpu_arch_version()) { + case ARM_CPU_XSCALE_ARCH_V1: ret = xscale1pmu_init(pmu); break; - case 2: + case ARM_CPU_XSCALE_ARCH_V2: ret = xscale2pmu_init(pmu); break; } @@ -252,7 +250,7 @@ static int __devinit probe_current_pmu(struct arm_pmu *pmu) return ret; } -static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) +static int cpu_pmu_device_probe(struct platform_device *pdev) { const struct of_device_id *of_id; int (*init_fn)(struct arm_pmu *); @@ -279,17 +277,22 @@ static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) } if (ret) { - pr_info("failed to register PMU devices!"); - kfree(pmu); - return ret; + pr_info("failed to probe PMU!"); + goto out_free; } cpu_pmu = pmu; cpu_pmu->plat_device = pdev; cpu_pmu_init(cpu_pmu); - armpmu_register(cpu_pmu, PERF_TYPE_RAW); + ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW); - return 0; + if (!ret) + return 0; + +out_free: + pr_info("failed to register PMU devices!"); + kfree(pmu); + return ret; } static struct platform_driver cpu_pmu_driver = { diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index f3e22ff..03664b0 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -653,7 +653,7 @@ static int armv6_map_event(struct perf_event *event) &armv6_perf_cache_map, 0xFF); } -static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu) +static int armv6pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "v6"; cpu_pmu->handle_irq = armv6pmu_handle_irq; @@ -685,7 +685,7 @@ static int armv6mpcore_map_event(struct perf_event *event) &armv6mpcore_perf_cache_map, 0xFF); } -static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) +static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "v6mpcore"; cpu_pmu->handle_irq = 
armv6pmu_handle_irq; diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 7d0cce8..8c79a9e 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, /* * The prefetch counters don't differentiate between the I @@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -1226,7 +1226,7 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->max_period = (1LLU << 32) - 1; }; -static u32 __devinit armv7_read_num_pmnc_events(void) +static u32 armv7_read_num_pmnc_events(void) { u32 nb_cnt; @@ -1237,7 +1237,7 @@ static u32 __devinit armv7_read_num_pmnc_events(void) return nb_cnt + 1; } -static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) +static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "ARMv7 Cortex-A8"; @@ -1246,7 +1246,7 @@ static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) return 0; } -static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) +static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "ARMv7 Cortex-A9"; @@ -1255,7 +1255,7 @@ static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) return 0; } -static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) +static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "ARMv7 Cortex-A5"; @@ -1264,7 +1264,7 @@ static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) return 0; } -static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) +static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name 
= "ARMv7 Cortex-A15"; @@ -1274,7 +1274,7 @@ static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) return 0; } -static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) +static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "ARMv7 Cortex-A7"; diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 0c8265e..63990c4 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -440,7 +440,7 @@ static int xscale_map_event(struct perf_event *event) &xscale_perf_cache_map, 0xFF); } -static int __devinit xscale1pmu_init(struct arm_pmu *cpu_pmu) +static int xscale1pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "xscale1"; cpu_pmu->handle_irq = xscale1pmu_handle_irq; @@ -810,7 +810,7 @@ static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val) } } -static int __devinit xscale2pmu_init(struct arm_pmu *cpu_pmu) +static int xscale2pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "xscale2"; cpu_pmu->handle_irq = xscale2pmu_handle_irq; diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index c6dec5f..047d3e4 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -172,14 +172,9 @@ static void default_idle(void) local_irq_enable(); } -void (*pm_idle)(void) = default_idle; -EXPORT_SYMBOL(pm_idle); - /* - * The idle thread, has rather strange semantics for calling pm_idle, - * but this is what x86 does and we need to do the same, so that - * things like cpuidle get called in the same way. The only difference - * is that we always respect 'hlt_counter' to prevent low power idle. + * The idle thread. + * We always respect 'hlt_counter' to prevent low power idle. */ void cpu_idle(void) { @@ -210,10 +205,10 @@ void cpu_idle(void) } else if (!need_resched()) { stop_critical_timings(); if (cpuidle_idle_call()) - pm_idle(); + default_idle(); start_critical_timings(); /* - * pm_idle functions must always + * default_idle functions must always * return with IRQs enabled. */ WARN_ON(irqs_disabled()); diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c new file mode 100644 index 0000000..3653164 --- /dev/null +++ b/arch/arm/kernel/psci.c @@ -0,0 +1,211 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#define pr_fmt(fmt) "psci: " fmt + +#include <linux/init.h> +#include <linux/of.h> + +#include <asm/compiler.h> +#include <asm/errno.h> +#include <asm/opcodes-sec.h> +#include <asm/opcodes-virt.h> +#include <asm/psci.h> + +struct psci_operations psci_ops; + +static int (*invoke_psci_fn)(u32, u32, u32, u32); + +enum psci_function { + PSCI_FN_CPU_SUSPEND, + PSCI_FN_CPU_ON, + PSCI_FN_CPU_OFF, + PSCI_FN_MIGRATE, + PSCI_FN_MAX, +}; + +static u32 psci_function_id[PSCI_FN_MAX]; + +#define PSCI_RET_SUCCESS 0 +#define PSCI_RET_EOPNOTSUPP -1 +#define PSCI_RET_EINVAL -2 +#define PSCI_RET_EPERM -3 + +static int psci_to_linux_errno(int errno) +{ + switch (errno) { + case PSCI_RET_SUCCESS: + return 0; + case PSCI_RET_EOPNOTSUPP: + return -EOPNOTSUPP; + case PSCI_RET_EINVAL: + return -EINVAL; + case PSCI_RET_EPERM: + return -EPERM; + }; + + return -EINVAL; +} + +#define PSCI_POWER_STATE_ID_MASK 0xffff +#define PSCI_POWER_STATE_ID_SHIFT 0 +#define PSCI_POWER_STATE_TYPE_MASK 0x1 +#define PSCI_POWER_STATE_TYPE_SHIFT 16 +#define PSCI_POWER_STATE_AFFL_MASK 0x3 +#define PSCI_POWER_STATE_AFFL_SHIFT 24 + +static u32 psci_power_state_pack(struct psci_power_state state) +{ + return ((state.id & PSCI_POWER_STATE_ID_MASK) + << PSCI_POWER_STATE_ID_SHIFT) | + ((state.type & PSCI_POWER_STATE_TYPE_MASK) + << PSCI_POWER_STATE_TYPE_SHIFT) | + ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) + << PSCI_POWER_STATE_AFFL_SHIFT); +} + +/* + * The following two functions are invoked via the invoke_psci_fn pointer + * and will not be inlined, allowing us to piggyback on the AAPCS. + */ +static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, + u32 arg2) +{ + asm volatile( + __asmeq("%0", "r0") + __asmeq("%1", "r1") + __asmeq("%2", "r2") + __asmeq("%3", "r3") + __HVC(0) + : "+r" (function_id) + : "r" (arg0), "r" (arg1), "r" (arg2)); + + return function_id; +} + +static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, + u32 arg2) +{ + asm volatile( + __asmeq("%0", "r0") + __asmeq("%1", "r1") + __asmeq("%2", "r2") + __asmeq("%3", "r3") + __SMC(0) + : "+r" (function_id) + : "r" (arg0), "r" (arg1), "r" (arg2)); + + return function_id; +} + +static int psci_cpu_suspend(struct psci_power_state state, + unsigned long entry_point) +{ + int err; + u32 fn, power_state; + + fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; + power_state = psci_power_state_pack(state); + err = invoke_psci_fn(fn, power_state, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_off(struct psci_power_state state) +{ + int err; + u32 fn, power_state; + + fn = psci_function_id[PSCI_FN_CPU_OFF]; + power_state = psci_power_state_pack(state); + err = invoke_psci_fn(fn, power_state, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_ON]; + err = invoke_psci_fn(fn, cpuid, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_migrate(unsigned long cpuid) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_MIGRATE]; + err = invoke_psci_fn(fn, cpuid, 0, 0); + return psci_to_linux_errno(err); +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci", }, + {}, +}; + +static int __init psci_init(void) +{ + struct device_node *np; + const char *method; + u32 id; + + np = of_find_matching_node(NULL, psci_of_match); + if 
(!np) + return 0; + + pr_info("probing function IDs from device-tree\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warning("missing \"method\" property\n"); + goto out_put_node; + } + + if (!strcmp("hvc", method)) { + invoke_psci_fn = __invoke_psci_fn_hvc; + } else if (!strcmp("smc", method)) { + invoke_psci_fn = __invoke_psci_fn_smc; + } else { + pr_warning("invalid \"method\" property: %s\n", method); + goto out_put_node; + } + + if (!of_property_read_u32(np, "cpu_suspend", &id)) { + psci_function_id[PSCI_FN_CPU_SUSPEND] = id; + psci_ops.cpu_suspend = psci_cpu_suspend; + } + + if (!of_property_read_u32(np, "cpu_off", &id)) { + psci_function_id[PSCI_FN_CPU_OFF] = id; + psci_ops.cpu_off = psci_cpu_off; + } + + if (!of_property_read_u32(np, "cpu_on", &id)) { + psci_function_id[PSCI_FN_CPU_ON] = id; + psci_ops.cpu_on = psci_cpu_on; + } + + if (!of_property_read_u32(np, "migrate", &id)) { + psci_function_id[PSCI_FN_MIGRATE] = id; + psci_ops.migrate = psci_migrate; + } + +out_put_node: + of_node_put(np); + return 0; +} +early_initcall(psci_init); diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index fc6692e..bd6f56b 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -93,11 +93,11 @@ static void notrace update_sched_clock(void) * detectable in cyc_to_fixed_sched_clock(). */ raw_local_irq_save(flags); - cd.epoch_cyc = cyc; + cd.epoch_cyc_copy = cyc; smp_wmb(); cd.epoch_ns = ns; smp_wmb(); - cd.epoch_cyc_copy = cyc; + cd.epoch_cyc = cyc; raw_local_irq_restore(flags); } diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 9a89bf4..3f6cbb2 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -733,7 +733,7 @@ void __init setup_arch(char **cmdline_p) setup_processor(); mdesc = setup_machine_fdt(__atags_pointer); if (!mdesc) - mdesc = setup_machine_tags(__atags_pointer, machine_arch_type); + mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type); machine_desc = mdesc; machine_name = mdesc->name; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 56f72d2..296786b 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -45,48 +45,6 @@ const unsigned long sigreturn_codes[7] = { MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, }; -/* - * atomically swap in the new signal mask, and wait for a signal. - */ -asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) -{ - sigset_t blocked; - siginitset(&blocked, mask); - return sigsuspend(&blocked); -} - -asmlinkage int -sys_sigaction(int sig, const struct old_sigaction __user *act, - struct old_sigaction __user *oact) -{ - struct k_sigaction new_ka, old_ka; - int ret; - - if (act) { - old_sigset_t mask; - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(new_ka.sa.sa_handler, &act->sa_handler) || - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || - __get_user(new_ka.sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); - - if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) - return -EFAULT; - } - - return ret; -} - #ifdef CONFIG_CRUNCH static int preserve_crunch_context(struct crunch_sigframe __user *frame) { @@ -300,7 +258,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) if (restore_sigframe(regs, &frame->sig)) goto badframe; - if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT) + if (restore_altstack(&frame->sig.uc.uc_stack)) goto badframe; return regs->ARM_r0; @@ -360,18 +318,12 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) } static inline void __user * -get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) +get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize) { - unsigned long sp = regs->ARM_sp; + unsigned long sp = sigsp(regs->ARM_sp, ksig); void __user *frame; /* - * This is the X/Open sanctioned signal stack switching. - */ - if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) - sp = current->sas_ss_sp + current->sas_ss_size; - - /* * ATPCS B01 mandates 8-byte alignment */ frame = (void __user *)((sp - framesize) & ~7); @@ -385,11 +337,22 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) return frame; } +/* + * translate the signal + */ +static inline int map_sig(int sig) +{ + struct thread_info *thread = current_thread_info(); + if (sig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) + sig = thread->exec_domain->signal_invmap[sig]; + return sig; +} + static int -setup_return(struct pt_regs *regs, struct k_sigaction *ka, - unsigned long __user *rc, void __user *frame, int usig) +setup_return(struct pt_regs *regs, struct ksignal *ksig, + unsigned long __user *rc, void __user *frame) { - unsigned long handler = (unsigned long)ka->sa.sa_handler; + unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler; unsigned long retcode; int thumb = 0; unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); @@ -399,7 +362,7 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, /* * Maybe we need to deliver a 32-bit signal to a 26-bit task. 
*/ - if (ka->sa.sa_flags & SA_THIRTYTWO) + if (ksig->ka.sa.sa_flags & SA_THIRTYTWO) cpsr = (cpsr & ~MODE_MASK) | USR_MODE; #ifdef CONFIG_ARM_THUMB @@ -421,12 +384,12 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, } #endif - if (ka->sa.sa_flags & SA_RESTORER) { - retcode = (unsigned long)ka->sa.sa_restorer; + if (ksig->ka.sa.sa_flags & SA_RESTORER) { + retcode = (unsigned long)ksig->ka.sa.sa_restorer; } else { unsigned int idx = thumb << 1; - if (ka->sa.sa_flags & SA_SIGINFO) + if (ksig->ka.sa.sa_flags & SA_SIGINFO) idx += 3; if (__put_user(sigreturn_codes[idx], rc) || @@ -451,7 +414,7 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, } } - regs->ARM_r0 = usig; + regs->ARM_r0 = map_sig(ksig->sig); regs->ARM_sp = (unsigned long)frame; regs->ARM_lr = retcode; regs->ARM_pc = handler; @@ -461,9 +424,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, } static int -setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) +setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); + struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame)); int err = 0; if (!frame) @@ -476,36 +439,29 @@ setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *reg err |= setup_sigframe(frame, regs, set); if (err == 0) - err = setup_return(regs, ka, frame->retcode, frame, usig); + err = setup_return(regs, ksig, frame->retcode, frame); return err; } static int -setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs *regs) +setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); - stack_t stack; + struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame)); int err = 0; if (!frame) return 1; - err |= copy_siginfo_to_user(&frame->info, info); + err |= copy_siginfo_to_user(&frame->info, &ksig->info); __put_user_error(0, &frame->sig.uc.uc_flags, err); __put_user_error(NULL, &frame->sig.uc.uc_link, err); - memset(&stack, 0, sizeof(stack)); - stack.ss_sp = (void __user *)current->sas_ss_sp; - stack.ss_flags = sas_ss_flags(regs->ARM_sp); - stack.ss_size = current->sas_ss_size; - err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack)); - + err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp); err |= setup_sigframe(&frame->sig, regs, set); if (err == 0) - err = setup_return(regs, ka, frame->sig.retcode, frame, usig); + err = setup_return(regs, ksig, frame->sig.retcode, frame); if (err == 0) { /* @@ -523,40 +479,25 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, /* * OK, we're invoking a handler */ -static void -handle_signal(unsigned long sig, struct k_sigaction *ka, - siginfo_t *info, struct pt_regs *regs) +static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { - struct thread_info *thread = current_thread_info(); - struct task_struct *tsk = current; sigset_t *oldset = sigmask_to_save(); - int usig = sig; int ret; /* - * translate the signal - */ - if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) - usig = thread->exec_domain->signal_invmap[usig]; - - /* * Set up the stack frame */ - if (ka->sa.sa_flags & SA_SIGINFO) - ret = setup_rt_frame(usig, ka, info, oldset, regs); + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + ret = setup_rt_frame(ksig, oldset, regs); else - ret = setup_frame(usig, ka, oldset, 
@@ -571,9 +512,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 static int do_signal(struct pt_regs *regs, int syscall)
 {
 	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
-	struct k_sigaction ka;
-	siginfo_t info;
-	int signr;
+	struct ksignal ksig;
 	int restart = 0;
 
 	/*
@@ -605,33 +544,32 @@ static int do_signal(struct pt_regs *regs, int syscall)
 	 * Get the signal to deliver. When running under ptrace, at this
 	 * point the debugger may change all our registers ...
 	 */
-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	/*
 	 * Depending on the signal settings we may need to revert the
 	 * decision to restart the system call. But skip this if a
 	 * debugger has chosen to restart at a different PC.
 	 */
-	if (regs->ARM_pc != restart_addr)
-		restart = 0;
-	if (signr > 0) {
-		if (unlikely(restart)) {
+	if (get_signal(&ksig)) {
+		/* handler */
+		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
 			if (retval == -ERESTARTNOHAND ||
 			    retval == -ERESTART_RESTARTBLOCK
 			    || (retval == -ERESTARTSYS
-				&& !(ka.sa.sa_flags & SA_RESTART))) {
+				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
 				regs->ARM_r0 = -EINTR;
 				regs->ARM_pc = continue_addr;
 			}
 		}
-
-		handle_signal(signr, &ka, &info, regs);
-		return 0;
+		handle_signal(&ksig, regs);
+	} else {
+		/* no handler */
+		restore_saved_sigmask();
+		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
+			regs->ARM_pc = continue_addr;
+			return restart;
+		}
 	}
-
-	restore_saved_sigmask();
-	if (unlikely(restart))
-		regs->ARM_pc = continue_addr;
-	return restart;
+	return 0;
 }
 
 asmlinkage int
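The restart logic that do_signal() now applies per branch is what userspace observes as the SA_RESTART/EINTR split: -ERESTARTSYS becomes -EINTR unless the handler was installed with SA_RESTART. An illustrative userspace sketch (not from the patch; pass any argument to enable SA_RESTART, in which case the read() simply resumes waiting for input):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig)
{
	(void)sig;	/* only here to interrupt the blocking syscall */
}

int main(int argc, char **argv)
{
	struct sigaction sa;
	char buf[16];
	ssize_t n;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	if (argc > 1)
		sa.sa_flags = SA_RESTART;	/* the flag do_signal() checks */
	sigaction(SIGALRM, &sa, NULL);

	alarm(1);				/* interrupt the read() below */
	n = read(STDIN_FILENO, buf, sizeof(buf));
	if (n < 0 && errno == EINTR)
		puts("read interrupted: EINTR");	/* no SA_RESTART */
	else
		printf("read returned %zd\n", n);	/* restarted, got data */
	return 0;
}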
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf..1bdfd87 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
 		smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-	if (smp_ops.smp_prepare_cpus)
-		smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-	if (smp_ops.smp_secondary_init)
-		smp_ops.smp_secondary_init(cpu);
-}
-
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	if (smp_ops.smp_boot_secondary)
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
 	return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-	if (smp_ops.cpu_die)
-		smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
 	if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
 	 * actual CPU shutdown procedure is at least platform (if not
 	 * CPU) specific.
 	 */
-	platform_cpu_die(cpu);
+	if (smp_ops.cpu_die)
+		smp_ops.cpu_die(cpu);
 
 	/*
 	 * Do not return to the idle loop - jump back to the secondary
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * Give the platform a chance to do its own initialisation.
 	 */
-	platform_secondary_init(cpu);
+	if (smp_ops.smp_secondary_init)
+		smp_ops.smp_secondary_init(cpu);
 
 	notify_cpu_starting(cpu);
 
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	/*
 	 * Initialise the present map, which describes the set of CPUs
 	 * actually populated at the present time. A platform should
-	 * re-initialize the map in platform_smp_prepare_cpus() if
-	 * present != possible (e.g. physical hotplug).
+	 * re-initialize the map in the platform's smp_prepare_cpus()
+	 * if present != possible (e.g. physical hotplug).
 	 */
 	init_cpu_present(cpu_possible_mask);
 
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		 * Initialise the SCU if there are more than one CPU
 		 * and let them know where to start.
 		 */
-		platform_smp_prepare_cpus(max_cpus);
+		if (smp_ops.smp_prepare_cpus)
+			smp_ops.smp_prepare_cpus(max_cpus);
 	}
 }
 
@@ -416,7 +401,8 @@ static void (*smp_cross_call)(const struct cpumask *, unsigned int);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-	smp_cross_call = fn;
+	if (!smp_cross_call)
+		smp_cross_call = fn;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -475,19 +461,11 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
  */
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
-static void ipi_timer(void)
-{
-	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-	evt->event_handler(evt);
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void tick_broadcast(const struct cpumask *mask)
 {
 	smp_cross_call(mask, IPI_TIMER);
 }
-#else
-#define smp_timer_broadcast	NULL
 #endif
 
 static void broadcast_timer_set_mode(enum clock_event_mode mode,
@@ -530,7 +508,6 @@ static void __cpuinit percpu_timer_setup(void)
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
 	evt->cpumask = cpumask_of(cpu);
-	evt->broadcast = smp_timer_broadcast;
 
 	if (!lt_ops || lt_ops->setup(evt))
 		broadcast_timer_setup(evt);
@@ -596,11 +573,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	case IPI_WAKEUP:
 		break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
 		irq_enter();
-		ipi_timer();
+		tick_receive_broadcast();
 		irq_exit();
 		break;
+#endif
 
 	case IPI_RESCHEDULE:
 		scheduler_ipi();
@@ -693,6 +672,9 @@ static int cpufreq_callback(struct notifier_block *nb,
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
+	if (arm_delay_ops.const_clock)
+		return NOTIFY_OK;
+
 	if (!per_cpu(l_p_j_ref, cpu)) {
 		per_cpu(l_p_j_ref, cpu) =
 			per_cpu(cpu_data, cpu).loops_per_jiffy;
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index b9f015e..45eac87 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
 	unsigned int val;
-	int cpu = cpu_logical_map(smp_processor_id());
+	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 
 	if (mode > 3 || mode == 1 || cpu > 3)
 		return -EINVAL;
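The smp_scu.c hunk matters on multi-cluster systems: cpu_logical_map() returns a full MPIDR value, but the SCU only knows about the CPU number within its own cluster, i.e. affinity level 0. A standalone sketch of the extraction, using a macro of the same shape as the kernel's MPIDR_AFFINITY_LEVEL() and a made-up MPIDR value:

#include <stdio.h>
#include <stdint.h>

/* each affinity level is an 8-bit field of the MPIDR */
#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

int main(void)
{
	/* hypothetical MPIDR: cluster 1 (Aff1), core 2 within it (Aff0) */
	uint32_t mpidr = (1 << 8) | 2;

	printf("Aff0 (cpu in cluster) = %u\n",
	       (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 0));
	printf("Aff1 (cluster)        = %u\n",
	       (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 1));
	return 0;
}

Without the masking, core 2 of cluster 1 would hand the SCU the value 0x102 and fail the "cpu > 3" check above.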
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 49f335d..c092115 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -24,14 +24,12 @@
 #include <asm/smp_twd.h>
 #include <asm/localtimer.h>
-#include <asm/hardware/gic.h>
 
 /* set up by the platform code */
 static void __iomem *twd_base;
 
 static struct clk *twd_clk;
 static unsigned long twd_timer_rate;
-static bool common_setup_called;
 static DEFINE_PER_CPU(bool, percpu_setup_called);
 
 static struct clock_event_device __percpu **twd_evt;
@@ -239,25 +237,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static struct clk *twd_get_clock(void)
+static void twd_get_clock(struct device_node *np)
 {
-	struct clk *clk;
 	int err;
 
-	clk = clk_get_sys("smp_twd", NULL);
-	if (IS_ERR(clk)) {
-		pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
-		return clk;
+	if (np)
+		twd_clk = of_clk_get(np, 0);
+	else
+		twd_clk = clk_get_sys("smp_twd", NULL);
+
+	if (IS_ERR(twd_clk)) {
+		pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
+		return;
 	}
 
-	err = clk_prepare_enable(clk);
+	err = clk_prepare_enable(twd_clk);
 	if (err) {
 		pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
-		clk_put(clk);
-		return ERR_PTR(err);
+		clk_put(twd_clk);
+		return;
 	}
 
-	return clk;
+	twd_timer_rate = clk_get_rate(twd_clk);
 }
 
 /*
@@ -280,26 +281,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	}
 	per_cpu(percpu_setup_called, cpu) = true;
 
-	/*
-	 * This stuff only need to be done once for the entire TWD cluster
-	 * during the runtime of the system.
-	 */
-	if (!common_setup_called) {
-		twd_clk = twd_get_clock();
-
-		/*
-		 * We use IS_ERR_OR_NULL() here, because if the clock stubs
-		 * are active we will get a valid clk reference which is
-		 * however NULL and will return the rate 0. In that case we
-		 * need to calibrate the rate instead.
-		 */
-		if (!IS_ERR_OR_NULL(twd_clk))
-			twd_timer_rate = clk_get_rate(twd_clk);
-		else
-			twd_calibrate_rate();
-
-		common_setup_called = true;
-	}
+	twd_calibrate_rate();
 
 	/*
 	 * The following is done once per CPU the first time .setup() is
@@ -330,7 +312,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = {
 	.stop	= twd_timer_stop,
 };
 
-static int __init twd_local_timer_common_register(void)
+static int __init twd_local_timer_common_register(struct device_node *np)
 {
 	int err;
 
@@ -350,6 +332,8 @@ static int __init twd_local_timer_common_register(void)
 	if (err)
 		goto out_irq;
 
+	twd_get_clock(np);
+
 	return 0;
 
 out_irq:
@@ -373,7 +357,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
 	if (!twd_base)
 		return -ENOMEM;
 
-	return twd_local_timer_common_register();
+	return twd_local_timer_common_register(NULL);
 }
 
 #ifdef CONFIG_OF
@@ -405,7 +389,7 @@ void __init twd_local_timer_of_register(void)
 		goto out;
 	}
 
-	err = twd_local_timer_common_register();
+	err = twd_local_timer_common_register(np);
 
 out:
 	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index df74518..ab1017b 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -109,10 +109,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
 {
 	siginfo_t info;
 
+	down_read(&current->mm->mmap_sem);
 	if (find_vma(current->mm, addr) == NULL)
 		info.si_code = SEGV_MAPERR;
 	else
 		info.si_code = SEGV_ACCERR;
+	up_read(&current->mm->mmap_sem);
 
 	info.si_signo = SIGSEGV;
 	info.si_errno = 0;
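The swp_emulate.c hunk brackets find_vma() with mmap_sem held for reading, since another thread can mmap()/munmap() concurrently and mutate the VMA tree under the lookup. The same discipline in miniature, as a userspace analogy (a pthread rwlock standing in for mmap_sem, a plain array for the VMA tree; build with -pthread; all names are mine):

#include <pthread.h>
#include <stdio.h>

/* stands in for mmap_sem: many concurrent readers, exclusive writers */
static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int regions[4] = { 10, 20, 30, 40 };	/* stands in for the VMA tree */

static int lookup_region(int idx)
{
	int val;

	pthread_rwlock_rdlock(&map_lock);	/* like down_read(&mm->mmap_sem) */
	val = regions[idx];			/* like find_vma(mm, addr) */
	pthread_rwlock_unlock(&map_lock);	/* like up_read(&mm->mmap_sem) */
	return val;
}

int main(void)
{
	printf("region 2 -> %d\n", lookup_region(2));
	return 0;
}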
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 09be0c3..955d92d 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -21,7 +20,6 @@
 #include <linux/timex.h>
 #include <linux/errno.h>
 #include <linux/profile.h>
-#include <linux/syscore_ops.h>
 #include <linux/timer.h>
 #include <linux/irq.h>
@@ -31,11 +30,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
-/*
- * Our system timer.
- */
-static struct sys_timer *system_timer;
-
 #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \
     defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE)
 /* this needs a better home */
@@ -69,16 +63,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif
 
-#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
-u32 arch_gettimeoffset(void)
-{
-	if (system_timer->offset != NULL)
-		return system_timer->offset() * 1000;
-
-	return 0;
-}
-#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
-
 #ifndef CONFIG_GENERIC_CLOCKEVENTS
 /*
  * Kernel system timer support.
@@ -129,43 +113,8 @@ int __init register_persistent_clock(clock_access_fn read_boot,
 	return -EINVAL;
 }
 
-#if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS)
-static int timer_suspend(void)
-{
-	if (system_timer->suspend)
-		system_timer->suspend();
-
-	return 0;
-}
-
-static void timer_resume(void)
-{
-	if (system_timer->resume)
-		system_timer->resume();
-}
-#else
-#define timer_suspend NULL
-#define timer_resume NULL
-#endif
-
-static struct syscore_ops timer_syscore_ops = {
-	.suspend	= timer_suspend,
-	.resume		= timer_resume,
-};
-
-static int __init timer_init_syscore_ops(void)
-{
-	register_syscore_ops(&timer_syscore_ops);
-
-	return 0;
-}
-
-device_initcall(timer_init_syscore_ops);
-
 void __init time_init(void)
 {
-	system_timer = machine_desc->timer;
-	system_timer->init();
+	machine_desc->init_time();
 	sched_clock_postinit();
 }
-
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index b0179b8..1c08911 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -296,7 +296,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 	bust_spinlocks(0);
 	die_owner = -1;
-	add_taint(TAINT_DIE);
+	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 	die_nest_count--;
 	if (!die_nest_count)
 		/* Nest count reaches zero, release the lock. */
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b9f38e3..b571484 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -19,7 +19,11 @@
 	ALIGN_FUNCTION();					\
 	VMLINUX_SYMBOL(__idmap_text_start) = .;			\
 	*(.idmap.text)						\
-	VMLINUX_SYMBOL(__idmap_text_end) = .;
+	VMLINUX_SYMBOL(__idmap_text_end) = .;			\
+	ALIGN_FUNCTION();					\
+	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;		\
+	*(.hyp.idmap.text)					\
+	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
 
 #ifdef CONFIG_HOTPLUG_CPU
 #define ARM_CPU_DISCARD(x)
@@ -140,6 +144,8 @@ SECTIONS
 	}
 #endif
 
+	NOTES
+
 	_etext = .;			/* End of text and rodata section */
 
 #ifndef CONFIG_XIP_KERNEL
@@ -295,8 +301,6 @@ SECTIONS
 	}
 #endif
 
-	NOTES
-
 	BSS_SECTION(0, 0, 0)
 
 	_end = .;
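The __hyp_idmap_text_start/__hyp_idmap_text_end pair added to the linker script is the usual pattern of linker-defined boundary symbols around a section; GNU ld automates the same idea, emitting __start_<sec>/__stop_<sec> for any output section whose name is a valid C identifier. A runnable userspace sketch of that pattern (the section name "demo_text" and everything around it is illustrative, not from the patch):

#include <stdio.h>

/* place a function in a custom, identifier-named section */
__attribute__((used, section("demo_text")))
static void marker(void)
{
}

/* GNU ld defines these automatically for the section above */
extern const char __start_demo_text[], __stop_demo_text[];

int main(void)
{
	/* same trick the kernel uses to find code it must map specially */
	printf("demo_text: %p .. %p (%td bytes)\n",
	       (const void *)__start_demo_text,
	       (const void *)__stop_demo_text,
	       __stop_demo_text - __start_demo_text);
	return 0;
}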