Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                         |    2
-rw-r--r--  arch/powerpc/boot/.gitignore                 |   10
-rw-r--r--  arch/powerpc/boot/dts/amigaone.dts           |    4
-rw-r--r--  arch/powerpc/boot/dts/mpc8610_hpcd.dts       |   32
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h       |   24
-rw-r--r--  arch/powerpc/include/asm/highmem.h           |   57
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h            |   26
-rw-r--r--  arch/powerpc/include/asm/pci.h               |   13
-rw-r--r--  arch/powerpc/include/asm/perf_counter.h      |   52
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h    |    3
-rw-r--r--  arch/powerpc/include/asm/rtas.h              |    5
-rw-r--r--  arch/powerpc/kernel/Makefile                 |    8
-rw-r--r--  arch/powerpc/kernel/entry_32.S               |  127
-rw-r--r--  arch/powerpc/kernel/ftrace.c                 |    2
-rw-r--r--  arch/powerpc/kernel/head_32.S                |   17
-rw-r--r--  arch/powerpc/kernel/mpc7450-pmu.c            |  417
-rw-r--r--  arch/powerpc/kernel/of_device.c              |    2
-rw-r--r--  arch/powerpc/kernel/perf_counter.c           |  257
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c             |   89
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c            |   95
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c             |   98
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c             |   72
-rw-r--r--  arch/powerpc/kernel/power7-pmu.c             |   61
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c             |   63
-rw-r--r--  arch/powerpc/kernel/process.c                |    2
-rw-r--r--  arch/powerpc/kernel/prom_init.c              |   40
-rw-r--r--  arch/powerpc/kernel/rtas.c                   |   69
-rw-r--r--  arch/powerpc/kernel/setup_32.c               |    2
-rw-r--r--  arch/powerpc/kernel/smp.c                    |    3
-rw-r--r--  arch/powerpc/kernel/time.c                   |   25
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c             |    2
-rw-r--r--  arch/powerpc/mm/Makefile                     |    1
-rw-r--r--  arch/powerpc/mm/fault.c                      |    2
-rw-r--r--  arch/powerpc/mm/highmem.c                    |   77
-rw-r--r--  arch/powerpc/platforms/44x/warp.c            |   44
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8610_hpcd.c   |    4
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype       |   12
-rw-r--r--  arch/powerpc/platforms/cell/smp.c            |   30
-rw-r--r--  arch/powerpc/platforms/cell/spu_fault.c      |    2
-rw-r--r--  arch/powerpc/platforms/chrp/smp.c            |   33
-rw-r--r--  arch/powerpc/platforms/maple/setup.c         |   59
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c        |   15
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c      |   41
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c        |  166
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c  |   38
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c         |   30
-rw-r--r--  arch/powerpc/sysdev/mpic.c                   |   34
47 files changed, 1580 insertions(+), 687 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9fb344d..d00131c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -62,7 +62,6 @@ config HAVE_LATENCYTOP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
bool
- depends on PPC64
default y
config LOCKDEP_SUPPORT
@@ -126,6 +125,7 @@ config PPC
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
+ select HAVE_PERF_COUNTERS
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 2f50acd..3d80c3e 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -36,3 +36,13 @@ zImage.pseries
zconf.h
zlib.h
zutil.h
+fdt.c
+fdt.h
+fdt_ro.c
+fdt_rw.c
+fdt_strerror.c
+fdt_sw.c
+fdt_wip.c
+libfdt.h
+libfdt_internal.h
+
diff --git a/arch/powerpc/boot/dts/amigaone.dts b/arch/powerpc/boot/dts/amigaone.dts
index 26549fc..49ac36b 100644
--- a/arch/powerpc/boot/dts/amigaone.dts
+++ b/arch/powerpc/boot/dts/amigaone.dts
@@ -70,8 +70,8 @@
devsel-speed = <0x00000001>;
min-grant = <0>;
max-latency = <0>;
- /* First 64k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */
- ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00010000>;
+ /* First 4k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */
+ ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00001000>;
interrupt-parent = <&i8259>;
#interrupt-cells = <2>;
#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index cfc2c60..f468d21 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -100,8 +100,18 @@
};
board-control@3,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "fsl,fpga-pixis";
reg = <3 0 0x20>;
+ ranges = <0 3 0 0x20>;
+
+ sdcsr_pio: gpio-controller@a {
+ #gpio-cells = <2>;
+ compatible = "fsl,fpga-pixis-gpio-bank";
+ reg = <0xa 1>;
+ gpio-controller;
+ };
};
};
@@ -176,6 +186,28 @@
interrupt-parent = <&mpic>;
};
+ spi@7000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc8610-spi", "fsl,spi";
+ reg = <0x7000 0x40>;
+ cell-index = <0>;
+ interrupts = <59 2>;
+ interrupt-parent = <&mpic>;
+ mode = "cpu";
+ gpios = <&sdcsr_pio 7 0>;
+
+ mmc-slot@0 {
+ compatible = "fsl,mpc8610hpcd-mmc-slot",
+ "mmc-spi-slot";
+ reg = <0>;
+ gpios = <&sdcsr_pio 0 1 /* nCD */
+ &sdcsr_pio 1 0>; /* WP */
+ voltage-ranges = <3300 3300>;
+ spi-max-frequency = <50000000>;
+ };
+ };
+
display@2c000 {
compatible = "fsl,diu";
reg = <0x2c000 100>;
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887..b44aaab 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+ if (dma_ops->sync_single_range_for_cpu)
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
size, direction);
}
@@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(dev, dma_handle,
0, size, direction);
}
@@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+ if (dma_ops->sync_sg_for_cpu)
+ dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+ if (dma_ops->sync_sg_for_device)
+ dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+ if (dma_ops->sync_single_range_for_cpu)
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle,
offset, size, direction);
}
@@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
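All six hunks above make the same fix: the sync hooks in struct dma_mapping_ops are now optional, so each inline wrapper calls through the pointer only when it is non-NULL instead of unconditionally. A minimal user-space sketch of that optional-hook pattern (the struct and names below are illustrative, not the kernel's):

	#include <stdio.h>
	#include <stddef.h>

	/* Illustrative ops table: sync may legitimately be NULL when a
	 * bus implementation needs no cache synchronization. */
	struct mapping_ops {
		void (*sync)(void *buf, size_t len);
	};

	static void sync_for_cpu(const struct mapping_ops *ops,
				 void *buf, size_t len)
	{
		/* The fix in miniature: call the hook only if provided. */
		if (ops->sync)
			ops->sync(buf, len);
	}

	static void flush(void *buf, size_t len)
	{
		printf("synced %zu bytes\n", len);
	}

	int main(void)
	{
		char buf[64];
		struct mapping_ops coherent = { .sync = NULL };
		struct mapping_ops noncoherent = { .sync = flush };

		sync_for_cpu(&coherent, buf, sizeof(buf));	/* no-op */
		sync_for_cpu(&noncoherent, buf, sizeof(buf));	/* flushes */
		return 0;
	}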
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 684a73f..a74c4ee 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -22,9 +22,7 @@
#ifdef __KERNEL__
-#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
@@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table;
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+ pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
static inline void *kmap(struct page *page)
{
@@ -79,62 +80,11 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
- unsigned int idx;
- unsigned long vaddr;
-
- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- debug_kmap_atomic(type);
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
- __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
- local_flush_tlb_page(NULL, vaddr);
-
- return (void*) vaddr;
-}
-
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
return kmap_atomic_prot(page, type, kmap_prot);
}
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- return;
- }
-
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
-#endif
- pagefault_enable();
-}
-
static inline struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long) ptr;
@@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
return pte_page(*pte);
}
+
#define flush_cache_kmaps() flush_cache_all()
#endif /* __KERNEL__ */
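This change moves the kmap_atomic_prot() and kunmap_atomic() bodies out of the header into the new arch/powerpc/mm/highmem.c (listed in the diffstat), leaving only extern declarations here. The per-CPU slot arithmetic those functions depend on is worth seeing on its own; a tiny sketch, with an illustrative KM_TYPE_NR value:

	#include <stdio.h>

	#define KM_TYPE_NR 16	/* illustrative: atomic-kmap slots per CPU */

	/* Each CPU owns a contiguous block of fixmap slots, so an atomic
	 * mapping of a given type never collides with another CPU's. */
	static int kmap_slot(int type, int cpu)
	{
		return type + KM_TYPE_NR * cpu;
	}

	int main(void)
	{
		printf("cpu 0, type 3 -> slot %d\n", kmap_slot(3, 0));
		printf("cpu 2, type 3 -> slot %d\n", kmap_slot(3, 2));
		return 0;
	}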
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index b7f8f4a..8b505ea 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags)
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x) mtmsr(x)
-#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x) mtmsr(x)
-#define local_irq_restore(flags) mtmsr(flags)
+#define raw_local_irq_restore(flags) mtmsr(flags)
#endif
-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
__asm__ __volatile__("wrteei 0": : :"memory");
@@ -86,7 +86,7 @@ static inline void local_irq_disable(void)
#endif
}
-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
__asm__ __volatile__("wrteei 1": : :"memory");
@@ -98,7 +98,7 @@ static inline void local_irq_enable(void)
#endif
}
-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
{
unsigned long msr;
msr = mfmsr();
@@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags)
#endif
}
-#define local_save_flags(flags) ((flags) = mfmsr())
-#define local_irq_save(flags) local_irq_save_ptr(&flags)
-#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+#define raw_local_save_flags(flags) ((flags) = mfmsr())
+#define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0)
-#define hard_irq_enable() local_irq_enable()
-#define hard_irq_disable() local_irq_disable()
+#define hard_irq_disable() raw_local_irq_disable()
static inline int irqs_disabled_flags(unsigned long flags)
{
@@ -131,6 +131,8 @@ static inline int irqs_disabled_flags(unsigned long flags)
struct irq_chip;
#ifdef CONFIG_PERF_COUNTERS
+
+#ifdef CONFIG_PPC64
static inline unsigned long test_perf_counter_pending(void)
{
unsigned long x;
@@ -154,15 +156,15 @@ static inline void clear_perf_counter_pending(void)
"r" (0),
"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
+#endif /* CONFIG_PPC64 */
-#else
+#else /* CONFIG_PERF_COUNTERS */
static inline unsigned long test_perf_counter_pending(void)
{
return 0;
}
-static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}
#endif /* CONFIG_PERF_COUNTERS */
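Prefixing these helpers with raw_ is what lets the generic irqflags layer interpose lockdep: with CONFIG_TRACE_IRQFLAGS, local_irq_enable() and local_irq_disable() become wrappers that call trace_hardirqs_on()/trace_hardirqs_off() around the raw MSR[EE] manipulation. A hedged sketch of that layering, with printf stubs standing in for both the mtmsr-based primitives and lockdep:

	#include <stdio.h>

	/* Stand-ins for the MSR[EE]-twiddling primitives defined above. */
	static void raw_local_irq_disable(void) { /* mtmsr(msr & ~MSR_EE) */ }
	static void raw_local_irq_enable(void)  { /* mtmsr(msr |  MSR_EE) */ }

	static void trace_hardirqs_off(void) { printf("lockdep: irqs off\n"); }
	static void trace_hardirqs_on(void)  { printf("lockdep: irqs on\n"); }

	/* Roughly what the generic layer does once the arch exports raw_*
	 * names: inform lockdep on the safe side of the real state flip. */
	#define local_irq_disable() \
		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
	#define local_irq_enable() \
		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

	int main(void)
	{
		local_irq_disable();
		local_irq_enable();
		return 0;
	}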
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index ba17d5d..d9483c5 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -195,19 +195,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev,
struct resource *res,
struct pci_bus_region *region);
-static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
- struct resource *res)
-{
- struct resource *root = NULL;
-
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
- if (res->flags & IORESOURCE_MEM)
- root = &iomem_resource;
-
- return root;
-}
-
extern void pcibios_claim_one_bus(struct pci_bus *b);
extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index cc7c887..8ccd4e1 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -10,6 +10,8 @@
*/
#include <linux/types.h>
+#include <asm/hw_irq.h>
+
#define MAX_HWCOUNTERS 8
#define MAX_EVENT_ALTERNATIVES 8
#define MAX_LIMITED_HWCOUNTERS 2
@@ -19,27 +21,27 @@
* describe the PMU on a particular POWER-family CPU.
*/
struct power_pmu {
- int n_counter;
- int max_alternatives;
- u64 add_fields;
- u64 test_adder;
- int (*compute_mmcr)(u64 events[], int n_ev,
- unsigned int hwc[], u64 mmcr[]);
- int (*get_constraint)(u64 event, u64 *mskp, u64 *valp);
- int (*get_alternatives)(u64 event, unsigned int flags,
- u64 alt[]);
- void (*disable_pmc)(unsigned int pmc, u64 mmcr[]);
- int (*limited_pmc_event)(u64 event);
- u32 flags;
- int n_generic;
- int *generic_events;
- int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+ const char *name;
+ int n_counter;
+ int max_alternatives;
+ unsigned long add_fields;
+ unsigned long test_adder;
+ int (*compute_mmcr)(u64 events[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[]);
+ int (*get_constraint)(u64 event, unsigned long *mskp,
+ unsigned long *valp);
+ int (*get_alternatives)(u64 event, unsigned int flags,
+ u64 alt[]);
+ void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
+ int (*limited_pmc_event)(u64 event);
+ u32 flags;
+ int n_generic;
+ int *generic_events;
+ int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
};
-extern struct power_pmu *ppmu;
-
/*
* Values for power_pmu.flags
*/
@@ -53,15 +55,23 @@ extern struct power_pmu *ppmu;
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
+extern int register_power_pmu(struct power_pmu *);
+
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
-#define perf_misc_flags(regs) perf_misc_flags(regs)
-
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
/*
- * The power_pmu.get_constraint function returns a 64-bit value and
- * a 64-bit mask that express the constraints between this event and
+ * Only override the default definitions in include/linux/perf_counter.h
+ * if we have hardware PMU support.
+ */
+#ifdef CONFIG_PPC_PERF_CTRS
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#endif
+
+/*
+ * The power_pmu.get_constraint function returns a 32/64-bit value and
+ * a 32/64-bit mask that express the constraints between this event and
* other events.
*
* The value and mask are divided up into (non-overlapping) bitfields
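The add_fields/test_adder pair in the struct above supports a "many small counters packed into one word" trick: add_fields adds 1 to every constraint field an event occupies, and test_adder is chosen so that adding it carries into a spare bit exactly when a field has exceeded its capacity. A simplified, self-contained illustration with one 3-bit field of capacity 2 (all constants invented for the example):

	#include <stdio.h>

	#define ADD_FIELDS 0x1ul	/* adds one event to the counter field */
	#define TEST_ADDER 0x5ul	/* 7 - capacity(2): forces a carry     */
	#define FIELD_OVF  0x8ul	/* spare bit that catches the carry    */

	static int fits(unsigned long sum)
	{
		/* After adding test_adder, the spare bit is set exactly
		 * when more events were packed in than the field allows. */
		return ((sum + TEST_ADDER) & FIELD_OVF) == 0;
	}

	int main(void)
	{
		unsigned long sum = 0;

		for (int n = 1; n <= 3; n++) {
			sum += ADD_FIELDS;	/* one more event in this class */
			printf("%d event(s): %s\n", n,
			       fits(sum) ? "ok" : "over-committed");
		}
		return 0;
	}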
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index e05d26f..82b7220 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,8 @@
* generic accessors and iterators here
*/
#define __real_pte(e,p) ((real_pte_t) { \
- (e), pte_val(*((p) + PTRS_PER_PTE)) })
+ (e), ((e) & _PAGE_COMBO) ? \
+ (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define __rpte_to_pte(r) ((r).pte)
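The fix makes __real_pte() read the high part of the PTE pair only when _PAGE_COMBO says it is meaningful, rather than dereferencing the companion slot unconditionally. The guarded-read pattern in a standalone sketch (the flag value is an illustrative stand-in):

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_COMBO 0x1000ul	/* illustrative stand-in for _PAGE_COMBO */

	struct real_pte {
		unsigned long pte;
		unsigned long hidx;
	};

	/* Read the companion word only when the flag says it exists;
	 * otherwise report zero instead of touching memory blindly. */
	static struct real_pte make_real_pte(unsigned long e, unsigned long *p,
					     size_t ptrs_per_pte)
	{
		struct real_pte r = { .pte = e, .hidx = 0 };

		if (e & PAGE_COMBO)
			r.hidx = *(p + ptrs_per_pte);
		return r;
	}

	int main(void)
	{
		unsigned long pair[2] = { PAGE_COMBO | 0x2a, 0xbeef };

		printf("combo set:   hidx = %#lx\n",
		       make_real_pte(pair[0], pair, 1).hidx);
		printf("combo clear: hidx = %#lx\n",
		       make_real_pte(0x2a, pair, 1).hidx);
		return 0;
	}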
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c1233..168fce7 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
unsigned long entry; /* physical address pointer */
unsigned long base; /* physical address pointer */
unsigned long size;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct rtas_args args;
struct device_node *dev; /* virtual address pointer */
};
@@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
(devfn << 8) | (reg & 0xff);
}
+extern void __cpuinit rtas_give_timebase(void);
+extern void __cpuinit rtas_take_timebase(void);
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_RTAS_H */
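rtas_give_timebase() and rtas_take_timebase() (implemented in kernel/rtas.c, which the diffstat lists but this excerpt does not show) let the boot CPU hand a frozen timebase value to a secondary during bringup. A plausible user-space sketch of such a publish/consume handshake, offered as an assumption about the shape rather than the actual kernel code:

	#include <stdatomic.h>
	#include <stdint.h>

	static atomic_int tb_valid;
	static uint64_t tb_value;

	static void give_timebase(uint64_t tb)	/* boot CPU side */
	{
		tb_value = tb;
		atomic_store_explicit(&tb_valid, 1, memory_order_release);
	}

	static uint64_t take_timebase(void)	/* secondary CPU side */
	{
		while (!atomic_load_explicit(&tb_valid, memory_order_acquire))
			;	/* spin until the boot CPU publishes */
		return tb_value;
	}

	int main(void)
	{
		give_timebase(0x123456789abcull);
		return take_timebase() == 0x123456789abcull ? 0 : 1;
	}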
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 6a4fb29..b73396b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,9 +97,10 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o power4-pmu.o ppc970-pmu.o \
- power5-pmu.o power5+-pmu.o power6-pmu.o \
- power7-pmu.o
+obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o
+obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
+ power5+-pmu.o power6-pmu.o power7-pmu.o
+obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
@@ -108,6 +109,7 @@ obj-y += iomap.o
endif
obj-$(CONFIG_PPC64) += $(obj64-y)
+obj-$(CONFIG_PPC32) += $(obj32-y)
ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
obj-y += ppc_save_regs.o
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4dd38f1..3cadba6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -191,11 +191,49 @@ transfer_to_handler_cont:
mflr r9
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ lis r12,reenable_mmu@h
+ ori r12,r12,reenable_mmu@l
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r10
+ SYNC
+ RFI
+reenable_mmu: /* re-enable mmu so we can */
+ mfmsr r10
+ lwz r12,_MSR(r1)
+ xor r10,r10,r12
+ andi. r10,r10,MSR_EE /* Did EE change? */
+ beq 1f
+
+ /* Save handler and return address into the 2 unused words
+ * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
+ * else can be recovered from the pt_regs except r3 which for
+ * normal interrupts has been set to pt_regs and for syscalls
+ * is an argument, so we temporarily use ORIG_GPR3 to save it
+ */
+ stw r9,8(r1)
+ stw r11,12(r1)
+ stw r3,ORIG_GPR3(r1)
+ bl trace_hardirqs_off
+ lwz r0,GPR0(r1)
+ lwz r3,ORIG_GPR3(r1)
+ lwz r4,GPR4(r1)
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ lwz r9,8(r1)
+ lwz r11,12(r1)
+1: mtctr r11
+ mtlr r9
+ bctr /* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4: rlwinm r12,r12,0,~_TLF_NAPPING
@@ -251,6 +289,31 @@ _GLOBAL(DoSyscall)
#ifdef SHOW_SYSCALLS
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Return from syscalls can (and generally will) hard enable
+ * interrupts. You aren't supposed to call a syscall with
+ * interrupts disabled in the first place. However, to ensure
+ * that we get it right vs. lockdep if it happens, we force
+ * that hard enable here with appropriate tracing if we see
+ * that we have been called with interrupts off
+ */
+ mfmsr r11
+ andi. r12,r11,MSR_EE
+ bne+ 1f
+ /* We came in with interrupts disabled, we enable them now */
+ bl trace_hardirqs_on
+ mfmsr r11
+ lwz r0,GPR0(r1)
+ lwz r3,GPR3(r1)
+ lwz r4,GPR4(r1)
+ ori r11,r11,MSR_EE
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ mtmsr r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
@@ -275,6 +338,7 @@ ret_from_syscall:
rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
/* disable interrupts so current_thread_info()->flags can't change */
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ /* Note: We don't bother telling lockdep about it */
SYNC
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
@@ -288,6 +352,19 @@ ret_from_syscall:
oris r11,r11,0x1000 /* Set SO bit in CR */
stw r11,_CCR(r1)
syscall_exit_cont:
+ lwz r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* If we are going to return from the syscall with interrupts
+ * off, we trace that here. It shouldn't happen, but we want
+ * to catch the bugger if it does, right?
+ */
+ andi. r10,r8,MSR_EE
+ bne+ 1f
+ stw r3,GPR3(r1)
+ bl trace_hardirqs_off
+ lwz r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* If the process has its own DBCR0 value, load it up. The internal
debug mode bit tells us that dbcr0 should be loaded. */
@@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
mtlr r4
mtcr r5
lwz r7,_NIP(r1)
- lwz r8,_MSR(r1)
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
@@ -394,7 +470,9 @@ syscall_exit_work:
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq ret_from_except
- /* Re-enable interrupts */
+ /* Re-enable interrupts. There is no need to trace that with
+ * lockdep as we are supposed to have IRQs on at this point
+ */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10)
@@ -705,6 +783,7 @@ ret_from_except:
/* Hard-disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt. */
+ /* Note: We don't bother telling lockdep about it */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC /* Some chip revs have problems here... */
MTMSRD(r10) /* disable interrupts */
@@ -744,11 +823,24 @@ resume_kernel:
beq+ restore
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep thinks irqs are enabled, we need to call
+ * preempt_schedule_irq with IRQs off, so we inform lockdep
+ * now that we -did- turn them off already
+ */
+ bl trace_hardirqs_off
+#endif
1: bl preempt_schedule_irq
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+ */
+ bl trace_hardirqs_on
+#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
@@ -765,6 +857,28 @@ restore:
stw r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */
+
+ lwz r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep doesn't know about the fact that IRQs are temporarily turned
+ * off in this assembly code while peeking at TI_FLAGS() and such. However
+ * we need to inform it if the exception turned interrupts off, and we
+ * are about to turn them back on.
+ *
+ * The problem here, sadly, is that we don't know whether the exception was
+ * one that turned interrupts off or not. So we always tell lockdep about
+ * turning them on here when we go back to wherever we came from with EE
+ * on, even if that may meen some redudant calls being tracked. Maybe later
+ * we could encode what the exception did somewhere or test the exception
+ * type in the pt_regs but that sounds overkill
+ */
+ andi. r10,r9,MSR_EE
+ beq 1f
+ bl trace_hardirqs_on
+ lwz r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
@@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
- lwz r9,_MSR(r1)
andi. r10,r9,MSR_RI /* check if this exception occurred */
beql nonrecoverable /* at a bad place (MSR:RI = 0) */
@@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
MTMSRD(r10) /* clear the RI bit */
.globl exc_exit_restart
exc_exit_restart:
- lwz r9,_MSR(r1)
lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
mtspr SPRN_SRR0,r12
@@ -1035,11 +1147,18 @@ do_work: /* r10 contains MSR_KERNEL here */
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
+ /* Note: We don't need to inform lockdep that we are enabling
+ * interrupts here. As far as it knows, they are already enabled
+ */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
bl schedule
recheck:
+ /* Note: And we don't tell it we are disabling them again
+ * either. Those disable/enable cycles used to peek at
+ * TI_FLAGS aren't advertised.
+ */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
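In C terms, the TRACE_IRQFLAGS bookkeeping threaded through entry_32.S above amounts to roughly the following paraphrase (stub functions, not the real return path):

	#include <stdio.h>

	#define MSR_EE 0x8000ul

	struct regs { unsigned long msr; int need_resched; };

	static void trace_hardirqs_on(void)  { printf("lockdep: on\n");  }
	static void trace_hardirqs_off(void) { printf("lockdep: off\n"); }
	static void preempt_schedule_irq(void) { printf("reschedule\n"); }

	/* Hard-disabling to peek at TI_FLAGS is not reported to lockdep;
	 * crossing into the scheduler and returning with EE set are. */
	static void exception_return(struct regs *regs)
	{
		if (regs->need_resched) {
			trace_hardirqs_off();	/* lockdep thought irqs were on */
			preempt_schedule_irq();
			trace_hardirqs_on();	/* rebalance before returning */
		}
		if (regs->msr & MSR_EE)
			trace_hardirqs_on();	/* going back with interrupts on */
	}

	int main(void)
	{
		struct regs r = { .msr = MSR_EE, .need_resched = 1 };
		exception_return(&r);
		return 0;
	}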
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 1b12696..ce1f3e4 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -586,7 +586,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
}
- if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
*parent = old;
return;
}
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 4846946..fc21329 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -1124,9 +1124,8 @@ mmu_off:
RFI
/*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to PAGE_OFFSET. From this point on we can't safely
- * call OF any more.
+ * On 601, we use 3 BATs to map up to 24MB of RAM at PAGE_OFFSET
+ * (we keep one for debugging) and on others, we use one 256M BAT.
*/
initial_bats:
lis r11,PAGE_OFFSET@h
@@ -1136,12 +1135,16 @@ initial_bats:
bne 4f
ori r11,r11,4 /* set up BAT registers for 601 */
li r8,0x7f /* valid, block length = 8MB */
- oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
- oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
mtspr SPRN_IBAT0L,r8 /* lower BAT register */
- mtspr SPRN_IBAT1U,r9
- mtspr SPRN_IBAT1L,r10
+ addis r11,r11,0x800000@h
+ addis r8,r8,0x800000@h
+ mtspr SPRN_IBAT1U,r11
+ mtspr SPRN_IBAT1L,r8
+ addis r11,r11,0x800000@h
+ addis r8,r8,0x800000@h
+ mtspr SPRN_IBAT2U,r11
+ mtspr SPRN_IBAT2L,r8
isync
blr
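The new 601 path programs three consecutive IBAT pairs, bumping both the effective and the physical address by 8MB (the addis ...,0x800000@h instructions) between each, so three 8MB blocks cover 24MB. The arithmetic in C, assuming the usual ppc32 PAGE_OFFSET of 0xc0000000:

	#include <stdio.h>

	#define BAT_BLOCK (8u << 20)	/* each 601 BAT here maps 8 MB */

	int main(void)
	{
		unsigned int ea = 0xc0000000;	/* assumed PAGE_OFFSET */
		unsigned int pa = 0x00000000;

		for (int bat = 0; bat < 3; bat++) {
			printf("IBAT%d: %08x -> %08x\n", bat, ea, pa);
			ea += BAT_BLOCK;	/* addis r11,r11,0x800000@h */
			pa += BAT_BLOCK;	/* addis r8,r8,0x800000@h  */
		}
		return 0;
	}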
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
new file mode 100644
index 0000000..75ff47f
--- /dev/null
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -0,0 +1,417 @@
+/*
+ * Performance counter support for MPC7450-family processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+#include <asm/cputable.h>
+
+#define N_COUNTER 6 /* Number of hardware counters */
+#define MAX_ALT 3 /* Maximum number of event alternative codes */
+
+/*
+ * Bits in event code for MPC7450 family
+ */
+#define PM_THRMULT_MSKS 0x40000
+#define PM_THRESH_SH 12
+#define PM_THRESH_MSK 0x3f
+#define PM_PMC_SH 8
+#define PM_PMC_MSK 7
+#define PM_PMCSEL_MSK 0x7f
+
+/*
+ * Classify events according to how specific their PMC requirements are.
+ * Result is:
+ * 0: can go on any PMC
+ * 1: can go on PMCs 1-4
+ * 2: can go on PMCs 1,2,4
+ * 3: can go on PMCs 1 or 2
+ * 4: can only go on one PMC
+ * -1: event code is invalid
+ */
+#define N_CLASSES 5
+
+static int mpc7450_classify_event(u32 event)
+{
+ int pmc;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > N_COUNTER)
+ return -1;
+ return 4;
+ }
+ event &= PM_PMCSEL_MSK;
+ if (event <= 1)
+ return 0;
+ if (event <= 7)
+ return 1;
+ if (event <= 13)
+ return 2;
+ if (event <= 22)
+ return 3;
+ return -1;
+}
+
+/*
+ * Events using threshold and possible threshold scale:
+ * code scale? name
+ * 11e N PM_INSTQ_EXCEED_CYC
+ * 11f N PM_ALTV_IQ_EXCEED_CYC
+ * 128 Y PM_DTLB_SEARCH_EXCEED_CYC
+ * 12b Y PM_LD_MISS_EXCEED_L1_CYC
+ * 220 N PM_CQ_EXCEED_CYC
+ * 30c N PM_GPR_RB_EXCEED_CYC
+ * 30d ? PM_FPR_IQ_EXCEED_CYC ?
+ * 311 Y PM_ITLB_SEARCH_EXCEED
+ * 410 N PM_GPR_IQ_EXCEED_CYC
+ */
+
+/*
+ * Return use of threshold and threshold scale bits:
+ * 0 = uses neither, 1 = uses threshold, 2 = uses both
+ */
+static int mpc7450_threshold_use(u32 event)
+{
+ int pmc, sel;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ sel = event & PM_PMCSEL_MSK;
+ switch (pmc) {
+ case 1:
+ if (sel == 0x1e || sel == 0x1f)
+ return 1;
+ if (sel == 0x28 || sel == 0x2b)
+ return 2;
+ break;
+ case 2:
+ if (sel == 0x20)
+ return 1;
+ break;
+ case 3:
+ if (sel == 0xc || sel == 0xd)
+ return 1;
+ if (sel == 0x11)
+ return 2;
+ break;
+ case 4:
+ if (sel == 0x10)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Layout of constraint bits:
+ * 33222222222211111111110000000000
+ * 10987654321098765432109876543210
+ * |< >< > < > < ><><><><><><>
+ * TS TV G4 G3 G2P6P5P4P3P2P1
+ *
+ * P1 - P6
+ * 0 - 11: Count of events needing PMC1 .. PMC6
+ *
+ * G2
+ * 12 - 14: Count of events needing PMC1 or PMC2
+ *
+ * G3
+ * 16 - 18: Count of events needing PMC1, PMC2 or PMC4
+ *
+ * G4
+ * 20 - 23: Count of events needing PMC1, PMC2, PMC3 or PMC4
+ *
+ * TV
+ * 24 - 29: Threshold value requested
+ *
+ * TS
+ * 30: Threshold scale value requested
+ */
+
+static u32 pmcbits[N_COUNTER][2] = {
+ { 0x00844002, 0x00111001 }, /* PMC1 mask, value: P1,G2,G3,G4 */
+ { 0x00844008, 0x00111004 }, /* PMC2: P2,G2,G3,G4 */
+ { 0x00800020, 0x00100010 }, /* PMC3: P3,G4 */
+ { 0x00840080, 0x00110040 }, /* PMC4: P4,G3,G4 */
+ { 0x00000200, 0x00000100 }, /* PMC5: P5 */
+ { 0x00000800, 0x00000400 } /* PMC6: P6 */
+};
+
+static u32 classbits[N_CLASSES - 1][2] = {
+ { 0x00000000, 0x00000000 }, /* class 0: no constraint */
+ { 0x00800000, 0x00100000 }, /* class 1: G4 */
+ { 0x00040000, 0x00010000 }, /* class 2: G3 */
+ { 0x00004000, 0x00001000 }, /* class 3: G2 */
+};
+
+static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
+{
+ int pmc, class;
+ u32 mask, value;
+ int thresh, tuse;
+
+ class = mpc7450_classify_event(event);
+ if (class < 0)
+ return -1;
+ if (class == 4) {
+ pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK;
+ mask = pmcbits[pmc - 1][0];
+ value = pmcbits[pmc - 1][1];
+ } else {
+ mask = classbits[class][0];
+ value = classbits[class][1];
+ }
+
+ tuse = mpc7450_threshold_use(event);
+ if (tuse) {
+ thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK;
+ mask |= 0x3f << 24;
+ value |= thresh << 24;
+ if (tuse == 2) {
+ mask |= 0x40000000;
+ if ((unsigned int)event & PM_THRMULT_MSKS)
+ value |= 0x40000000;
+ }
+ }
+
+ *maskp = mask;
+ *valp = value;
+ return 0;
+}
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+ { 0x217, 0x317 }, /* PM_L1_DCACHE_MISS */
+ { 0x418, 0x50f, 0x60f }, /* PM_SNOOP_RETRY */
+ { 0x502, 0x602 }, /* PM_L2_HIT */
+ { 0x503, 0x603 }, /* PM_L3_HIT */
+ { 0x504, 0x604 }, /* PM_L2_ICACHE_MISS */
+ { 0x505, 0x605 }, /* PM_L3_ICACHE_MISS */
+ { 0x506, 0x606 }, /* PM_L2_DCACHE_MISS */
+ { 0x507, 0x607 }, /* PM_L3_DCACHE_MISS */
+ { 0x50a, 0x623 }, /* PM_LD_HIT_L3 */
+ { 0x50b, 0x624 }, /* PM_ST_HIT_L3 */
+ { 0x50d, 0x60d }, /* PM_L2_TOUCH_HIT */
+ { 0x50e, 0x60e }, /* PM_L3_TOUCH_HIT */
+ { 0x512, 0x612 }, /* PM_INT_LOCAL */
+ { 0x513, 0x61d }, /* PM_L2_MISS */
+ { 0x514, 0x61e }, /* PM_L3_MISS */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u32 event)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+ if (event < event_alternatives[i][0])
+ break;
+ for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+ if (event == event_alternatives[i][j])
+ return i;
+ }
+ return -1;
+}
+
+static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, nalt = 1;
+ u32 ae;
+
+ alt[0] = event;
+ nalt = 1;
+ i = find_alternative((u32)event);
+ if (i >= 0) {
+ for (j = 0; j < MAX_ALT; ++j) {
+ ae = event_alternatives[i][j];
+ if (ae && ae != (u32)event)
+ alt[nalt++] = ae;
+ }
+ }
+ return nalt;
+}
+
+/*
+ * Bitmaps of which PMCs each class can use for classes 0 - 3.
+ * Bit i is set if PMC i+1 is usable.
+ */
+static const u8 classmap[N_CLASSES] = {
+ 0x3f, 0x0f, 0x0b, 0x03, 0
+};
+
+/* Bit position and width of each PMCSEL field */
+static const int pmcsel_shift[N_COUNTER] = {
+ 6, 0, 27, 22, 17, 11
+};
+static const u32 pmcsel_mask[N_COUNTER] = {
+ 0x7f, 0x3f, 0x1f, 0x1f, 0x1f, 0x3f
+};
+
+/*
+ * Compute MMCR0/1/2 values for a set of events.
+ */
+static int mpc7450_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[])
+{
+ u8 event_index[N_CLASSES][N_COUNTER];
+ int n_classevent[N_CLASSES];
+ int i, j, class, tuse;
+ u32 pmc_inuse = 0, pmc_avail;
+ u32 mmcr0 = 0, mmcr1 = 0, mmcr2 = 0;
+ u32 ev, pmc, thresh;
+
+ if (n_ev > N_COUNTER)
+ return -1;
+
+ /* First pass: count usage in each class */
+ for (i = 0; i < N_CLASSES; ++i)
+ n_classevent[i] = 0;
+ for (i = 0; i < n_ev; ++i) {
+ class = mpc7450_classify_event(event[i]);
+ if (class < 0)
+ return -1;
+ j = n_classevent[class]++;
+ event_index[class][j] = i;
+ }
+
+ /* Second pass: allocate PMCs from most specific event to least */
+ for (class = N_CLASSES - 1; class >= 0; --class) {
+ for (i = 0; i < n_classevent[class]; ++i) {
+ ev = event[event_index[class][i]];
+ if (class == 4) {
+ pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc_inuse & (1 << (pmc - 1)))
+ return -1;
+ } else {
+ /* Find a suitable PMC */
+ pmc_avail = classmap[class] & ~pmc_inuse;
+ if (!pmc_avail)
+ return -1;
+ pmc = ffs(pmc_avail);
+ }
+ pmc_inuse |= 1 << (pmc - 1);
+
+ tuse = mpc7450_threshold_use(ev);
+ if (tuse) {
+ thresh = (ev >> PM_THRESH_SH) & PM_THRESH_MSK;
+ mmcr0 |= thresh << 16;
+ if (tuse == 2 && (ev & PM_THRMULT_MSKS))
+ mmcr2 = 0x80000000;
+ }
+ ev &= pmcsel_mask[pmc - 1];
+ ev <<= pmcsel_shift[pmc - 1];
+ if (pmc <= 2)
+ mmcr0 |= ev;
+ else
+ mmcr1 |= ev;
+ hwc[event_index[class][i]] = pmc - 1;
+ }
+ }
+
+ if (pmc_inuse & 1)
+ mmcr0 |= MMCR0_PMC1CE;
+ if (pmc_inuse & 0x3e)
+ mmcr0 |= MMCR0_PMCnCE;
+
+ /* Return MMCRx values */
+ mmcr[0] = mmcr0;
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcr2;
+ return 0;
+}
+
+/*
+ * Disable counting by a PMC.
+ * Note that the pmc argument is 0-based here, not 1-based.
+ */
+static void mpc7450_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+{
+ if (pmc <= 1)
+ mmcr[0] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+ else
+ mmcr[1] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+}
+
+static int mpc7450_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 1,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 2,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x217, /* PM_L1_DCACHE_MISS */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x122, /* PM_BR_CMPL */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x41c, /* PM_BR_MPRED */
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x225 },
+ [C(OP_WRITE)] = { 0, 0x227 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x129, 0x115 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { 0x634, 0 },
+ },
+ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { 0, 0 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x312 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x223 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x122, 0x41c },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+};
+
+struct power_pmu mpc7450_pmu = {
+ .name = "MPC7450 family",
+ .n_counter = N_COUNTER,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x00111555ul,
+ .test_adder = 0x00301000ul,
+ .compute_mmcr = mpc7450_compute_mmcr,
+ .get_constraint = mpc7450_get_constraint,
+ .get_alternatives = mpc7450_get_alternatives,
+ .disable_pmc = mpc7450_disable_pmc,
+ .n_generic = ARRAY_SIZE(mpc7450_generic_events),
+ .generic_events = mpc7450_generic_events,
+ .cache_events = &mpc7450_cache_events,
+};
+
+static int init_mpc7450_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
+ return -ENODEV;
+
+ return register_power_pmu(&mpc7450_pmu);
+}
+
+arch_initcall(init_mpc7450_pmu);
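To see how the classifier above buckets raw event codes, here is the same decision logic restated as a standalone program; 0x217 and 0x502 come from the alternatives table above, the other sample codes are arbitrary:

	#include <stdio.h>

	#define PM_PMC_SH	8
	#define PM_PMC_MSK	7
	#define PM_PMCSEL_MSK	0x7f
	#define N_COUNTER	6

	/* Same logic as mpc7450_classify_event() above. */
	static int classify(unsigned int event)
	{
		int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;

		if (pmc)
			return pmc > N_COUNTER ? -1 : 4;	/* fixed PMC */
		event &= PM_PMCSEL_MSK;
		if (event <= 1)  return 0;	/* any PMC */
		if (event <= 7)  return 1;	/* PMC1-4 */
		if (event <= 13) return 2;	/* PMC1,2,4 */
		if (event <= 22) return 3;	/* PMC1-2 */
		return -1;
	}

	int main(void)
	{
		unsigned int samples[] = { 0x1, 0x217, 0x502, 0x23 };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("event 0x%x -> class %d\n",
			       samples[i], classify(samples[i]));
		return 0;
	}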
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index fa983a5..a359cb0 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -76,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
dev->dev.archdata.of_node = np;
if (bus_id)
- dev_set_name(&dev->dev, bus_id);
+ dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(dev);
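The one-line fix above closes a format-string hole: bus_id comes from the device tree and was being passed as the format argument, so a '%' in a node name would be parsed as a conversion specifier. The same hazard in plain printf terms:

	#include <stdio.h>

	int main(void)
	{
		const char *name = "node@100%s";	/* odd but legal name */

		/* printf(name); -- undefined behaviour: "%s" is consumed
		 * as a conversion and reads a garbage argument. */
		printf("%s\n", name);	/* safe: name is data, not a format */
		return 0;
	}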
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bb20238..809fdf9 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -29,7 +29,7 @@ struct cpu_hw_counters {
struct perf_counter *counter[MAX_HWCOUNTERS];
u64 events[MAX_HWCOUNTERS];
unsigned int flags[MAX_HWCOUNTERS];
- u64 mmcr[3];
+ unsigned long mmcr[3];
struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
@@ -46,6 +46,115 @@ struct power_pmu *ppmu;
*/
static unsigned int freeze_counters_kernel = MMCR0_FCS;
+/*
+ * 32-bit doesn't have MMCRA but does have an MMCR2,
+ * and a few other names are different.
+ */
+#ifdef CONFIG_PPC32
+
+#define MMCR0_FCHV 0
+#define MMCR0_PMCjCE MMCR0_PMCnCE
+
+#define SPRN_MMCRA SPRN_MMCR2
+#define MMCRA_SAMPLE_ENABLE 0
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+ return 0;
+}
+static inline void perf_set_pmu_inuse(int inuse) { }
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+ return 0;
+}
+static inline void perf_read_regs(struct pt_regs *regs) { }
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC32 */
+
+/*
+ * Things that are specific to 64-bit implementations.
+ */
+#ifdef CONFIG_PPC64
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+ unsigned long mmcra = regs->dsisr;
+
+ if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+ unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+ if (slot > 1)
+ return 4 * (slot - 1);
+ }
+ return 0;
+}
+
+static inline void perf_set_pmu_inuse(int inuse)
+{
+ get_lppaca()->pmcregs_in_use = inuse;
+}
+
+/*
+ * The user wants a data address recorded.
+ * If we're not doing instruction sampling, give them the SDAR
+ * (sampled data address). If we are doing instruction sampling, then
+ * only give them the SDAR if it corresponds to the instruction
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
+ * bit in MMCRA.
+ */
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
+{
+ unsigned long mmcra = regs->dsisr;
+ unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+ POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+
+ if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+ *addrp = mfspr(SPRN_SDAR);
+}
+
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+ unsigned long mmcra = regs->dsisr;
+
+ if (TRAP(regs) != 0xf00)
+ return 0; /* not a PMU interrupt */
+
+ if (ppmu->flags & PPMU_ALT_SIPR) {
+ if (mmcra & POWER6_MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & POWER6_MMCRA_SIPR) ?
+ PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+ }
+ if (mmcra & MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Overload regs->dsisr to store MMCRA so we only need to read it once
+ * on each interrupt.
+ */
+static inline void perf_read_regs(struct pt_regs *regs)
+{
+ regs->dsisr = mfspr(SPRN_MMCRA);
+}
+
+/*
+ * If interrupts were soft-disabled when a PMU interrupt occurs, treat
+ * it as an NMI.
+ */
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+ return !regs->softe;
+}
+
+#endif /* CONFIG_PPC64 */
+
static void perf_counter_interrupt(struct pt_regs *regs);
void perf_counter_print_debug(void)
@@ -78,12 +187,14 @@ static unsigned long read_pmc(int idx)
case 6:
val = mfspr(SPRN_PMC6);
break;
+#ifdef CONFIG_PPC64
case 7:
val = mfspr(SPRN_PMC7);
break;
case 8:
val = mfspr(SPRN_PMC8);
break;
+#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
@@ -115,12 +226,14 @@ static void write_pmc(int idx, unsigned long val)
case 6:
mtspr(SPRN_PMC6, val);
break;
+#ifdef CONFIG_PPC64
case 7:
mtspr(SPRN_PMC7, val);
break;
case 8:
mtspr(SPRN_PMC8, val);
break;
+#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
@@ -135,15 +248,15 @@ static void write_pmc(int idx, unsigned long val)
static int power_check_constraints(u64 event[], unsigned int cflags[],
int n_ev)
{
- u64 mask, value, nv;
+ unsigned long mask, value, nv;
u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+ unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
int i, j;
- u64 addf = ppmu->add_fields;
- u64 tadd = ppmu->test_adder;
+ unsigned long addf = ppmu->add_fields;
+ unsigned long tadd = ppmu->test_adder;
if (n_ev > ppmu->n_counter)
return -1;
@@ -283,7 +396,7 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
static void power_pmu_read(struct perf_counter *counter)
{
- long val, delta, prev;
+ s64 val, delta, prev;
if (!counter->hw.idx)
return;
@@ -403,14 +516,12 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
void hw_perf_disable(void)
{
struct cpu_hw_counters *cpuhw;
- unsigned long ret;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_counters);
- ret = cpuhw->disabled;
- if (!ret) {
+ if (!cpuhw->disabled) {
cpuhw->disabled = 1;
cpuhw->n_added = 0;
@@ -479,7 +590,7 @@ void hw_perf_enable(void)
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
if (cpuhw->n_counters == 0)
- get_lppaca()->pmcregs_in_use = 0;
+ perf_set_pmu_inuse(0);
goto out_enable;
}
@@ -512,7 +623,7 @@ void hw_perf_enable(void)
* bit set and set the hardware counters to their initial values.
* Then unfreeze the counters.
*/
- get_lppaca()->pmcregs_in_use = 1;
+ perf_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -913,6 +1024,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
case PERF_TYPE_RAW:
ev = counter->attr.config;
break;
+ default:
+ return ERR_PTR(-EINVAL);
}
counter->hw.config_base = ev;
counter->hw.idx = 0;
@@ -1007,13 +1120,12 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
-static void record_and_restart(struct perf_counter *counter, long val,
+static void record_and_restart(struct perf_counter *counter, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = counter->hw.sample_period;
s64 prev, delta, left;
int record = 0;
- u64 addr, mmcra, sdsync;
/* we don't have to worry about interrupts here */
prev = atomic64_read(&counter->hw.prev_count);
@@ -1033,8 +1145,8 @@ static void record_and_restart(struct perf_counter *counter, long val,
left = period;
record = 1;
}
- if (left < 0x80000000L)
- val = 0x80000000L - left;
+ if (left < 0x80000000LL)
+ val = 0x80000000LL - left;
}
/*
@@ -1047,22 +1159,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
.period = counter->hw.last_period,
};
- if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
- /*
- * The user wants a data address recorded.
- * If we're not doing instruction sampling,
- * give them the SDAR (sampled data address).
- * If we are doing instruction sampling, then only
- * give them the SDAR if it corresponds to the
- * instruction pointed to by SIAR; this is indicated
- * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
- */
- mmcra = regs->dsisr;
- sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
- POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
- if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
- data.addr = mfspr(SPRN_SDAR);
- }
+ if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
+ perf_get_data_addr(regs, &data.addr);
+
if (perf_counter_overflow(counter, nmi, &data)) {
/*
* Interrupts are coming too fast - throttle them
@@ -1088,25 +1187,12 @@ static void record_and_restart(struct perf_counter *counter, long val,
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
- unsigned long mmcra;
-
- if (TRAP(regs) != 0xf00) {
- /* not a PMU interrupt */
- return user_mode(regs) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
- }
+ u32 flags = perf_get_misc_flags(regs);
- mmcra = regs->dsisr;
- if (ppmu->flags & PPMU_ALT_SIPR) {
- if (mmcra & POWER6_MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
- return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
- }
- if (mmcra & MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
- return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
+ if (flags)
+ return flags;
+ return user_mode(regs) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
}
/*
@@ -1115,20 +1201,12 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- unsigned long mmcra;
unsigned long ip;
- unsigned long slot;
if (TRAP(regs) != 0xf00)
return regs->nip; /* not a PMU interrupt */
- ip = mfspr(SPRN_SIAR);
- mmcra = regs->dsisr;
- if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
- slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
- if (slot > 1)
- ip += 4 * (slot - 1);
- }
+ ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
return ip;
}
@@ -1140,7 +1218,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
int i;
struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
struct perf_counter *counter;
- long val;
+ unsigned long val;
int found = 0;
int nmi;
@@ -1148,16 +1226,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
- /*
- * Overload regs->dsisr to store MMCRA so we only need to read it once.
- */
- regs->dsisr = mfspr(SPRN_MMCRA);
+ perf_read_regs(regs);
- /*
- * If interrupts were soft-disabled when this PMU interrupt
- * occurred, treat it as an NMI.
- */
- nmi = !regs->softe;
+ nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
@@ -1214,50 +1285,22 @@ void hw_perf_counter_setup(int cpu)
cpuhw->mmcr[0] = MMCR0_FC;
}
-extern struct power_pmu power4_pmu;
-extern struct power_pmu ppc970_pmu;
-extern struct power_pmu power5_pmu;
-extern struct power_pmu power5p_pmu;
-extern struct power_pmu power6_pmu;
-extern struct power_pmu power7_pmu;
-
-static int init_perf_counters(void)
+int register_power_pmu(struct power_pmu *pmu)
{
- unsigned long pvr;
-
- /* XXX should get this from cputable */
- pvr = mfspr(SPRN_PVR);
- switch (PVR_VER(pvr)) {
- case PV_POWER4:
- case PV_POWER4p:
- ppmu = &power4_pmu;
- break;
- case PV_970:
- case PV_970FX:
- case PV_970MP:
- ppmu = &ppc970_pmu;
- break;
- case PV_POWER5:
- ppmu = &power5_pmu;
- break;
- case PV_POWER5p:
- ppmu = &power5p_pmu;
- break;
- case 0x3e:
- ppmu = &power6_pmu;
- break;
- case 0x3f:
- ppmu = &power7_pmu;
- break;
- }
+ if (ppmu)
+ return -EBUSY; /* something's already registered */
+
+ ppmu = pmu;
+ pr_info("%s performance monitor hardware support registered\n",
+ pmu->name);
+#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
freeze_counters_kernel = MMCR0_FCHV;
+#endif /* MSR_HV */
return 0;
}
-
-arch_initcall(init_perf_counters);
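With this refactoring the PVR switch is gone: each PMU driver probes for its own CPU from an arch_initcall and calls register_power_pmu(), which accepts the first registration and rejects any later one. The pattern in miniature (all names invented for the sketch):

	#include <stdio.h>
	#include <errno.h>

	struct pmu_desc { const char *name; };
	static struct pmu_desc *registered;

	/* First caller wins: exactly one PMU backend per boot. */
	static int register_pmu(struct pmu_desc *p)
	{
		if (registered)
			return -EBUSY;	/* something's already registered */
		registered = p;
		printf("%s support registered\n", p->name);
		return 0;
	}

	int main(void)
	{
		static struct pmu_desc power4 = { "POWER4/4+" };
		static struct pmu_desc mpc7450 = { "MPC7450 family" };

		register_pmu(&power4);
		if (register_pmu(&mpc7450) == -EBUSY)
			printf("second registration rejected\n");
		return 0;
	}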
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 07bd308..db90b0c 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER4
@@ -179,22 +181,22 @@ static short mmcr1_adder_bits[8] = {
*/
static struct unitinfo {
- u64 value, mask;
- int unit;
- int lowerbit;
+ unsigned long value, mask;
+ int unit;
+ int lowerbit;
} p4_unitinfo[16] = {
- [PM_FPU] = { 0x44000000000000ull, 0x88000000000000ull, PM_FPU, 0 },
- [PM_ISU1] = { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
+ [PM_FPU] = { 0x44000000000000ul, 0x88000000000000ul, PM_FPU, 0 },
+ [PM_ISU1] = { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
[PM_ISU1_ALT] =
- { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
- [PM_IFU] = { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
+ { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
+ [PM_IFU] = { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
[PM_IFU_ALT] =
- { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
- [PM_IDU0] = { 0x10100000000000ull, 0x80840000000000ull, PM_IDU0, 1 },
- [PM_ISU2] = { 0x10140000000000ull, 0x80840000000000ull, PM_ISU2, 0 },
- [PM_LSU0] = { 0x01400000000000ull, 0x08800000000000ull, PM_LSU0, 0 },
- [PM_LSU1] = { 0x00000000000000ull, 0x00010000000000ull, PM_LSU1, 40 },
- [PM_GPS] = { 0x00000000000000ull, 0x00000000000000ull, PM_GPS, 0 }
+ { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
+ [PM_IDU0] = { 0x10100000000000ul, 0x80840000000000ul, PM_IDU0, 1 },
+ [PM_ISU2] = { 0x10140000000000ul, 0x80840000000000ul, PM_ISU2, 0 },
+ [PM_LSU0] = { 0x01400000000000ul, 0x08800000000000ul, PM_LSU0, 0 },
+ [PM_LSU1] = { 0x00000000000000ul, 0x00010000000000ul, PM_LSU1, 40 },
+ [PM_GPS] = { 0x00000000000000ul, 0x00000000000000ul, PM_GPS, 0 }
};
static unsigned char direct_marked_event[8] = {
@@ -249,10 +251,11 @@ static int p4_marked_instr_event(u64 event)
return (mask >> (byte * 8 + bit)) & 1;
}
-static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p4_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, lower, sh;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -282,14 +285,14 @@ static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
value |= p4_unitinfo[unit].value;
sh = p4_unitinfo[unit].lowerbit;
if (sh > 1)
- value |= (u64)lower << sh;
+ value |= (unsigned long)lower << sh;
else if (lower != sh)
return -1;
unit = p4_unitinfo[unit].unit;
/* Set byte lane select field */
mask |= 0xfULL << (28 - 4 * byte);
- value |= (u64)unit << (28 - 4 * byte);
+ value |= (unsigned long)unit << (28 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2/5/6 field */
@@ -353,9 +356,9 @@ static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
}
static int p4_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+ unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel, lower;
unsigned int ttm, grp;
unsigned int pmc_inuse = 0;
@@ -429,9 +432,11 @@ static int p4_compute_mmcr(u64 event[], int n_ev,
return -1;
/* Set TTMxSEL fields. Note, units 1-3 => TTM0SEL codes 0-2 */
- mmcr1 |= (u64)(unituse[3] * 2 + unituse[2]) << MMCR1_TTM0SEL_SH;
- mmcr1 |= (u64)(unituse[7] * 3 + unituse[6] * 2) << MMCR1_TTM1SEL_SH;
- mmcr1 |= (u64)unituse[9] << MMCR1_TTM2SEL_SH;
+ mmcr1 |= (unsigned long)(unituse[3] * 2 + unituse[2])
+ << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (unsigned long)(unituse[7] * 3 + unituse[6] * 2)
+ << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)unituse[9] << MMCR1_TTM2SEL_SH;
/* Set TTCxSEL fields. */
if (unitlower & 0xe)
@@ -456,7 +461,8 @@ static int p4_compute_mmcr(u64 event[], int n_ev,
ttm = unit - 1; /* 2->1, 3->2 */
else
ttm = unit >> 2;
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2*byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
}
@@ -519,7 +525,7 @@ static int p4_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void p4_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p4_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
/*
* Setting the PMCxSEL field to 0 disables PMC x.
@@ -583,16 +589,27 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power4_pmu = {
- .n_counter = 8,
- .max_alternatives = 5,
- .add_fields = 0x0000001100005555ull,
- .test_adder = 0x0011083300000000ull,
- .compute_mmcr = p4_compute_mmcr,
- .get_constraint = p4_get_constraint,
- .get_alternatives = p4_get_alternatives,
- .disable_pmc = p4_disable_pmc,
- .n_generic = ARRAY_SIZE(p4_generic_events),
- .generic_events = p4_generic_events,
- .cache_events = &power4_cache_events,
+static struct power_pmu power4_pmu = {
+ .name = "POWER4/4+",
+ .n_counter = 8,
+ .max_alternatives = 5,
+ .add_fields = 0x0000001100005555ul,
+ .test_adder = 0x0011083300000000ul,
+ .compute_mmcr = p4_compute_mmcr,
+ .get_constraint = p4_get_constraint,
+ .get_alternatives = p4_get_alternatives,
+ .disable_pmc = p4_disable_pmc,
+ .n_generic = ARRAY_SIZE(p4_generic_events),
+ .generic_events = p4_generic_events,
+ .cache_events = &power4_cache_events,
};
+
+static int init_power4_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4"))
+ return -ENODEV;
+
+ return register_power_pmu(&power4_pmu);
+}
+
+arch_initcall(init_power4_pmu);
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 41e5d2d..f4adca8 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
@@ -126,20 +128,21 @@ static const int grsel_shift[8] = {
};
/* Masks and values for using events from the various units */
-static u64 unit_cons[PM_LASTUNIT+1][2] = {
- [PM_FPU] = { 0x3200000000ull, 0x0100000000ull },
- [PM_ISU0] = { 0x0200000000ull, 0x0080000000ull },
- [PM_ISU1] = { 0x3200000000ull, 0x3100000000ull },
- [PM_IFU] = { 0x3200000000ull, 0x2100000000ull },
- [PM_IDU] = { 0x0e00000000ull, 0x0040000000ull },
- [PM_GRS] = { 0x0e00000000ull, 0x0c40000000ull },
+static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
+ [PM_FPU] = { 0x3200000000ul, 0x0100000000ul },
+ [PM_ISU0] = { 0x0200000000ul, 0x0080000000ul },
+ [PM_ISU1] = { 0x3200000000ul, 0x3100000000ul },
+ [PM_IFU] = { 0x3200000000ul, 0x2100000000ul },
+ [PM_IDU] = { 0x0e00000000ul, 0x0040000000ul },
+ [PM_GRS] = { 0x0e00000000ul, 0x0c40000000ul },
};
-static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int power5p_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, sh;
int bit, fmask;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
@@ -171,17 +174,18 @@ static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp)
bit = event & 7;
fmask = (bit == 6)? 7: 3;
sh = grsel_shift[bit];
- mask |= (u64)fmask << sh;
- value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+ mask |= (unsigned long)fmask << sh;
+ value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
+ << sh;
}
/* Set byte lane select field */
- mask |= 0xfULL << (24 - 4 * byte);
- value |= (u64)unit << (24 - 4 * byte);
+ mask |= 0xfUL << (24 - 4 * byte);
+ value |= (unsigned long)unit << (24 - 4 * byte);
}
if (pmc < 5) {
/* need a counter from PMC1-4 set */
- mask |= 0x8000000000000ull;
- value |= 0x1000000000000ull;
+ mask |= 0x8000000000000ul;
+ value |= 0x1000000000000ul;
}
*maskp = mask;
*valp = value;
@@ -452,10 +456,10 @@ static int power5p_marked_instr_event(u64 event)
}
static int power5p_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm;
int i, isbus, bit, grsel;
@@ -517,7 +521,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
}
ttmuse = 0;
for (; i <= PM_GRS; ++i) {
@@ -525,7 +529,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
}
if (ttmuse > 1)
return -1;
@@ -540,10 +544,11 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
unit = PM_ISU0_ALT;
} else if (unit == PM_LSU1 + 1) {
/* select lower word of LSU1 for this byte */
- mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+ mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
}
ttm = unit >> 2;
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -568,7 +573,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
if (isbus && (byte & 2) &&
(psel == 8 || psel == 0x10 || psel == 0x28))
/* add events on higher-numbered bus */
- mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+ mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
} else {
/* Instructions or run cycles on PMC5/6 */
--pmc;
@@ -576,7 +581,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
if (isbus && unit == PM_GRS) {
bit = psel & 7;
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
- mmcr1 |= (u64)grsel << grsel_shift[bit];
+ mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
}
if (power5p_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -599,7 +604,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 3)
mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
@@ -654,18 +659,30 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power5p_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT,
- .add_fields = 0x7000000000055ull,
- .test_adder = 0x3000040000000ull,
- .compute_mmcr = power5p_compute_mmcr,
- .get_constraint = power5p_get_constraint,
- .get_alternatives = power5p_get_alternatives,
- .disable_pmc = power5p_disable_pmc,
- .limited_pmc_event = power5p_limited_pmc_event,
- .flags = PPMU_LIMITED_PMC5_6,
- .n_generic = ARRAY_SIZE(power5p_generic_events),
- .generic_events = power5p_generic_events,
- .cache_events = &power5p_cache_events,
+static struct power_pmu power5p_pmu = {
+ .name = "POWER5+/++",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x7000000000055ul,
+ .test_adder = 0x3000040000000ul,
+ .compute_mmcr = power5p_compute_mmcr,
+ .get_constraint = power5p_get_constraint,
+ .get_alternatives = power5p_get_alternatives,
+ .disable_pmc = power5p_disable_pmc,
+ .limited_pmc_event = power5p_limited_pmc_event,
+ .flags = PPMU_LIMITED_PMC5_6,
+ .n_generic = ARRAY_SIZE(power5p_generic_events),
+ .generic_events = power5p_generic_events,
+ .cache_events = &power5p_cache_events,
};
+
+static int init_power5p_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
+ && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++"))
+ return -ENODEV;
+
+ return register_power_pmu(&power5p_pmu);
+}
+
+arch_initcall(init_power5p_pmu);
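
The unit_cons table above is a set of (mask, value) pairs over one packed constraint word: each shared hardware resource owns a bit-field, an event's mask marks the fields it cares about, and its value states what it needs them to be. Two events can run together only if they agree wherever both masks overlap — a simplified standalone check (the core scheduler additionally sums per-field counts via add_fields and detects overflow with test_adder):

    static int constraints_conflict(unsigned long mask1, unsigned long value1,
                                    unsigned long mask2, unsigned long value2)
    {
            /* Conflict iff both events pin an overlapping field
             * and demand different settings for it. */
            return ((value1 ^ value2) & mask1 & mask2) != 0;
    }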
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 05600b6..29b2c6c 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER5 (not POWER5++)
@@ -130,20 +132,21 @@ static const int grsel_shift[8] = {
};
/* Masks and values for using events from the various units */
-static u64 unit_cons[PM_LASTUNIT+1][2] = {
- [PM_FPU] = { 0xc0002000000000ull, 0x00001000000000ull },
- [PM_ISU0] = { 0x00002000000000ull, 0x00000800000000ull },
- [PM_ISU1] = { 0xc0002000000000ull, 0xc0001000000000ull },
- [PM_IFU] = { 0xc0002000000000ull, 0x80001000000000ull },
- [PM_IDU] = { 0x30002000000000ull, 0x00000400000000ull },
- [PM_GRS] = { 0x30002000000000ull, 0x30000400000000ull },
+static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
+ [PM_FPU] = { 0xc0002000000000ul, 0x00001000000000ul },
+ [PM_ISU0] = { 0x00002000000000ul, 0x00000800000000ul },
+ [PM_ISU1] = { 0xc0002000000000ul, 0xc0001000000000ul },
+ [PM_IFU] = { 0xc0002000000000ul, 0x80001000000000ul },
+ [PM_IDU] = { 0x30002000000000ul, 0x00000400000000ul },
+ [PM_GRS] = { 0x30002000000000ul, 0x30000400000000ul },
};
-static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int power5_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, sh;
int bit, fmask;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -178,8 +181,9 @@ static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
bit = event & 7;
fmask = (bit == 6)? 7: 3;
sh = grsel_shift[bit];
- mask |= (u64)fmask << sh;
- value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+ mask |= (unsigned long)fmask << sh;
+ value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
+ << sh;
}
/*
* Bus events on bytes 0 and 2 can be counted
@@ -188,22 +192,22 @@ static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
if (!pmc)
grp = byte & 1;
/* Set byte lane select field */
- mask |= 0xfULL << (24 - 4 * byte);
- value |= (u64)unit << (24 - 4 * byte);
+ mask |= 0xfUL << (24 - 4 * byte);
+ value |= (unsigned long)unit << (24 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2 field */
- mask |= 0x200000000ull;
- value |= 0x080000000ull;
+ mask |= 0x200000000ul;
+ value |= 0x080000000ul;
} else if (grp == 1) {
/* increment PMC3/4 field */
- mask |= 0x40000000ull;
- value |= 0x10000000ull;
+ mask |= 0x40000000ul;
+ value |= 0x10000000ul;
}
if (pmc < 5) {
/* need a counter from PMC1-4 set */
- mask |= 0x8000000000000ull;
- value |= 0x1000000000000ull;
+ mask |= 0x8000000000000ul;
+ value |= 0x1000000000000ul;
}
*maskp = mask;
*valp = value;
@@ -383,10 +387,10 @@ static int power5_marked_instr_event(u64 event)
}
static int power5_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm, grp;
int i, isbus, bit, grsel;
@@ -457,7 +461,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
}
ttmuse = 0;
for (; i <= PM_GRS; ++i) {
@@ -465,7 +469,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
}
if (ttmuse > 1)
return -1;
@@ -480,10 +484,11 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
unit = PM_ISU0_ALT;
} else if (unit == PM_LSU1 + 1) {
/* select lower word of LSU1 for this byte */
- mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+ mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
}
ttm = unit >> 2;
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -513,7 +518,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
--pmc;
if ((psel == 8 || psel == 0x10) && isbus && (byte & 2))
/* add events on higher-numbered bus */
- mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+ mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
} else {
/* Instructions or run cycles on PMC5/6 */
--pmc;
@@ -521,7 +526,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
if (isbus && unit == PM_GRS) {
bit = psel & 7;
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
- mmcr1 |= (u64)grsel << grsel_shift[bit];
+ mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
}
if (power5_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -541,7 +546,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void power5_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 3)
mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
@@ -596,16 +601,27 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power5_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT,
- .add_fields = 0x7000090000555ull,
- .test_adder = 0x3000490000000ull,
- .compute_mmcr = power5_compute_mmcr,
- .get_constraint = power5_get_constraint,
- .get_alternatives = power5_get_alternatives,
- .disable_pmc = power5_disable_pmc,
- .n_generic = ARRAY_SIZE(power5_generic_events),
- .generic_events = power5_generic_events,
- .cache_events = &power5_cache_events,
+static struct power_pmu power5_pmu = {
+ .name = "POWER5",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x7000090000555ul,
+ .test_adder = 0x3000490000000ul,
+ .compute_mmcr = power5_compute_mmcr,
+ .get_constraint = power5_get_constraint,
+ .get_alternatives = power5_get_alternatives,
+ .disable_pmc = power5_disable_pmc,
+ .n_generic = ARRAY_SIZE(power5_generic_events),
+ .generic_events = power5_generic_events,
+ .cache_events = &power5_cache_events,
};
+
+static int init_power5_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
+ return -ENODEV;
+
+ return register_power_pmu(&power5_pmu);
+}
+
+arch_initcall(init_power5_pmu);
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 46f74be..09ae5bf 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER6
@@ -41,9 +43,9 @@
#define MMCR1_NESTSEL_SH 45
#define MMCR1_NESTSEL_MSK 0x7
#define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
-#define MMCR1_PMC1_LLA ((u64)1 << 44)
-#define MMCR1_PMC1_LLA_VALUE ((u64)1 << 39)
-#define MMCR1_PMC1_ADDR_SEL ((u64)1 << 35)
+#define MMCR1_PMC1_LLA (1ul << 44)
+#define MMCR1_PMC1_LLA_VALUE (1ul << 39)
+#define MMCR1_PMC1_ADDR_SEL (1ul << 35)
#define MMCR1_PMC1SEL_SH 24
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK 0xff
@@ -173,10 +175,10 @@ static int power6_marked_instr_event(u64 event)
* Assign PMC numbers and compute MMCR1 value for a set of events
*/
static int p6_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
int i;
unsigned int pmc, ev, b, u, s, psel;
unsigned int ttmset = 0;
@@ -215,7 +217,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
/* check for conflict on this byte of event bus */
if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
return -1;
- mmcr1 |= (u64)u << MMCR1_TTMSEL_SH(b);
+ mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b);
ttmset |= 1 << b;
if (u == 5) {
/* Nest events have a further mux */
@@ -224,7 +226,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
MMCR1_NESTSEL(mmcr1) != s)
return -1;
ttmset |= 0x10;
- mmcr1 |= (u64)s << MMCR1_NESTSEL_SH;
+ mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH;
}
if (0x30 <= psel && psel <= 0x3d) {
/* these need the PMCx_ADDR_SEL bits */
@@ -243,7 +245,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
if (power6_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
if (pmc < 4)
- mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc);
+ mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
}
mmcr[0] = 0;
if (pmc_inuse & 1)
@@ -265,10 +267,11 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
* 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
* 32-34 select field: nest (subunit) event selector
*/
-static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p6_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, sh, subunit;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
@@ -282,11 +285,11 @@ static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp)
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
sh = byte * 4 + (16 - PM_UNIT_SH);
mask |= PM_UNIT_MSKS << sh;
- value |= (u64)(event & PM_UNIT_MSKS) << sh;
+ value |= (unsigned long)(event & PM_UNIT_MSKS) << sh;
if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
- mask |= (u64)PM_SUBUNIT_MSK << 32;
- value |= (u64)subunit << 32;
+ mask |= (unsigned long)PM_SUBUNIT_MSK << 32;
+ value |= (unsigned long)subunit << 32;
}
}
if (pmc <= 4) {
@@ -458,7 +461,7 @@ static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
return nalt;
}
-static void p6_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
/* Set PMCxSEL to 0 to disable PMCx */
if (pmc <= 3)
@@ -515,18 +518,29 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power6_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT,
- .add_fields = 0x1555,
- .test_adder = 0x3000,
- .compute_mmcr = p6_compute_mmcr,
- .get_constraint = p6_get_constraint,
- .get_alternatives = p6_get_alternatives,
- .disable_pmc = p6_disable_pmc,
- .limited_pmc_event = p6_limited_pmc_event,
- .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
- .n_generic = ARRAY_SIZE(power6_generic_events),
- .generic_events = power6_generic_events,
- .cache_events = &power6_cache_events,
+static struct power_pmu power6_pmu = {
+ .name = "POWER6",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x1555,
+ .test_adder = 0x3000,
+ .compute_mmcr = p6_compute_mmcr,
+ .get_constraint = p6_get_constraint,
+ .get_alternatives = p6_get_alternatives,
+ .disable_pmc = p6_disable_pmc,
+ .limited_pmc_event = p6_limited_pmc_event,
+ .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
+ .n_generic = ARRAY_SIZE(power6_generic_events),
+ .generic_events = power6_generic_events,
+ .cache_events = &power6_cache_events,
};
+
+static int init_power6_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
+ return -ENODEV;
+
+ return register_power_pmu(&power6_pmu);
+}
+
+arch_initcall(init_power6_pmu);
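
The recurring (u64) → (unsigned long) conversion is about shift width, not style: without a cast, an expression such as ttm << MMCR1_TTM0SEL_SH is evaluated at unsigned int width, and shifting a 32-bit operand by 32 or more is undefined behaviour. Casting to unsigned long keeps the 64-bit-only backends correct (unsigned long is 64 bits on ppc64), while the same source now also compiles for 32-bit PMUs, whose register fields never pass bit 31. A sketch of the pitfall:

    unsigned int ttm = 2;

    /* BAD: the shift happens at unsigned int width, so a shift
     * count of 32 or more is undefined behaviour. */
    u64 bad = ttm << 60;

    /* OK: the operand is widened before the shift; on ppc64
     * kernels unsigned long is 64 bits wide. */
    unsigned long good = (unsigned long)ttm << 60;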
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index b72e7a1..5d755ef 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER7
@@ -71,10 +73,11 @@
* 0-9: Count of events needing PMC1..PMC5
*/
-static int power7_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int power7_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, sh;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
@@ -224,10 +227,10 @@ static int power7_marked_instr_event(u64 event)
}
static int power7_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
unsigned int pmc, unit, combine, l2sel, psel;
unsigned int pmc_inuse = 0;
int i;
@@ -265,11 +268,14 @@ static int power7_compute_mmcr(u64 event[], int n_ev,
--pmc;
}
if (pmc <= 3) {
- mmcr1 |= (u64) unit << (MMCR1_TTM0SEL_SH - 4 * pmc);
- mmcr1 |= (u64) combine << (MMCR1_PMC1_COMBINE_SH - pmc);
+ mmcr1 |= (unsigned long) unit
+ << (MMCR1_TTM0SEL_SH - 4 * pmc);
+ mmcr1 |= (unsigned long) combine
+ << (MMCR1_PMC1_COMBINE_SH - pmc);
mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
if (unit == 6) /* L2 events */
- mmcr1 |= (u64) l2sel << MMCR1_L2SEL_SH;
+ mmcr1 |= (unsigned long) l2sel
+ << MMCR1_L2SEL_SH;
}
if (power7_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -287,10 +293,10 @@ static int power7_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void power7_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 3)
- mmcr[1] &= ~(0xffULL << MMCR1_PMCSEL_SH(pmc));
+ mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
}
static int power7_generic_events[] = {
@@ -342,16 +348,27 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power7_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT + 1,
- .add_fields = 0x1555ull,
- .test_adder = 0x3000ull,
- .compute_mmcr = power7_compute_mmcr,
- .get_constraint = power7_get_constraint,
- .get_alternatives = power7_get_alternatives,
- .disable_pmc = power7_disable_pmc,
- .n_generic = ARRAY_SIZE(power7_generic_events),
- .generic_events = power7_generic_events,
- .cache_events = &power7_cache_events,
+static struct power_pmu power7_pmu = {
+ .name = "POWER7",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT + 1,
+ .add_fields = 0x1555ul,
+ .test_adder = 0x3000ul,
+ .compute_mmcr = power7_compute_mmcr,
+ .get_constraint = power7_get_constraint,
+ .get_alternatives = power7_get_alternatives,
+ .disable_pmc = power7_disable_pmc,
+ .n_generic = ARRAY_SIZE(power7_generic_events),
+ .generic_events = power7_generic_events,
+ .cache_events = &power7_cache_events,
};
+
+static int init_power7_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
+ return -ENODEV;
+
+ return register_power_pmu(&power7_pmu);
+}
+
+arch_initcall(init_power7_pmu);
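
Every disable_pmc implementation is the same idea: a PMCxSEL field of 0 means "count nothing", so stopping counter x is a masked clear of its select field in MMCR1. A generic sketch — the field width and shift differ per CPU (0xff on POWER7 above versus 0x7f on POWER5):

    static void pmu_clear_pmcsel(unsigned long mmcr[], unsigned int pmc,
                                 unsigned long sel_msk, int sel_sh)
    {
            /* Zeroing PMCxSEL stops PMC x without disturbing the
             * select fields of the other counters. */
            mmcr[1] &= ~(sel_msk << sel_sh);
    }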
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index ba0a357..6637c87 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -10,7 +10,8 @@
*/
#include <linux/string.h>
#include <linux/perf_counter.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for PPC970
@@ -183,7 +185,7 @@ static int p970_marked_instr_event(u64 event)
}
/* Masks and values for using events from the various units */
-static u64 unit_cons[PM_LASTUNIT+1][2] = {
+static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
[PM_FPU] = { 0xc80000000000ull, 0x040000000000ull },
[PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull },
[PM_ISU] = { 0x080000000000ull, 0x020000000000ull },
@@ -192,10 +194,11 @@ static u64 unit_cons[PM_LASTUNIT+1][2] = {
[PM_STS] = { 0x380000000000ull, 0x310000000000ull },
};
-static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p970_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, sh, spcsel;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -222,7 +225,7 @@ static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
grp = byte & 1;
/* Set byte lane select field */
mask |= 0xfULL << (28 - 4 * byte);
- value |= (u64)unit << (28 - 4 * byte);
+ value |= (unsigned long)unit << (28 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2/5/6 field */
@@ -236,7 +239,7 @@ static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
if (spcsel) {
mask |= 3ull << 48;
- value |= (u64)spcsel << 48;
+ value |= (unsigned long)spcsel << 48;
}
*maskp = mask;
*valp = value;
@@ -257,9 +260,9 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
}
static int p970_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+ unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm, grp;
unsigned int pmc_inuse = 0;
@@ -320,7 +323,7 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
continue;
ttm = unitmap[i];
++ttmuse[(ttm >> 2) & 1];
- mmcr1 |= (u64)(ttm & ~4) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH;
}
/* Check only one unit per TTMx */
if (ttmuse[0] > 1 || ttmuse[1] > 1)
@@ -340,7 +343,8 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
if (unit == PM_LSU1L && byte >= 2)
mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
}
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -386,7 +390,8 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
for (pmc = 0; pmc < 2; ++pmc)
mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
for (; pmc < 8; ++pmc)
- mmcr1 |= (u64)pmcsel[pmc] << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
+ mmcr1 |= (unsigned long)pmcsel[pmc]
+ << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
if (pmc_inuse & 1)
mmcr0 |= MMCR0_PMC1CE;
if (pmc_inuse & 0xfe)
@@ -401,7 +406,7 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void p970_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
int shift, i;
@@ -467,16 +472,28 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu ppc970_pmu = {
- .n_counter = 8,
- .max_alternatives = 2,
- .add_fields = 0x001100005555ull,
- .test_adder = 0x013300000000ull,
- .compute_mmcr = p970_compute_mmcr,
- .get_constraint = p970_get_constraint,
- .get_alternatives = p970_get_alternatives,
- .disable_pmc = p970_disable_pmc,
- .n_generic = ARRAY_SIZE(ppc970_generic_events),
- .generic_events = ppc970_generic_events,
- .cache_events = &ppc970_cache_events,
+static struct power_pmu ppc970_pmu = {
+ .name = "PPC970/FX/MP",
+ .n_counter = 8,
+ .max_alternatives = 2,
+ .add_fields = 0x001100005555ull,
+ .test_adder = 0x013300000000ull,
+ .compute_mmcr = p970_compute_mmcr,
+ .get_constraint = p970_get_constraint,
+ .get_alternatives = p970_get_alternatives,
+ .disable_pmc = p970_disable_pmc,
+ .n_generic = ARRAY_SIZE(ppc970_generic_events),
+ .generic_events = ppc970_generic_events,
+ .cache_events = &ppc970_cache_events,
};
+
+static int init_ppc970_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
+ && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP"))
+ return -ENODEV;
+
+ return register_power_pmu(&ppc970_pmu);
+}
+
+arch_initcall(init_ppc970_pmu);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3e7135b..892a9f2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -528,7 +528,7 @@ void show_regs(struct pt_regs * regs)
for (i = 0; i < 32; i++) {
if ((i % REGS_PER_LINE) == 0)
- printk("\n" KERN_INFO "GPR%02d: ", i);
+ printk("\nGPR%02d: ", i);
printk(REG " ", regs->gpr[i]);
if (i == LAST_VOLATILE && !FULL_REGS(regs))
break;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ef6f649..a538824 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1947,8 +1947,47 @@ static void __init fixup_device_tree_maple(void)
prom_setprop(isa, name, "ranges",
isa_ranges, sizeof(isa_ranges));
}
+
+#define CPC925_MC_START 0xf8000000
+#define CPC925_MC_LENGTH 0x1000000
+/* The values for memory-controller don't have right number of cells */
+static void __init fixup_device_tree_maple_memory_controller(void)
+{
+ phandle mc;
+ u32 mc_reg[4];
+ char *name = "/hostbridge@f8000000";
+ struct prom_t *_prom = &RELOC(prom);
+ u32 ac, sc;
+
+ mc = call_prom("finddevice", 1, 1, ADDR(name));
+ if (!PHANDLE_VALID(mc))
+ return;
+
+ if (prom_getproplen(mc, "reg") != 8)
+ return;
+
+ prom_getprop(_prom->root, "#address-cells", &ac, sizeof(ac));
+ prom_getprop(_prom->root, "#size-cells", &sc, sizeof(sc));
+ if ((ac != 2) || (sc != 2))
+ return;
+
+ if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
+ return;
+
+ if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
+ return;
+
+ prom_printf("Fixing up bogus hostbridge on Maple...\n");
+
+ mc_reg[0] = 0x0;
+ mc_reg[1] = CPC925_MC_START;
+ mc_reg[2] = 0x0;
+ mc_reg[3] = CPC925_MC_LENGTH;
+ prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
+}
#else
#define fixup_device_tree_maple()
+#define fixup_device_tree_maple_memory_controller()
#endif
#ifdef CONFIG_PPC_CHRP
@@ -2189,6 +2228,7 @@ static void __init fixup_device_tree_efika(void)
static void __init fixup_device_tree(void)
{
fixup_device_tree_maple();
+ fixup_device_tree_maple_memory_controller();
fixup_device_tree_chrp();
fixup_device_tree_pmac();
fixup_device_tree_efika();
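
The Maple fixup works because, with #address-cells = #size-cells = 2, one "reg" entry is four 32-bit cells: { addr_hi, addr_lo, size_hi, size_lo }. The firmware's two-cell { START, LENGTH } pair therefore becomes { 0, START, 0, LENGTH }, the same numbers placed in the low words. A sketch of the encoding:

    static void encode_reg_2cell(u32 reg[4], u64 addr, u64 size)
    {
            reg[0] = addr >> 32;    /* addr_hi */
            reg[1] = (u32)addr;     /* addr_lo */
            reg[2] = size >> 32;    /* size_hi */
            reg[3] = (u32)size;     /* size_lo */
    }

    /* encode_reg_2cell(mc_reg, CPC925_MC_START, CPC925_MC_LENGTH)
     * yields { 0x0, 0xf8000000, 0x0, 0x1000000 }, matching the
     * values written by the fixup above. */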
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index ee4c760..c434823 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -38,9 +38,10 @@
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <asm/atomic.h>
+#include <asm/time.h>
struct rtas_t rtas = {
- .lock = SPIN_LOCK_UNLOCKED
+ .lock = __RAW_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);
@@ -67,6 +68,28 @@ unsigned long rtas_rmo_buf;
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);
+/* RTAS uses home-made raw locking instead of spin_lock_irqsave()
+ * because it can be called from contexts as nasty as having the
+ * timebase stopped, which would lock up with normal locks and
+ * spinlock debugging enabled.
+ */
+static unsigned long lock_rtas(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ __raw_spin_lock_flags(&rtas.lock, flags);
+ return flags;
+}
+
+static void unlock_rtas(unsigned long flags)
+{
+ __raw_spin_unlock(&rtas.lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
/*
* call_rtas_display_status and call_rtas_display_status_delay
* are designed only for very early low-level debugging, which
@@ -79,7 +102,7 @@ static void call_rtas_display_status(char c)
if (!rtas.base)
return;
- spin_lock_irqsave(&rtas.lock, s);
+ s = lock_rtas();
args->token = 10;
args->nargs = 1;
@@ -89,7 +112,7 @@ static void call_rtas_display_status(char c)
enter_rtas(__pa(args));
- spin_unlock_irqrestore(&rtas.lock, s);
+ unlock_rtas(s);
}
static void call_rtas_display_status_delay(char c)
@@ -411,8 +434,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
return -1;
- /* Gotta do something different here, use global lock for now... */
- spin_lock_irqsave(&rtas.lock, s);
+ s = lock_rtas();
rtas_args = &rtas.args;
rtas_args->token = token;
@@ -439,8 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
outputs[i] = rtas_args->rets[i+1];
ret = (nret > 0)? rtas_args->rets[0]: 0;
- /* Gotta do something different here, use global lock for now... */
- spin_unlock_irqrestore(&rtas.lock, s);
+ unlock_rtas(s);
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -837,7 +858,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
buff_copy = get_errorlog_buffer();
- spin_lock_irqsave(&rtas.lock, flags);
+ flags = lock_rtas();
rtas.args = args;
enter_rtas(__pa(&rtas.args));
@@ -848,7 +869,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
if (args.rets[0] == -1)
errbuf = __fetch_rtas_last_error(buff_copy);
- spin_unlock_irqrestore(&rtas.lock, flags);
+ unlock_rtas(flags);
if (buff_copy) {
if (errbuf)
@@ -951,3 +972,33 @@ int __init early_init_dt_scan_rtas(unsigned long node,
/* break now */
return 1;
}
+
+static raw_spinlock_t timebase_lock;
+static u64 timebase = 0;
+
+void __cpuinit rtas_give_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ hard_irq_disable();
+ __raw_spin_lock(&timebase_lock);
+ rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
+ timebase = get_tb();
+ __raw_spin_unlock(&timebase_lock);
+
+ while (timebase)
+ barrier();
+ rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
+ local_irq_restore(flags);
+}
+
+void __cpuinit rtas_take_timebase(void)
+{
+ while (!timebase)
+ barrier();
+ __raw_spin_lock(&timebase_lock);
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ __raw_spin_unlock(&timebase_lock);
+}
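
rtas_give_timebase()/rtas_take_timebase() centralize a handshake that several platforms below previously open-coded. The shared u64 doubles as data and flag: non-zero means "published but not yet consumed". Stripped of the RTAS freeze/thaw calls and the raw lock, the protocol reduces to this sketch:

    static volatile u64 shared_tb;  /* 0 == nothing published yet */

    static void give(void)          /* boot CPU, timebase frozen */
    {
            shared_tb = get_tb();   /* publish */
            while (shared_tb)       /* spin until the taker acks */
                    barrier();
            /* safe to thaw the timebase again here */
    }

    static void take(void)          /* secondary CPU */
    {
            while (!shared_tb)      /* wait for publication */
                    barrier();
            set_tb(shared_tb >> 32, shared_tb & 0xffffffff);
            shared_tb = 0;          /* ack: releases the giver */
    }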
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d15424..e1e3059 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -119,6 +119,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
*/
notrace void __init machine_init(unsigned long dt_ptr)
{
+ lockdep_init();
+
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 65484b2..0b47de0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -68,7 +68,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
-static volatile unsigned int cpu_callin_map[NR_CPUS];
+/* Can't be static due to PowerMac hackery */
+volatile unsigned int cpu_callin_map[NR_CPUS];
int smt_enabled_at_boot = 1;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 15391c2..eae4511 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,6 +53,7 @@
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/perf_counter.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -525,6 +526,26 @@ void __init iSeries_time_init_early(void)
}
#endif /* CONFIG_PPC_ISERIES */
+#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
+DEFINE_PER_CPU(u8, perf_counter_pending);
+
+void set_perf_counter_pending(void)
+{
+ get_cpu_var(perf_counter_pending) = 1;
+ set_dec(1);
+ put_cpu_var(perf_counter_pending);
+}
+
+#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending)
+#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0
+
+#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+
+#define test_perf_counter_pending() 0
+#define clear_perf_counter_pending()
+
+#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+
/*
* For iSeries shared processors, we have to let the hypervisor
* set the hardware decrementer. We set a virtual decrementer
@@ -551,6 +572,10 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(DECREMENTER_MAX);
#ifdef CONFIG_PPC32
+ if (test_perf_counter_pending()) {
+ clear_perf_counter_pending();
+ perf_counter_do_pending();
+ }
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
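
On 32-bit, set_perf_counter_pending() is effectively a deferred self-interrupt: set_dec(1) makes the decrementer fire almost immediately, and the timer_interrupt() hunk above then notices the per-cpu flag and runs perf_counter_do_pending() in ordinary interrupt context. A sketch of the intended caller — a PMU overflow path that must not do wakeup work in place:

    static void pmu_overflow(struct perf_counter *counter)
    {
            /* ... record the sample with interrupts hard-disabled ... */

            /* Defer wakeups and signal delivery: timer_interrupt()
             * will spot the flag and call perf_counter_do_pending(). */
            set_perf_counter_pending();
    }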
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 0362a89..acb74a1 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -219,7 +219,7 @@ void udbg_init_pas_realmode(void)
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
#include <platforms/44x/44x.h>
-static int udbg_44x_as1_flush(void)
+static void udbg_44x_as1_flush(void)
{
if (udbg_comport) {
while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 2d2192e..3e68363 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5beffc8..830bef0 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -302,7 +302,7 @@ good_area:
* the fault.
*/
survive:
- ret = handle_mm_fault(mm, vma, address, is_write);
+ ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
new file mode 100644
index 0000000..c2186c7
--- /dev/null
+++ b/arch/powerpc/mm/highmem.c
@@ -0,0 +1,77 @@
+/*
+ * highmem.c: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabytes of physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes of physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ *
+ * Reworked for PowerPC by various contributors. Moved from
+ * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * give a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+{
+ unsigned int idx;
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ debug_kmap_atomic(type);
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+ __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
+ local_flush_tlb_page(NULL, vaddr);
+
+ return (void*) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
+ pagefault_enable();
+ return;
+ }
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ /*
+ * Force other mappings to Oops if they try to access
+ * this pte without first remapping it.
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_page(NULL, vaddr);
+#endif
+ pagefault_enable();
+}
+EXPORT_SYMBOL(kunmap_atomic);
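
Typical usage of the newly exported pair, as a sketch — KM_USER0 is one of the standard per-CPU kmap slots, and plain kmap_atomic() is just kmap_atomic_prot() with the default kernel protection:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void zero_highpage(struct page *page)
    {
            char *addr = kmap_atomic_prot(page, KM_USER0, PAGE_KERNEL);

            /* Preemption and pagefaults stay disabled in between */
            memset(addr, 0, PAGE_SIZE);
            kunmap_atomic(addr, KM_USER0);
    }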
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 42e09a9..0362c88 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
#include <asm/machdep.h>
#include <asm/prom.h>
@@ -65,7 +66,6 @@ define_machine(warp) {
static u32 post_info;
-/* I am not sure this is the best place for this... */
static int __init warp_post_info(void)
{
struct device_node *np;
@@ -194,9 +194,9 @@ static int pika_setup_leds(void)
return 0;
}
-static void pika_setup_critical_temp(struct i2c_client *client)
+static void pika_setup_critical_temp(struct device_node *np,
+ struct i2c_client *client)
{
- struct device_node *np;
int irq, rc;
/* Do this before enabling critical temp interrupt since we
@@ -208,14 +208,7 @@ static void pika_setup_critical_temp(struct i2c_client *client)
i2c_smbus_write_byte_data(client, 2, 65); /* Thigh */
i2c_smbus_write_byte_data(client, 3, 0); /* Tlow */
- np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
- if (np == NULL) {
- printk(KERN_ERR __FILE__ ": Unable to find ad7414\n");
- return;
- }
-
irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
if (irq == NO_IRQ) {
printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n");
return;
@@ -244,32 +237,24 @@ static inline void pika_dtm_check_fan(void __iomem *fpga)
static int pika_dtm_thread(void __iomem *fpga)
{
- struct i2c_adapter *adap;
+ struct device_node *np;
struct i2c_client *client;
- /* We loop in case either driver was compiled as a module and
- * has not been insmoded yet.
- */
- while (!(adap = i2c_get_adapter(0))) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- }
-
- while (1) {
- list_for_each_entry(client, &adap->clients, list)
- if (client->addr == 0x4a)
- goto found_it;
+ np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
+ if (np == NULL)
+ return -ENOENT;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ client = of_find_i2c_device_by_node(np);
+ if (client == NULL) {
+ of_node_put(np);
+ return -ENOENT;
}
-found_it:
- pika_setup_critical_temp(client);
+ pika_setup_critical_temp(np, client);
- i2c_put_adapter(adap);
+ of_node_put(np);
- printk(KERN_INFO "PIKA DTM thread running.\n");
+ printk(KERN_INFO "Warp DTM thread running.\n");
while (!kthread_should_stop()) {
int val;
@@ -291,7 +276,6 @@ found_it:
return 0;
}
-
static int __init pika_dtm_start(void)
{
struct task_struct *dtm_thread;
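
The warp conversion replaces polling i2c adapter 0's client list with a direct device-tree lookup: of_find_i2c_device_by_node() (from the linux/of_i2c.h include added above) maps the DT node to the i2c_client the i2c core already bound to it. The new lookup shape, in brief:

    struct device_node *np;
    struct i2c_client *client;

    np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
    if (np) {
            client = of_find_i2c_device_by_node(np);
            /* ... use client, then drop the node reference ... */
            of_node_put(np);
    }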
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 51eec0c..627908a 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -37,6 +37,7 @@
#include <linux/of_platform.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
+#include <sysdev/simple_gpio.h>
#include "mpc86xx.h"
@@ -51,6 +52,9 @@ static struct of_device_id __initdata mpc8610_ids[] = {
static int __init mpc8610_declare_of_platform_devices(void)
{
+ /* Firstly, register PIXIS GPIOs. */
+ simple_gpiochip_init("fsl,fpga-pixis-gpio-bank");
+
/* Without this call, the SSI device driver won't get probed. */
of_platform_bus_probe(NULL, mpc8610_ids, NULL);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index c419254..61187be 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -1,7 +1,7 @@
config PPC64
bool "64-bit kernel"
default n
- select HAVE_PERF_COUNTERS
+ select PPC_HAVE_PMU_SUPPORT
help
This option selects whether a 32-bit or a 64-bit kernel
will be built.
@@ -78,6 +78,7 @@ config POWER4_ONLY
config 6xx
def_bool y
depends on PPC32 && PPC_BOOK3S
+ select PPC_HAVE_PMU_SUPPORT
config POWER3
bool
@@ -246,6 +247,15 @@ config VIRT_CPU_ACCOUNTING
If in doubt, say Y here.
+config PPC_HAVE_PMU_SUPPORT
+ bool
+
+config PPC_PERF_CTRS
+ def_bool y
+ depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT
+ help
+ This enables the powerpc-specific perf_counter back-end.
+
config SMP
depends on PPC_STD_MMU || FSL_BOOKE
bool "Symmetric multi-processing support"
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 9046803..bc97fad 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -36,7 +36,6 @@
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
-#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
@@ -140,31 +139,6 @@ static void __devinit smp_cell_setup_cpu(int cpu)
mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
}
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase = 0;
-
-static void __devinit cell_give_timebase(void)
-{
- spin_lock(&timebase_lock);
- rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
- timebase = get_tb();
- spin_unlock(&timebase_lock);
-
- while (timebase)
- barrier();
- rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-static void __devinit cell_take_timebase(void)
-{
- while (!timebase)
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase >> 32, timebase & 0xffffffff);
- timebase = 0;
- spin_unlock(&timebase_lock);
-}
-
static void __devinit smp_cell_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -224,8 +198,8 @@ void __init smp_init_cell(void)
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = cell_give_timebase;
- smp_ops->take_timebase = cell_take_timebase;
+ smp_ops->give_timebase = rtas_give_timebase;
+ smp_ops->take_timebase = rtas_take_timebase;
}
DBG(" <- smp_init_cell()\n");
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
index 95d8dad..d06ba87 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
}
ret = 0;
- *flt = handle_mm_fault(mm, vma, ea, is_write);
+ *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(*flt & VM_FAULT_ERROR)) {
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index 10a4a4d..02cafec 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -26,7 +26,6 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
-#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
@@ -42,40 +41,12 @@ static void __devinit smp_chrp_setup_cpu(int cpu_nr)
mpic_setup_this_cpu();
}
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned int timebase_upper = 0, timebase_lower = 0;
-
-void __devinit smp_chrp_give_timebase(void)
-{
- spin_lock(&timebase_lock);
- rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
- timebase_upper = get_tbu();
- timebase_lower = get_tbl();
- spin_unlock(&timebase_lock);
-
- while (timebase_upper || timebase_lower)
- barrier();
- rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-void __devinit smp_chrp_take_timebase(void)
-{
- while (!(timebase_upper || timebase_lower))
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase_upper, timebase_lower);
- timebase_upper = 0;
- timebase_lower = 0;
- spin_unlock(&timebase_lock);
- printk("CPU %i taken timebase\n", smp_processor_id());
-}
-
/* CHRP with openpic */
struct smp_ops_t chrp_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_chrp_kick_cpu,
.setup_cpu = smp_chrp_setup_cpu,
- .give_timebase = smp_chrp_give_timebase,
- .take_timebase = smp_chrp_take_timebase,
+ .give_timebase = rtas_give_timebase,
+ .take_timebase = rtas_take_timebase,
};
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index bfd60e4..0636a3d 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -335,3 +335,62 @@ define_machine(maple) {
.progress = maple_progress,
.power_save = power4_idle,
};
+
+#ifdef CONFIG_EDAC
+/*
+ * Register a platform device for CPC925 memory controller on
+ * Motorola ATCA-6101 blade.
+ */
+#define MAPLE_CPC925_MODEL "Motorola,ATCA-6101"
+static int __init maple_cpc925_edac_setup(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np = NULL;
+ struct resource r;
+ const unsigned char *model;
+ int ret;
+
+ np = of_find_node_by_path("/");
+ if (!np) {
+ printk(KERN_ERR "%s: Unable to get root node\n", __func__);
+ return -ENODEV;
+ }
+
+ model = (const unsigned char *)of_get_property(np, "model", NULL);
+ if (!model) {
+ printk(KERN_ERR "%s: Unabel to get model info\n", __func__);
+ return -ENODEV;
+ }
+
+ ret = strcmp(model, MAPLE_CPC925_MODEL);
+ of_node_put(np);
+
+ if (ret != 0)
+ return 0;
+
+ np = of_find_node_by_type(NULL, "memory-controller");
+ if (!np) {
+ printk(KERN_ERR "%s: Unable to find memory-controller node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+
+ if (ret < 0) {
+ printk(KERN_ERR "%s: Unable to get memory-controller reg\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ printk(KERN_INFO "%s: CPC925 platform device created\n", __func__);
+
+ return 0;
+}
+machine_device_initcall(maple, maple_cpc925_edac_setup);
+#endif
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 153051e..a461934 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -71,20 +71,25 @@ static void pas_restart(char *cmd)
}
#ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(timebase_lock);
+static raw_spinlock_t timebase_lock;
static unsigned long timebase;
static void __devinit pas_give_timebase(void)
{
- spin_lock(&timebase_lock);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ hard_irq_disable();
+ __raw_spin_lock(&timebase_lock);
mtspr(SPRN_TBCTL, TBCTL_FREEZE);
isync();
timebase = get_tb();
- spin_unlock(&timebase_lock);
+ __raw_spin_unlock(&timebase_lock);
while (timebase)
barrier();
mtspr(SPRN_TBCTL, TBCTL_RESTART);
+ local_irq_restore(flags);
}
static void __devinit pas_take_timebase(void)
@@ -92,10 +97,10 @@ static void __devinit pas_take_timebase(void)
while (!timebase)
smp_rmb();
- spin_lock(&timebase_lock);
+ __raw_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- spin_unlock(&timebase_lock);
+ __raw_spin_unlock(&timebase_lock);
}
struct smp_ops_t pas_smp_ops = {
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 86f69a4..c205226 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -103,11 +103,6 @@ unsigned long smu_cmdbuf_abs;
EXPORT_SYMBOL(smu_cmdbuf_abs);
#endif
-#ifdef CONFIG_SMP
-extern struct smp_ops_t psurge_smp_ops;
-extern struct smp_ops_t core99_smp_ops;
-#endif /* CONFIG_SMP */
-
static void pmac_show_cpuinfo(struct seq_file *m)
{
struct device_node *np;
@@ -341,34 +336,6 @@ static void __init pmac_setup_arch(void)
ROOT_DEV = DEFAULT_ROOT_DEVICE;
#endif
-#ifdef CONFIG_SMP
- /* Check for Core99 */
- ic = of_find_node_by_name(NULL, "uni-n");
- if (!ic)
- ic = of_find_node_by_name(NULL, "u3");
- if (!ic)
- ic = of_find_node_by_name(NULL, "u4");
- if (ic) {
- of_node_put(ic);
- smp_ops = &core99_smp_ops;
- }
-#ifdef CONFIG_PPC32
- else {
- /*
- * We have to set bits in cpu_possible_map here since the
- * secondary CPU(s) aren't in the device tree, and
- * setup_per_cpu_areas only allocates per-cpu data for
- * CPUs in the cpu_possible_map.
- */
- int cpu;
-
- for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
- cpu_set(cpu, cpu_possible_map);
- smp_ops = &psurge_smp_ops;
- }
-#endif
-#endif /* CONFIG_SMP */
-
#ifdef CONFIG_ADB
if (strstr(cmd_line, "adb_sync")) {
extern int __adb_probe_sync;
@@ -512,6 +479,14 @@ static void __init pmac_init_early(void)
#ifdef CONFIG_PPC64
iommu_init_early_dart();
#endif
+
+ /* SMP Init has to be done early as we need to patch up
+ * cpu_possible_map before interrupt stacks are allocated
+ * or kaboom...
+ */
+#ifdef CONFIG_SMP
+ pmac_setup_smp();
+#endif
}
static int __init pmac_declare_of_platform_devices(void)
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index cf1dbe7..6d4da7b 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -64,10 +64,11 @@
extern void __secondary_start_pmac_0(void);
extern int pmac_pfunc_base_install(void);
-#ifdef CONFIG_PPC32
+static void (*pmac_tb_freeze)(int freeze);
+static u64 timebase;
+static int tb_req;
-/* Sync flag for HW tb sync */
-static volatile int sec_tb_reset = 0;
+#ifdef CONFIG_PPC32
/*
* Powersurge (old powermac SMP) support.
@@ -294,6 +295,9 @@ static int __init smp_psurge_probe(void)
psurge_quad_init();
/* All released cards using this HW design have 4 CPUs */
ncpus = 4;
+ /* Not sure how timebase sync works on those, so use the SW sync */
+ smp_ops->give_timebase = smp_generic_give_timebase;
+ smp_ops->take_timebase = smp_generic_take_timebase;
} else {
iounmap(quad_base);
if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
@@ -308,18 +312,15 @@ static int __init smp_psurge_probe(void)
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
- /*
- * This is necessary because OF doesn't know about the
+ /* This is necessary because OF doesn't know about the
* secondary cpu(s), and thus there aren't nodes in the
* device tree for them, and smp_setup_cpu_maps hasn't
- * set their bits in cpu_possible_map and cpu_present_map.
+ * set their bits in cpu_present_map.
*/
if (ncpus > NR_CPUS)
ncpus = NR_CPUS;
- for (i = 1; i < ncpus ; ++i) {
+ for (i = 1; i < ncpus ; ++i)
cpu_set(i, cpu_present_map);
- set_hard_smp_processor_id(i, i);
- }
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
@@ -329,8 +330,14 @@ static int __init smp_psurge_probe(void)
static void __init smp_psurge_kick_cpu(int nr)
{
unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
- unsigned long a;
- int i;
+ unsigned long a, flags;
+ int i, j;
+
+ /* Defining this here is evil ... but I prefer hiding that
+ * crap to avoid giving people ideas that they can do the
+ * same.
+ */
+ extern volatile unsigned int cpu_callin_map[NR_CPUS];
/* may need to flush here if secondary bats aren't setup */
for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
@@ -339,47 +346,52 @@ static void __init smp_psurge_kick_cpu(int nr)
if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
+ /* This is going to freeze the timebase, so disable interrupts */
+ local_irq_save(flags);
+
out_be32(psurge_start, start);
mb();
psurge_set_ipi(nr);
+
/*
* We can't use udelay here because the timebase is now frozen.
*/
for (i = 0; i < 2000; ++i)
- barrier();
+ asm volatile("nop" : : : "memory");
psurge_clr_ipi(nr);
- if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
-}
-
-/*
- * With the dual-cpu powersurge board, the decrementers and timebases
- * of both cpus are frozen after the secondary cpu is started up,
- * until we give the secondary cpu another interrupt. This routine
- * uses this to get the timebases synchronized.
- * -- paulus.
- */
-static void __init psurge_dual_sync_tb(int cpu_nr)
-{
- int t;
-
- set_dec(tb_ticks_per_jiffy);
- /* XXX fixme */
- set_tb(0, 0);
-
- if (cpu_nr > 0) {
+ /*
+ * Also, because the timebase is frozen, we must not return to the
+ * caller, which would try to do udelay()s etc... Instead, we wait
+ * -here- for the CPU to call in.
+ */
+ for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
+ for (j = 1; j < 10000; j++)
+ asm volatile("nop" : : : "memory");
+ asm volatile("sync" : : : "memory");
+ }
+ if (!cpu_callin_map[nr])
+ goto stuck;
+
+ /* And we do the TB sync here too for standard dual CPU cards */
+ if (psurge_type == PSURGE_DUAL) {
+ while(!tb_req)
+ barrier();
+ tb_req = 0;
+ mb();
+ timebase = get_tb();
+ mb();
+ while (timebase)
+ barrier();
mb();
- sec_tb_reset = 1;
- return;
}
+ stuck:
+ /* now interrupt the secondary, restarting both TBs */
+ if (psurge_type == PSURGE_DUAL)
+ psurge_set_ipi(1);
- /* wait for the secondary to have reset its TB before proceeding */
- for (t = 10000000; t > 0 && !sec_tb_reset; --t)
- ;
-
- /* now interrupt the secondary, starting both TBs */
- psurge_set_ipi(1);
+ if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}
static struct irqaction psurge_irqaction = {
@@ -390,36 +402,35 @@ static struct irqaction psurge_irqaction = {
static void __init smp_psurge_setup_cpu(int cpu_nr)
{
+ if (cpu_nr != 0)
+ return;
- if (cpu_nr == 0) {
- /* If we failed to start the second CPU, we should still
- * send it an IPI to start the timebase & DEC or we might
- * have them stuck.
- */
- if (num_online_cpus() < 2) {
- if (psurge_type == PSURGE_DUAL)
- psurge_set_ipi(1);
- return;
- }
- /* reset the entry point so if we get another intr we won't
- * try to startup again */
- out_be32(psurge_start, 0x100);
- if (setup_irq(30, &psurge_irqaction))
- printk(KERN_ERR "Couldn't get primary IPI interrupt");
- }
-
- if (psurge_type == PSURGE_DUAL)
- psurge_dual_sync_tb(cpu_nr);
+ /* reset the entry point so if we get another intr we won't
+ * try to startup again */
+ out_be32(psurge_start, 0x100);
+ if (setup_irq(30, &psurge_irqaction))
+ printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
void __init smp_psurge_take_timebase(void)
{
- /* Dummy implementation */
+ if (psurge_type != PSURGE_DUAL)
+ return;
+
+ tb_req = 1;
+ mb();
+ while (!timebase)
+ barrier();
+ mb();
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ mb();
+ set_dec(tb_ticks_per_jiffy/2);
}
void __init smp_psurge_give_timebase(void)
{
- /* Dummy implementation */
+ /* Nothing to do here */
}
/* PowerSurge-style Macs */
@@ -437,9 +448,6 @@ struct smp_ops_t psurge_smp_ops = {
* Core 99 and later support
*/
-static void (*pmac_tb_freeze)(int freeze);
-static u64 timebase;
-static int tb_req;
static void smp_core99_give_timebase(void)
{
@@ -478,7 +486,6 @@ static void __devinit smp_core99_take_timebase(void)
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
mb();
- set_dec(tb_ticks_per_jiffy/2);
local_irq_restore(flags);
}
@@ -920,3 +927,34 @@ struct smp_ops_t core99_smp_ops = {
# endif
#endif
};
+
+void __init pmac_setup_smp(void)
+{
+ struct device_node *np;
+
+ /* Check for Core99 */
+ np = of_find_node_by_name(NULL, "uni-n");
+ if (!np)
+ np = of_find_node_by_name(NULL, "u3");
+ if (!np)
+ np = of_find_node_by_name(NULL, "u4");
+ if (np) {
+ of_node_put(np);
+ smp_ops = &core99_smp_ops;
+ }
+#ifdef CONFIG_PPC32
+ else {
+ /* We have to set bits in cpu_possible_map here since the
+ * secondary CPU(s) aren't in the device tree. Various
+ * things won't be initialized for CPUs not in the possible
+ * map, so we really need to fix it up here.
+ */
+ int cpu;
+
+ for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
+ cpu_set(cpu, cpu_possible_map);
+ smp_ops = &psurge_smp_ops;
+ }
+#endif /* CONFIG_PPC32 */
+}
+
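
Both the Core99 path and the reworked psurge path now share one two-flag handshake: the secondary raises tb_req to ask for the timebase, the primary freezes the TB and publishes it in `timebase`, and the secondary zeroing `timebase` releases the primary to restart the TB. In outline:

    /*
     *   secondary (take_timebase)      primary (give_timebase)
     *   -------------------------      -----------------------
     *   tb_req = 1;             --->   while (!tb_req) barrier();
     *                                  tb_req = 0;
     *                                  freeze TB; timebase = get_tb();
     *   while (!timebase)
     *           barrier();
     *   set_tb(hi, lo);
     *   timebase = 0;           --->   while (timebase) barrier();
     *                                  restart TB
     */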
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 9a2a6e3..0e8db67 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -122,7 +122,7 @@ static void eeh_enable_irq(struct pci_dev *dev)
* passed back in "userdata".
*/
-static void eeh_report_error(struct pci_dev *dev, void *userdata)
+static int eeh_report_error(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
@@ -130,19 +130,21 @@ static void eeh_report_error(struct pci_dev *dev, void *userdata)
dev->error_state = pci_channel_io_frozen;
if (!driver)
- return;
+ return 0;
eeh_disable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->error_detected)
- return;
+ return 0;
rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
+ return 0;
}
/**
@@ -153,7 +155,7 @@ static void eeh_report_error(struct pci_dev *dev, void *userdata)
* Cumulative response passed back in "userdata".
*/
-static void eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
@@ -161,26 +163,28 @@ static void eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
if (!driver ||
!driver->err_handler ||
!driver->err_handler->mmio_enabled)
- return;
+ return 0;
rc = driver->err_handler->mmio_enabled (dev);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
+ return 0;
}
/**
* eeh_report_reset - tell device that slot has been reset
*/
-static void eeh_report_reset(struct pci_dev *dev, void *userdata)
+static int eeh_report_reset(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
if (!driver)
- return;
+ return 0;
dev->error_state = pci_channel_io_normal;
@@ -188,35 +192,39 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata)
if (!driver->err_handler ||
!driver->err_handler->slot_reset)
- return;
+ return 0;
rc = driver->err_handler->slot_reset(dev);
if ((*res == PCI_ERS_RESULT_NONE) ||
(*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
if (*res == PCI_ERS_RESULT_DISCONNECT &&
rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+
+ return 0;
}
/**
* eeh_report_resume - tell device to resume normal operations
*/
-static void eeh_report_resume(struct pci_dev *dev, void *userdata)
+static int eeh_report_resume(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
dev->error_state = pci_channel_io_normal;
if (!driver)
- return;
+ return 0;
eeh_enable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->resume)
- return;
+ return 0;
driver->err_handler->resume(dev);
+
+ return 0;
}
/**
@@ -226,22 +234,24 @@ static void eeh_report_resume(struct pci_dev *dev, void *userdata)
* dead, and that no further recovery attempts will be made on it.
*/
-static void eeh_report_failure(struct pci_dev *dev, void *userdata)
+static int eeh_report_failure(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
dev->error_state = pci_channel_io_perm_failure;
if (!driver)
- return;
+ return 0;
eeh_disable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->error_detected)
- return;
+ return 0;
driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+
+ return 0;
}
/* ------------------------------------------------------- */
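All five eeh_report_*() handlers change from void to int here. They are handed to pci_walk_bus(), whose callback type is int (*)(struct pci_dev *, void *); the assumption behind this patch is that a non-zero return aborts the walk, so returning 0 everywhere satisfies the new prototype while still visiting every device. A small illustrative callback under that assumption, with count_devices() and count_bus_devices() as made-up examples:

/*
 * Illustrative pci_walk_bus() callback, assuming a non-zero return
 * stops the traversal early; these helpers are not kernel code.
 */
#include <linux/pci.h>

static int count_devices(struct pci_dev *dev, void *userdata)
{
	int *count = userdata;

	(*count)++;
	return 0;	/* keep walking; non-zero would abort the traversal */
}

static int count_bus_devices(struct pci_bus *bus)
{
	int count = 0;

	pci_walk_bus(bus, count_devices, &count);
	return count;
}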
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1a231c3..1f8f6cf 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -35,7 +35,6 @@
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
-#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
@@ -118,31 +117,6 @@ static void __devinit smp_xics_setup_cpu(int cpu)
}
#endif /* CONFIG_XICS */
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase = 0;
-
-static void __devinit pSeries_give_timebase(void)
-{
- spin_lock(&timebase_lock);
- rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
- timebase = get_tb();
- spin_unlock(&timebase_lock);
-
- while (timebase)
- barrier();
- rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-static void __devinit pSeries_take_timebase(void)
-{
- while (!timebase)
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase >> 32, timebase & 0xffffffff);
- timebase = 0;
- spin_unlock(&timebase_lock);
-}
-
static void __devinit smp_pSeries_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -209,8 +183,8 @@ static void __init smp_init_pseries(void)
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = pSeries_give_timebase;
- smp_ops->take_timebase = pSeries_take_timebase;
+ smp_ops->give_timebase = rtas_give_timebase;
+ smp_ops->take_timebase = rtas_take_timebase;
}
pr_debug(" <- smp_init_pSeries()\n");
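The open-coded pSeries_give_timebase()/pSeries_take_timebase() pair is deleted in favour of shared rtas_give_timebase()/rtas_take_timebase() helpers (the diffstat shows the same substitution in the cell and chrp ports, with the common code landing in rtas.c). The handshake the removed code implemented, and which the shared helpers are assumed to reproduce, is: the boot CPU freezes the timebase through RTAS, publishes its value in a shared variable, spins until the secondary consumes it, then thaws. A sketch based directly on the removed code:

/*
 * Sketch of the timebase handshake, taken from the code removed above;
 * the shared helpers in rtas.c are assumed to follow the same pattern,
 * possibly with extra interrupt masking around the critical section.
 */
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <asm/rtas.h>
#include <asm/time.h>

static DEFINE_SPINLOCK(timebase_lock);
static unsigned long timebase;

void rtas_give_timebase(void)
{
	spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();		/* publish the frozen value */
	spin_unlock(&timebase_lock);

	while (timebase)		/* wait until the taker clears it */
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
}

void rtas_take_timebase(void)
{
	while (!timebase)		/* wait for the giver to publish */
		barrier();
	spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;			/* signal the giver we are done */
	spin_unlock(&timebase_lock);
}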
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 9c3af50..d46de1f 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -279,28 +279,29 @@ static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
}
#ifdef CONFIG_PPC_DCR
-static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
+static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node,
+ struct mpic_reg_bank *rb,
unsigned int offset, unsigned int size)
{
const u32 *dbasep;
- dbasep = of_get_property(mpic->irqhost->of_node, "dcr-reg", NULL);
+ dbasep = of_get_property(node, "dcr-reg", NULL);
- rb->dhost = dcr_map(mpic->irqhost->of_node, *dbasep + offset, size);
+ rb->dhost = dcr_map(node, *dbasep + offset, size);
BUG_ON(!DCR_MAP_OK(rb->dhost));
}
-static inline void mpic_map(struct mpic *mpic, phys_addr_t phys_addr,
- struct mpic_reg_bank *rb, unsigned int offset,
- unsigned int size)
+static inline void mpic_map(struct mpic *mpic, struct device_node *node,
+ phys_addr_t phys_addr, struct mpic_reg_bank *rb,
+ unsigned int offset, unsigned int size)
{
if (mpic->flags & MPIC_USES_DCR)
- _mpic_map_dcr(mpic, rb, offset, size);
+ _mpic_map_dcr(mpic, node, rb, offset, size);
else
_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
}
#else /* CONFIG_PPC_DCR */
-#define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
+#define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
#endif /* !CONFIG_PPC_DCR */
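mpic_map() and _mpic_map_dcr() now take the device_node from the caller instead of dereferencing mpic->irqhost->of_node, which is not yet valid when mpic_alloc() maps its register banks; the non-DCR fallback macro simply drops the extra argument. For the DCR case this boils down to reading the "dcr-reg" base from the supplied node and mapping size registers at base + offset. A sketch of that mapping step, wrapped in a made-up my_dcr_map() helper:

/*
 * Sketch of a DCR mapping done from an explicitly supplied node, as the
 * reworked _mpic_map_dcr() does; my_dcr_map() is a made-up wrapper.
 * Like the code above, it assumes the "dcr-reg" property is present.
 */
#include <linux/of.h>
#include <linux/bug.h>
#include <asm/dcr.h>

static dcr_host_t my_dcr_map(struct device_node *node,
			     unsigned int offset, unsigned int size)
{
	const u32 *dbasep;

	dbasep = of_get_property(node, "dcr-reg", NULL);
	BUG_ON(!dbasep);	/* caller guarantees a DCR-based node */

	/* "dcr-reg" holds the base DCR number; banks sit at base + offset. */
	return dcr_map(node, *dbasep + offset, size);
}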
@@ -1052,11 +1053,10 @@ struct mpic * __init mpic_alloc(struct device_node *node,
int intvec_top;
u64 paddr = phys_addr;
- mpic = alloc_bootmem(sizeof(struct mpic));
+ mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
if (mpic == NULL)
return NULL;
-
- memset(mpic, 0, sizeof(struct mpic));
+
mpic->name = name;
mpic->hc_irq = mpic_irq_chip;
@@ -1152,8 +1152,8 @@ struct mpic * __init mpic_alloc(struct device_node *node,
}
/* Map the global registers */
- mpic_map(mpic, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
- mpic_map(mpic, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
+ mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+ mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
/* Reset */
if (flags & MPIC_WANTS_RESET) {
@@ -1194,7 +1194,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* Map the per-CPU registers */
for (i = 0; i < mpic->num_cpus; i++) {
- mpic_map(mpic, paddr, &mpic->cpuregs[i],
+ mpic_map(mpic, node, paddr, &mpic->cpuregs[i],
MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
0x1000);
}
@@ -1202,7 +1202,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* Initialize main ISU if none provided */
if (mpic->isu_size == 0) {
mpic->isu_size = mpic->num_sources;
- mpic_map(mpic, paddr, &mpic->isus[0],
+ mpic_map(mpic, node, paddr, &mpic->isus[0],
MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
}
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
@@ -1256,8 +1256,10 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
BUG_ON(isu_num >= MPIC_MAX_ISU);
- mpic_map(mpic, paddr, &mpic->isus[isu_num], 0,
+ mpic_map(mpic, mpic->irqhost->of_node,
+ paddr, &mpic->isus[isu_num], 0,
MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
+
if ((isu_first + mpic->isu_size) > mpic->num_sources)
mpic->num_sources = isu_first + mpic->isu_size;
}
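Finally, the mpic itself is now allocated with kzalloc() rather than alloc_bootmem(), presumably so mpic_alloc() can also run after bootmem has been retired; since kzalloc() returns zeroed memory, the explicit memset() goes away with it. The idiom, shown on a made-up structure:

/*
 * The allocation cleanup on a made-up structure: kzalloc() is
 * kmalloc() plus zeroing, so the separate memset() that the old
 * alloc_bootmem() path needed becomes redundant.
 */
#include <linux/slab.h>

struct example {
	const char *name;
	int state;
};

static struct example *example_alloc(const char *name)
{
	struct example *ex;

	/* Before: ex = alloc_bootmem(...); memset(ex, 0, sizeof(*ex)); */
	ex = kzalloc(sizeof(*ex), GFP_KERNEL);	/* returns zeroed memory */
	if (!ex)
		return NULL;

	ex->name = name;	/* everything else stays zero-initialized */
	return ex;
}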