author    Michael Ellerman <mpe@ellerman.id.au>  2016-03-14 09:05:14 (GMT)
committer Michael Ellerman <mpe@ellerman.id.au>  2016-03-14 09:05:14 (GMT)
commit    a1b5344620a3e6291afaf7542714ba9c391ef1c7 (patch)
tree      b79b51bc4019ea50efbbac4a7f7717b6e6d749c0 /arch/powerpc/include
parent    d8c0282f4da6d5335fee56141ca92284026f4818 (diff)
parent    fba4e9f9898ab181703723b9890ba29345edfe08 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next
Freescale updates from Scott: "Highlights include 8xx optimizations, 32-bit checksum optimizations, 86xx consolidation, e5500/e6500 cpu hotplug, more fman and other dt bits, and minor fixes/cleanup."
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/cache.h              |  19
-rw-r--r--  arch/powerpc/include/asm/cacheflush.h         |  54
-rw-r--r--  arch/powerpc/include/asm/checksum.h           | 141
-rw-r--r--  arch/powerpc/include/asm/cputable.h           |   8
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h         |  15
-rw-r--r--  arch/powerpc/include/asm/fsl_pm.h             |  51
-rw-r--r--  arch/powerpc/include/asm/mmu-8xx.h            |   4
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h  |   5
-rw-r--r--  arch/powerpc/include/asm/page_32.h            |  17
-rw-r--r--  arch/powerpc/include/asm/reg.h                |   2
-rw-r--r--  arch/powerpc/include/asm/reg_8xx.h            |  93
-rw-r--r--  arch/powerpc/include/asm/smp.h                |   4
-rw-r--r--  arch/powerpc/include/asm/time.h               |   6
13 files changed, 362 insertions, 57 deletions
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 5f8229e..ffbafbf 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -69,6 +69,25 @@ extern void _set_L3CR(unsigned long);
#define _set_L3CR(val) do { } while(0)
#endif
+static inline void dcbz(void *addr)
+{
+ __asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbi(void *addr)
+{
+ __asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbf(void *addr)
+{
+ __asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbst(void *addr)
+{
+ __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
+}
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
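The four helpers above are thin wrappers around the PowerPC cache-block instructions: dcbz zeroes a line, dcbi invalidates it, dcbf writes it back and invalidates it, and dcbst writes it back while keeping it valid. A minimal usage sketch (the function and buffer are hypothetical, not part of this patch):

    /* Hypothetical driver helper: write back every cache line covering
     * a descriptor ring, then order the write-backs before MMIO. */
    static void push_ring_to_memory(void *ring, unsigned long bytes)
    {
            void *p;

            for (p = ring; p < ring + bytes; p += L1_CACHE_BYTES)
                    dcbst(p);       /* write back, keep the line valid */
            mb();                   /* sync: complete before any doorbell write */
    }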
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 6229e6b..69fb16d 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,8 +30,6 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-extern void __flush_disable_L1(void);
-
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
@@ -47,12 +45,58 @@ static inline void __flush_dcache_icache_phys(unsigned long physaddr)
}
#endif
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
#ifdef CONFIG_PPC32
-extern void clean_dcache_range(unsigned long start, unsigned long stop);
-extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
+/*
+ * Write any modified data cache blocks out to memory and invalidate them.
+ * Does not invalidate the corresponding instruction cache blocks.
+ */
+static inline void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+ void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
+ unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+ dcbf(addr);
+ mb(); /* sync */
+}
+
+/*
+ * Write any modified data cache blocks out to memory.
+ * Does not invalidate the corresponding cache lines (especially for
+ * any corresponding instruction cache).
+ */
+static inline void clean_dcache_range(unsigned long start, unsigned long stop)
+{
+ void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
+ unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+ dcbst(addr);
+ mb(); /* sync */
+}
+
+/*
+ * Like above, but invalidate the D-cache. This is used by the 8xx
+ * to invalidate the cache so the PPC core doesn't get stale data
+ * from the CPM (no cache snooping here :-).
+ */
+static inline void invalidate_dcache_range(unsigned long start,
+ unsigned long stop)
+{
+ void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
+ unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+ dcbi(addr);
+ mb(); /* sync */
+}
+
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
+extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
#endif
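The alignment arithmetic shared by the three PPC32 loops above is worth spelling out: addr rounds start down to a cache-line boundary, and adding L1_CACHE_BYTES - 1 to size before the right shift rounds the iteration count up, so a partial last line is still covered. A worked example, assuming 32-byte lines:

    /* start = 0x1005, stop = 0x1046, L1_CACHE_BYTES = 32:
     *   addr = 0x1005 & ~31          = 0x1000
     *   size = 0x1046 - 0x1000 + 31  = 0x65
     *   size >> 5                    = 3 iterations
     * touching lines 0x1000, 0x1020 and 0x1040, which cover every
     * byte in [start, stop).
     */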
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index e8d9ef4..ee655ed 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -9,30 +9,9 @@
* 2 of the License, or (at your option) any later version.
*/
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries. ihl is the number
- * of 32-bit words and is always >= 5.
- */
#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
@@ -47,21 +26,12 @@ extern __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err, int *dst_err);
-#ifdef __powerpc64__
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
int len, __wsum sum, int *err_ptr);
-#else
-/*
- * the same as csum_partial, but copies from src to dst while it
- * checksums.
- */
-#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
- csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL)
-#endif
#define csum_partial_copy_nocheck(src, dst, len, sum) \
csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
@@ -83,15 +53,6 @@ static inline __sum16 csum_fold(__wsum sum)
return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-static inline __sum16 ip_compute_csum(const void *buff, int len)
-{
- return csum_fold(csum_partial(buff, len, 0));
-}
-
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
@@ -135,17 +96,117 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
u64 res = (__force u64)csum;
+#endif
+ if (__builtin_constant_p(csum) && csum == 0)
+ return addend;
+ if (__builtin_constant_p(addend) && addend == 0)
+ return csum;
+#ifdef __powerpc64__
res += (__force u64)addend;
return (__force __wsum)((u32)res + (res >> 32));
#else
asm("addc %0,%0,%1;"
"addze %0,%0;"
- : "+r" (csum) : "r" (addend));
+ : "+r" (csum) : "r" (addend) : "xer");
return csum;
#endif
}
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries. ihl is the number
+ * of 32-bit words and is always >= 5.
+ */
+static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
+{
+ const u32 *ptr = (const u32 *)iph + 1;
+#ifdef __powerpc64__
+ unsigned int i;
+ u64 s = *(const u32 *)iph;
+
+ for (i = 0; i < ihl - 1; i++, ptr++)
+ s += *ptr;
+ s += (s >> 32);
+ return (__force __wsum)s;
+#else
+ __wsum sum, tmp;
+
+ asm("mtctr %3;"
+ "addc %0,%4,%5;"
+ "1: lwzu %1, 4(%2);"
+ "adde %0,%0,%1;"
+ "bdnz 1b;"
+ "addze %0,%0;"
+ : "=r" (sum), "=r" (tmp), "+b" (ptr)
+ : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
+ : "ctr", "xer", "memory");
+
+ return sum;
+#endif
+}
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ return csum_fold(ip_fast_csum_nofold(iph, ihl));
+}
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum __csum_partial(const void *buff, int len, __wsum sum);
+
+static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+ if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
+ if (len == 2)
+ sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
+ if (len >= 4)
+ sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
+ if (len == 6)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u16 *)(buff + 4));
+ if (len >= 8)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u32 *)(buff + 4));
+ if (len == 10)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u16 *)(buff + 8));
+ if (len >= 12)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u32 *)(buff + 8));
+ if (len == 14)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u16 *)(buff + 12));
+ if (len >= 16)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u32 *)(buff + 12));
+ } else if (__builtin_constant_p(len) && (len & 3) == 0) {
+ sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
+ } else {
+ sum = __csum_partial(buff, len, sum);
+ }
+ return sum;
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
#endif
#endif /* __KERNEL__ */
#endif
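The net effect of the new csum_partial() is that small, constant, even lengths are checksummed entirely inline through csum_add(), constant multiple-of-4 lengths go through ip_fast_csum_nofold(), and only variable lengths pay for the out-of-line __csum_partial() call. A sketch of what a hypothetical len == 8 caller reduces to at compile time:

    /* Hypothetical caller: checksum two 32-bit words. With len known
     * to be 8, csum_partial() folds down to two loads and two
     * carry-propagating adds -- no function call is emitted. */
    static inline __wsum csum_two_words(const void *buf)
    {
            __wsum sum = 0;

            sum = csum_add(sum, (__force __wsum)*(const u32 *)buf);
            sum = csum_add(sum, (__force __wsum)*(const u32 *)(buf + 4));
            return sum;
    }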
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 94ace9b..df4fb5f 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -43,6 +43,11 @@ extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
extern int machine_check_47x(struct pt_regs *regs);
+extern void cpu_down_flush_e500v2(void);
+extern void cpu_down_flush_e500mc(void);
+extern void cpu_down_flush_e5500(void);
+extern void cpu_down_flush_e6500(void);
+
/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
struct cpu_spec {
/* CPU is matched via (PVR & pvr_mask) == pvr_value */
@@ -59,6 +64,9 @@ struct cpu_spec {
unsigned int icache_bsize;
unsigned int dcache_bsize;
+ /* flush caches inside the current cpu */
+ void (*cpu_down_flush)(void);
+
/* number of performance monitor counters */
unsigned int num_pmcs;
enum powerpc_pmc_type pmc_type;
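The new cpu_down_flush member gives each CPU model a hook for flushing its private caches before the core is taken offline; the four e500-family flush routines declared above are wired into their cpu_spec entries elsewhere in this series. A hedged sketch of the expected call site in a hotplug path:

    /* Hypothetical hotplug fragment: flush this core's caches before
     * powering it down. cur_cpu_spec is the matched cpu_spec entry. */
    if (cur_cpu_spec->cpu_down_flush)
            cur_cpu_spec->cpu_down_flush();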
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index ba42e46..666bef4e 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_CPUTHREADS_H
#define _ASM_POWERPC_CPUTHREADS_H
+#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
/*
@@ -94,7 +95,21 @@ static inline int cpu_last_thread_sibling(int cpu)
return cpu | (threads_per_core - 1);
}
+static inline u32 get_tensr(void)
+{
+#ifdef CONFIG_BOOKE
+ if (cpu_has_feature(CPU_FTR_SMT))
+ return mfspr(SPRN_TENSR);
+#endif
+ return 1;
+}
+
+void book3e_start_thread(int thread, unsigned long addr);
+void book3e_stop_thread(int thread);
+
+#endif /* __ASSEMBLY__ */
+#define INVALID_THREAD_HWID 0x0fff
#endif /* _ASM_POWERPC_CPUTHREADS_H */
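get_tensr() reads the Thread Enable Status Register on SMT-capable Book3E parts and falls back to 1 (only thread 0) everywhere else, so callers can treat the return value as a bitmask of live hardware threads. A small sketch, with a hypothetical wrapper name and assuming bit t corresponds to thread t:

    /* Hypothetical check: is hardware thread 't' of this core enabled? */
    static inline bool hw_thread_enabled(int t)
    {
            return (get_tensr() >> t) & 1;
    }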
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
new file mode 100644
index 0000000..47df55e
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -0,0 +1,51 @@
+/*
+ * Support Power Management
+ *
+ * Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __PPC_FSL_PM_H
+#define __PPC_FSL_PM_H
+
+#define E500_PM_PH10 1
+#define E500_PM_PH15 2
+#define E500_PM_PH20 3
+#define E500_PM_PH30 4
+#define E500_PM_DOZE E500_PM_PH10
+#define E500_PM_NAP E500_PM_PH15
+
+#define PLAT_PM_SLEEP 20
+#define PLAT_PM_LPM20 30
+
+#define FSL_PM_SLEEP (1 << 0)
+#define FSL_PM_DEEP_SLEEP (1 << 1)
+
+struct fsl_pm_ops {
+ /* mask pending interrupts to the RCPM from MPIC */
+ void (*irq_mask)(int cpu);
+
+ /* unmask pending interrupts to the RCPM from MPIC */
+ void (*irq_unmask)(int cpu);
+ void (*cpu_enter_state)(int cpu, int state);
+ void (*cpu_exit_state)(int cpu, int state);
+ void (*cpu_up_prepare)(int cpu);
+ void (*cpu_die)(int cpu);
+ int (*plat_enter_sleep)(void);
+ void (*freeze_time_base)(bool freeze);
+
+ /* keep the power of IP blocks during sleep/deep sleep */
+ void (*set_ip_power)(bool enable, u32 mask);
+
+ /* get platform supported power management modes */
+ unsigned int (*get_pm_modes)(void);
+};
+
+extern const struct fsl_pm_ops *qoriq_pm_ops;
+
+int __init fsl_rcpm_init(void);
+
+#endif /* __PPC_FSL_PM_H */
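fsl_pm_ops is a platform-filled operations table: the RCPM driver brought up by fsl_rcpm_init() is expected to populate qoriq_pm_ops, and since the driver may be absent, consumers should test the pointer before use. A usage sketch (the surrounding function is hypothetical):

    /* Hypothetical caller: park a CPU in the PH15 (nap) state. */
    static void example_cpu_nap(int cpu)
    {
            if (!qoriq_pm_ops)
                    return; /* no RCPM support on this platform */

            qoriq_pm_ops->irq_mask(cpu);
            qoriq_pm_ops->cpu_enter_state(cpu, E500_PM_PH15);
    }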
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index f05500a..0a566f1 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -171,9 +171,9 @@ typedef struct {
} mm_context_t;
#endif /* !__ASSEMBLY__ */
-#if (PAGE_SHIFT == 12)
+#if defined(CONFIG_PPC_4K_PAGES)
#define mmu_virtual_psize MMU_PAGE_4K
-#elif (PAGE_SHIFT == 14)
+#elif defined(CONFIG_PPC_16K_PAGES)
#define mmu_virtual_psize MMU_PAGE_16K
#else
#error "Unsupported PAGE_SIZE"
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index c82cbf5..7808475 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -86,7 +86,7 @@ extern int icache_44x_need_flush;
* We no longer map larger than phys RAM with the BATs so we don't have
* to worry about the VMALLOC_OFFSET causing problems. We do have to worry
* about clashes between our early calls to ioremap() that start growing down
- * from ioremap_base being run into the VM area allocations (growing upwards
+ * from IOREMAP_TOP being run into the VM area allocations (growing upwards
* from VMALLOC_START). For this reason we have ioremap_bot to check when
* we actually run into our mappings setup in the early boot with the VM
* system. This really does become a problem for machines with good amounts
@@ -309,7 +309,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pte_index(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+ (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
+ pte_index(addr))
#define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte)
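With the change above, pte_offset_kernel() returns NULL rather than a junk pointer when the pmd entry is bad, so page-table walkers must now check the result. A sketch of the new calling convention (the walker fragment is hypothetical):

    /* A bad pmd now yields NULL instead of a bogus pte pointer. */
    pte_t *pte = pte_offset_kernel(pmd, addr);

    if (!pte)
            return; /* no page table here, nothing to do */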
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 68d73b2..6a8e179 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -1,6 +1,8 @@
#ifndef _ASM_POWERPC_PAGE_32_H
#define _ASM_POWERPC_PAGE_32_H
+#include <asm/cache.h>
+
#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
@@ -36,9 +38,18 @@ typedef unsigned long long pte_basic_t;
typedef unsigned long pte_basic_t;
#endif
-struct page;
-extern void clear_pages(void *page, int order);
-static inline void clear_page(void *page) { clear_pages(page, 0); }
+/*
+ * Clear page using the dcbz instruction, which doesn't cause any
+ * memory traffic (except to write out any cache lines which get
+ * displaced). This only works on cacheable memory.
+ */
+static inline void clear_page(void *addr)
+{
+ unsigned int i;
+
+ for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
+ dcbz(addr);
+}
extern void copy_page(void *to, void *from);
#include <asm-generic/getorder.h>
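A quick cost estimate for the inline clear_page() above, assuming 4 KiB pages and 32-byte L1 lines:

    /* PAGE_SIZE / L1_CACHE_BYTES = 4096 / 32 = 128 loop iterations,
     * i.e. 128 dcbz instructions. Each dcbz establishes a zeroed
     * cache line in place, so a cacheable page is cleared with no
     * load traffic from memory at all.
     */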
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 52ed654..f5f4c66 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1219,9 +1219,11 @@ static inline void mtmsr_isync(unsigned long val)
#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
: "=r" (rval)); rval;})
+#ifndef mtspr
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
: "r" ((unsigned long)(v)) \
: "memory")
+#endif
extern void msr_check_and_set(unsigned long bits);
extern bool strict_msr_control;
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index e8ea346..d41412c 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -4,6 +4,8 @@
#ifndef _ASM_POWERPC_REG_8xx_H
#define _ASM_POWERPC_REG_8xx_H
+#include <asm/mmu-8xx.h>
+
/* Cache control on the MPC8xx is provided through some additional
* special purpose registers.
*/
@@ -14,6 +16,15 @@
#define SPRN_DC_ADR 569 /* Address needed for some commands */
#define SPRN_DC_DAT 570 /* Read-only data register */
+/* Misc Debug */
+#define SPRN_DPDR 630
+#define SPRN_MI_CAM 816
+#define SPRN_MI_RAM0 817
+#define SPRN_MI_RAM1 818
+#define SPRN_MD_CAM 824
+#define SPRN_MD_RAM0 825
+#define SPRN_MD_RAM1 826
+
/* Commands. Only the first few are available to the instruction cache.
*/
#define IDC_ENABLE 0x02000000 /* Cache enable */
@@ -39,4 +50,86 @@
#define DC_DFWT 0x40000000 /* Data cache is forced write through */
#define DC_LES 0x20000000 /* Caches are little endian mode */
+#ifdef CONFIG_8xx_CPU6
+#define do_mtspr_cpu6(rn, rn_addr, v) \
+ do { \
+ int _reg_cpu6 = rn_addr, _tmp_cpu6[1]; \
+ asm volatile("stw %0, %1;" \
+ "lwz %0, %1;" \
+ "mtspr " __stringify(rn) ",%2" : \
+ : "r" (_reg_cpu6), "m"(_tmp_cpu6), \
+ "r" ((unsigned long)(v)) \
+ : "memory"); \
+ } while (0)
+
+#define do_mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+#define mtspr(rn, v) \
+ do { \
+ if (rn == SPRN_IMMR) \
+ do_mtspr_cpu6(rn, 0x3d30, v); \
+ else if (rn == SPRN_IC_CST) \
+ do_mtspr_cpu6(rn, 0x2110, v); \
+ else if (rn == SPRN_IC_ADR) \
+ do_mtspr_cpu6(rn, 0x2310, v); \
+ else if (rn == SPRN_IC_DAT) \
+ do_mtspr_cpu6(rn, 0x2510, v); \
+ else if (rn == SPRN_DC_CST) \
+ do_mtspr_cpu6(rn, 0x3110, v); \
+ else if (rn == SPRN_DC_ADR) \
+ do_mtspr_cpu6(rn, 0x3310, v); \
+ else if (rn == SPRN_DC_DAT) \
+ do_mtspr_cpu6(rn, 0x3510, v); \
+ else if (rn == SPRN_MI_CTR) \
+ do_mtspr_cpu6(rn, 0x2180, v); \
+ else if (rn == SPRN_MI_AP) \
+ do_mtspr_cpu6(rn, 0x2580, v); \
+ else if (rn == SPRN_MI_EPN) \
+ do_mtspr_cpu6(rn, 0x2780, v); \
+ else if (rn == SPRN_MI_TWC) \
+ do_mtspr_cpu6(rn, 0x2b80, v); \
+ else if (rn == SPRN_MI_RPN) \
+ do_mtspr_cpu6(rn, 0x2d80, v); \
+ else if (rn == SPRN_MI_CAM) \
+ do_mtspr_cpu6(rn, 0x2190, v); \
+ else if (rn == SPRN_MI_RAM0) \
+ do_mtspr_cpu6(rn, 0x2390, v); \
+ else if (rn == SPRN_MI_RAM1) \
+ do_mtspr_cpu6(rn, 0x2590, v); \
+ else if (rn == SPRN_MD_CTR) \
+ do_mtspr_cpu6(rn, 0x3180, v); \
+ else if (rn == SPRN_M_CASID) \
+ do_mtspr_cpu6(rn, 0x3380, v); \
+ else if (rn == SPRN_MD_AP) \
+ do_mtspr_cpu6(rn, 0x3580, v); \
+ else if (rn == SPRN_MD_EPN) \
+ do_mtspr_cpu6(rn, 0x3780, v); \
+ else if (rn == SPRN_M_TWB) \
+ do_mtspr_cpu6(rn, 0x3980, v); \
+ else if (rn == SPRN_MD_TWC) \
+ do_mtspr_cpu6(rn, 0x3b80, v); \
+ else if (rn == SPRN_MD_RPN) \
+ do_mtspr_cpu6(rn, 0x3d80, v); \
+ else if (rn == SPRN_M_TW) \
+ do_mtspr_cpu6(rn, 0x3f80, v); \
+ else if (rn == SPRN_MD_CAM) \
+ do_mtspr_cpu6(rn, 0x3190, v); \
+ else if (rn == SPRN_MD_RAM0) \
+ do_mtspr_cpu6(rn, 0x3390, v); \
+ else if (rn == SPRN_MD_RAM1) \
+ do_mtspr_cpu6(rn, 0x3590, v); \
+ else if (rn == SPRN_DEC) \
+ do_mtspr_cpu6(rn, 0x2c00, v); \
+ else if (rn == SPRN_TBWL) \
+ do_mtspr_cpu6(rn, 0x3880, v); \
+ else if (rn == SPRN_TBWU) \
+ do_mtspr_cpu6(rn, 0x3a80, v); \
+ else if (rn == SPRN_DPDR) \
+ do_mtspr_cpu6(rn, 0x2d30, v); \
+ else \
+ do_mtspr(rn, v); \
+ } while (0)
+#endif
+
#endif /* _ASM_POWERPC_REG_8xx_H */
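The table above implements the MPC8xx CPU6 errata workaround: each mtspr to an affected SPR must be preceded by a dummy store/reload of a per-register magic value (the second argument to do_mtspr_cpu6) before the real mtspr is issued. For example:

    /* With CONFIG_8xx_CPU6 set, this: */
    mtspr(SPRN_DEC, val);
    /* expands to do_mtspr_cpu6(SPRN_DEC, 0x2c00, val): a stw/lwz pair
     * on the magic value 0x2c00 immediately followed by the real
     * "mtspr DEC". SPRs not in the table fall through to do_mtspr(). */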
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 825663c..174271e 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -67,6 +67,9 @@ void generic_cpu_die(unsigned int cpu);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
+int is_cpu_dead(unsigned int cpu);
+#else
+#define generic_set_cpu_up(i) do { } while (0)
#endif
#ifdef CONFIG_PPC64
@@ -197,6 +200,7 @@ extern void generic_secondary_thread_init(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
+extern unsigned int booting_thread_hwid;
extern void __early_start(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 2d7109a..1092fdd 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -31,8 +31,6 @@ extern void tick_broadcast_ipi_handler(void);
extern void generic_calibrate_decr(void);
-extern void set_dec_cpu6(unsigned int val);
-
/* Some sane defaults: 125 MHz timebase, 1GHz processor */
extern unsigned long ppc_proc_freq;
#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
@@ -166,14 +164,12 @@ static inline void set_dec(int val)
{
#if defined(CONFIG_40x)
mtspr(SPRN_PIT, val);
-#elif defined(CONFIG_8xx_CPU6)
- set_dec_cpu6(val - 1);
#else
#ifndef CONFIG_BOOKE
--val;
#endif
mtspr(SPRN_DEC, val);
-#endif /* not 40x or 8xx_CPU6 */
+#endif /* not 40x */
}
static inline unsigned long tb_ticks_since(unsigned long tstamp)