author		Scott Wood <scottwood@freescale.com>	2013-07-13 00:11:47 (GMT)
committer	Rivera Jose-B46482 <Jose.G.Rivera@freescale.com>	2013-08-07 19:46:56 (GMT)
commit		b441fdd11651990ba285843c8de0fd81667861c6 (patch)
tree		5ce9b7047b312901495a10f80ad3069311546e9b /arch
parent		87f9e29a73beda4296a024645c3a7250a8f6876a (diff)
download	linux-fsl-qoriq-b441fdd11651990ba285843c8de0fd81667861c6.tar.xz
powerpc/fsl-booke: Work around erratum A-006958
Erratum A-006958 says that 64-bit mftb is not atomic -- it's subject to a similar race condition as doing mftbu/mftbl on 32-bit. The lower half of the timebase is updated before the upper half; thus, we can share the workaround for a similar bug on Cell.

This workaround involves looping if the lower half of the timebase is zero, thus avoiding the need for a scratch register (other than CR0). The workaround must be avoided when the timebase is frozen, such as during the timebase sync code.

This deals with kernel and vdso accesses, but other userspace accesses will of course need to be fixed elsewhere.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Change-Id: I88da5fef252872ba17c4496ed1a053dc4be645af
---
v2: Share the Cell workaround instead of using the workaround suggested by the erratum.

Reviewed-on: http://git.am.freescale.net:8181/3749
Reviewed-by: Rivera Jose-B46482 <Jose.G.Rivera@freescale.com>
Tested-by: Rivera Jose-B46482 <Jose.G.Rivera@freescale.com>
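For reference (not part of this patch), below is a minimal sketch of the classic 32-bit mftbu/mftbl sequence that this erratum effectively reintroduces for 64-bit mftb: the upper half is re-read until it is stable across the lower-half read. The helper name read_tb_32bit is purely illustrative.

	/* Illustrative only: how 32-bit PowerPC code traditionally reads the
	 * 64-bit timebase without tearing.  If the upper half changed while
	 * the lower half was being read, retry. */
	static inline unsigned long long read_tb_32bit(void)
	{
		unsigned int hi, hi2, lo;

		do {
			asm volatile("mftbu %0" : "=r" (hi));	/* upper 32 bits */
			asm volatile("mftb  %0" : "=r" (lo));	/* lower 32 bits */
			asm volatile("mftbu %0" : "=r" (hi2));	/* re-check upper */
		} while (hi != hi2);

		return ((unsigned long long)hi << 32) | lo;
	}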
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/cputable.h	|  4 ++--
-rw-r--r--	arch/powerpc/include/asm/ppc_asm.h	|  2 +-
-rw-r--r--	arch/powerpc/include/asm/reg.h		|  2 +-
-rw-r--r--	arch/powerpc/platforms/85xx/smp.c	| 23 +++++++++++++++++++
4 files changed, 27 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b7d2747..e354127 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -369,12 +369,12 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG)
#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_SMT)
+ CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 635fc0b..3a853ae 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -362,7 +362,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601
#endif
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define MFTB(dest) \
90: mftb dest; \
BEGIN_FTR_SECTION_NESTED(96); \
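(Only three context lines of the MFTB macro are visible in the hunk above. For orientation, the remainder of the Cell workaround in ppc_asm.h continues roughly as follows: a nested CPU-feature section compares the just-read value with zero and branches back to the mftb; on CPUs without CPU_FTR_CELL_TB_BUG the compare/branch pair is patched out at boot.)

	cmpwi dest,0;			\
	beq-  90b;			\
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, 96)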
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index f1b79a6..cad9faa 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1058,7 +1058,7 @@
: "memory")
#ifdef __powerpc64__
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define mftb() ({unsigned long rval; \
asm volatile( \
"90: mftb %0;\n" \
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 30721ac..68f65ca 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -130,7 +130,30 @@ static void __cpuinit mpc85xx_give_timebase(void)
tb_req = 0;
mpc85xx_timebase_freeze(1);
+#ifdef CONFIG_PPC64
+ /*
+ * e5500/e6500 have a workaround for erratum A-006958 in place
+ * that will reread the timebase until TBL is non-zero.
+ * That would be a bad thing when the timebase is frozen.
+ *
+ * Thus, we read it manually, and instead of checking that
+ * TBL is non-zero, we ensure that TB does not change. We don't
+ * do that for the main mftb implementation, because it requires
+ * a scratch register
+ */
+ {
+ u64 prev;
+
+ asm volatile("mftb %0" : "=r" (timebase));
+
+ do {
+ prev = timebase;
+ asm volatile("mftb %0" : "=r" (timebase));
+ } while (prev != timebase);
+ }
+#else
timebase = get_tb();
+#endif
mb();
tb_valid = 1;