author		Paul Burton <paul.burton@imgtec.com>	2014-02-14 09:28:06 (GMT)
committer	Paul Burton <paul.burton@imgtec.com>	2014-05-02 15:39:14 (GMT)
commit		76ae658465c2319a63f3814b1e1e6e0664a1f542 (patch)
tree		4af72b0569cbd60201131e23a9a5da161a638276 /arch/mips/kernel/mips-cpc.c
parent		2ba60250b01bfbab6b2293f8c1492312eb6a4131 (diff)
download	linux-76ae658465c2319a63f3814b1e1e6e0664a1f542.tar.xz
MIPS: CPC: provide locking functions
This patch provides functions to lock & unlock access to the "core-other" register region of the CPC. Without appropriate locking, code using this region may be preempted, or may race with code running on another VPE within the same core, with one party changing the core that the "core-other" region targets at an inopportune time for the other.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Diffstat (limited to 'arch/mips/kernel/mips-cpc.c')
-rw-r--r--	arch/mips/kernel/mips-cpc.c	26
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index c9dc674..2368fc5 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -15,6 +15,10 @@
 
 void __iomem *mips_cpc_base;
 
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
 phys_t __weak mips_cpc_phys_base(void)
 {
 	u32 cpc_base;
@@ -39,6 +43,10 @@ phys_t __weak mips_cpc_phys_base(void)
 int mips_cpc_probe(void)
 {
 	phys_t addr;
+	unsigned cpu;
+
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&per_cpu(cpc_core_lock, cpu));
 
 	addr = mips_cpc_phys_base();
 	if (!addr)
@@ -50,3 +58,21 @@ int mips_cpc_probe(void)
 
 	return 0;
 }
+
+void mips_cpc_lock_other(unsigned int core)
+{
+	unsigned curr_core;
+	preempt_disable();
+	curr_core = current_cpu_data.core;
+	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+			  per_cpu(cpc_core_lock_flags, curr_core));
+	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+}
+
+void mips_cpc_unlock_other(void)
+{
+	unsigned curr_core = current_cpu_data.core;
+	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+			       per_cpu(cpc_core_lock_flags, curr_core));
+	preempt_enable();
+}