141 lines
4.2 KiB
Diff
141 lines
4.2 KiB
Diff
|
commit 23d5de8efb9aed48074a72bf3d43841e1556ca42
|
||
|
Author: Paul Burton <paul.burton@imgtec.com>
|
||
|
Date: Tue Sep 22 11:12:16 2015 -0700
|
||
|
|
||
|
MIPS: CM: Introduce core-other locking functions
|
||
|
|
||
|
Introduce mips_cm_lock_other & mips_cm_unlock_other, mirroring the
|
||
|
existing CPC equivalents, in order to lock access from the current core
|
||
|
to another via the core-other GCR region. This hasn't been required in
|
||
|
the past but with CM3 the CPC starts using GCR_CL_OTHER rather than
|
||
|
CPC_CL_OTHER and this will be required for safety.
|
||
|
|
||
|
[ralf@linux-mips.org: Fix merge conflict.]
|
||
|
|
||
|
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
|
||
|
Cc: linux-mips@linux-mips.org
|
||
|
Cc: linux-kernel@vger.kernel.org
|
||
|
Cc: James Hogan <james.hogan@imgtec.com>
|
||
|
Cc: Markos Chandras <markos.chandras@imgtec.com>
|
||
|
Patchwork: https://patchwork.linux-mips.org/patch/11207/
|
||
|
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
|
||
|
|
||
|
--- a/arch/mips/include/asm/mips-cm.h
|
||
|
+++ b/arch/mips/include/asm/mips-cm.h
|
||
|
@@ -334,6 +334,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
|
||
|
/* GCR_Cx_OTHER register fields */
|
||
|
#define CM_GCR_Cx_OTHER_CORENUM_SHF 16
|
||
|
#define CM_GCR_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xffff) << 16)
|
||
|
+#define CM3_GCR_Cx_OTHER_CORE_SHF 8
|
||
|
+#define CM3_GCR_Cx_OTHER_CORE_MSK (_ULCAST_(0x3f) << 8)
|
||
|
+#define CM3_GCR_Cx_OTHER_VP_SHF 0
|
||
|
+#define CM3_GCR_Cx_OTHER_VP_MSK (_ULCAST_(0x7) << 0)
|
||
|
|
||
|
/* GCR_Cx_RESET_BASE register fields */
|
||
|
#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF 12
|
||
|
@@ -444,4 +448,32 @@ static inline unsigned int mips_cm_vp_id
|
||
|
return (core * mips_cm_max_vp_width()) + vp;
|
||
|
}
|
||
|
|
||
|
+#ifdef CONFIG_MIPS_CM
|
||
|
+
|
||
|
+/**
|
||
|
+ * mips_cm_lock_other - lock access to another core
|
||
|
+ * @core: the other core to be accessed
|
||
|
+ * @vp: the VP within the other core to be accessed
|
||
|
+ *
|
||
|
+ * Call before operating upon a core via the 'other' register region in
|
||
|
+ * order to prevent the region being moved during access. Must be followed
|
||
|
+ * by a call to mips_cm_unlock_other.
|
||
|
+ */
|
||
|
+extern void mips_cm_lock_other(unsigned int core, unsigned int vp);
|
||
|
+
|
||
|
+/**
|
||
|
+ * mips_cm_unlock_other - unlock access to another core
|
||
|
+ *
|
||
|
+ * Call after operating upon another core via the 'other' register region.
|
||
|
+ * Must be called after mips_cm_lock_other.
|
||
|
+ */
|
||
|
+extern void mips_cm_unlock_other(void);
|
||
|
+
|
||
|
+#else /* !CONFIG_MIPS_CM */
|
||
|
+
|
||
|
+static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { }
|
||
|
+static inline void mips_cm_unlock_other(void) { }
|
||
|
+
|
||
|
+#endif /* !CONFIG_MIPS_CM */
|
||
|
+
|
||
|
#endif /* __MIPS_ASM_MIPS_CM_H__ */
|
||
|
--- a/arch/mips/kernel/mips-cm.c
|
||
|
+++ b/arch/mips/kernel/mips-cm.c
|
||
|
@@ -9,6 +9,8 @@
|
||
|
*/
|
||
|
|
||
|
#include <linux/errno.h>
|
||
|
+#include <linux/percpu.h>
|
||
|
+#include <linux/spinlock.h>
|
||
|
|
||
|
#include <asm/mips-cm.h>
|
||
|
#include <asm/mipsregs.h>
|
||
|
@@ -136,6 +138,9 @@ static char *cm3_causes[32] = {
|
||
|
"0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
|
||
|
};
|
||
|
|
||
|
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
|
||
|
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
|
||
|
+
|
||
|
phys_addr_t __mips_cm_phys_base(void)
|
||
|
{
|
||
|
u32 config3 = read_c0_config3();
|
||
|
@@ -200,6 +205,7 @@ int mips_cm_probe(void)
|
||
|
{
|
||
|
phys_addr_t addr;
|
||
|
u32 base_reg;
|
||
|
+ unsigned cpu;
|
||
|
|
||
|
/*
|
||
|
* No need to probe again if we have already been
|
||
|
@@ -247,9 +253,42 @@ int mips_cm_probe(void)
|
||
|
/* determine register width for this CM */
|
||
|
mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
|
||
|
|
||
|
+ for_each_possible_cpu(cpu)
|
||
|
+ spin_lock_init(&per_cpu(cm_core_lock, cpu));
|
||
|
+
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
+void mips_cm_lock_other(unsigned int core, unsigned int vp)
|
||
|
+{
|
||
|
+ unsigned curr_core;
|
||
|
+ u32 val;
|
||
|
+
|
||
|
+ preempt_disable();
|
||
|
+ curr_core = current_cpu_data.core;
|
||
|
+ spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
|
||
|
+ per_cpu(cm_core_lock_flags, curr_core));
|
||
|
+
|
||
|
+ if (mips_cm_revision() >= CM_REV_CM3) {
|
||
|
+ val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
|
||
|
+ val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
|
||
|
+ } else {
|
||
|
+ BUG_ON(vp != 0);
|
||
|
+ val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
|
||
|
+ }
|
||
|
+
|
||
|
+ write_gcr_cl_other(val);
|
||
|
+}
|
||
|
+
|
||
|
+void mips_cm_unlock_other(void)
|
||
|
+{
|
||
|
+ unsigned curr_core = current_cpu_data.core;
|
||
|
+
|
||
|
+ spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
|
||
|
+ per_cpu(cm_core_lock_flags, curr_core));
|
||
|
+ preempt_enable();
|
||
|
+}
|
||
|
+
|
||
|
void mips_cm_error_report(void)
|
||
|
{
|
||
|
unsigned long revision = mips_cm_revision();
|