author		Paul Burton <paul.burton@imgtec.com>	2017-08-12 22:49:39 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2017-08-29 18:57:27 -0400
commit		68923cdc2eb34124d77bc27f7945d7ff16b236dd (patch)
tree		967332aa37ceb6d533129bec00312b8735eadb9e
parent		5616897efd1816c18231c9976a6d64392fc6cdee (diff)
MIPS: CM: Add cluster & block args to mips_cm_lock_other()
With CM >= 3.5 we have the notion of multiple clusters & can access
their CM, CPC & GIC registers via the appropriate redirect/other
register blocks. In order to allow for this, introduce cluster & block
arguments to mips_cm_lock_other(), which configures the redirect/other
region to point at the appropriate cluster, core, VP & register block.
Since we now have 4 arguments to mips_cm_lock_other(), & a common use is
likely to be targeting the cluster, core & VP corresponding to a
particular Linux CPU number, we also add a new mips_cm_lock_other_cpu()
helper function which handles that without the caller needing to
manually pull out the cluster, core & VP numbers.
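To illustrate the new calling convention, a minimal sketch
(CM_GCR_Cx_OTHER_BLOCK_LOCAL is the register block existing callers
target; cpu, core & vp are hypothetical values a caller already holds):

/* Before this patch: target a core & VP directly. */
mips_cm_lock_other(core, vp);

/* After this patch: the cluster & register block become explicit. */
mips_cm_lock_other(0, core, vp, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

/* Or, given a Linux CPU number, let the helper derive all three. */
mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);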
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/17013/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--	arch/mips/include/asm/mips-cm.h	45
-rw-r--r--	arch/mips/kernel/mips-cm.c	19
-rw-r--r--	arch/mips/kernel/smp-cps.c	10
-rw-r--r--	arch/mips/kernel/smp.c	2
4 files changed, 58 insertions(+), 18 deletions(-)
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 6cfc0cc265d7..d42cc8e76dc2 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -437,29 +437,56 @@ static inline unsigned int mips_cm_vp_id(unsigned int cpu)
 #ifdef CONFIG_MIPS_CM
 
 /**
- * mips_cm_lock_other - lock access to another core
+ * mips_cm_lock_other - lock access to redirect/other region
+ * @cluster: the other cluster to be accessed
  * @core: the other core to be accessed
  * @vp: the VP within the other core to be accessed
+ * @block: the register block to be accessed
  *
- * Call before operating upon a core via the 'other' register region in
- * order to prevent the region being moved during access. Must be followed
- * by a call to mips_cm_unlock_other.
+ * Configure the redirect/other region for the local core/VP (depending upon
+ * the CM revision) to target the specified @cluster, @core, @vp & register
+ * @block. Must be called before using the redirect/other region, and followed
+ * by a call to mips_cm_unlock_other() when access to the redirect/other region
+ * is complete.
+ *
+ * This function acquires a spinlock such that code between it &
+ * mips_cm_unlock_other() calls cannot be pre-empted by anything which may
+ * reconfigure the redirect/other region, and cannot be interfered with by
+ * another VP in the core. As such calls to this function should not be nested.
  */
-extern void mips_cm_lock_other(unsigned int core, unsigned int vp);
+extern void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+			       unsigned int vp, unsigned int block);
 
 /**
- * mips_cm_unlock_other - unlock access to another core
+ * mips_cm_unlock_other - unlock access to redirect/other region
  *
- * Call after operating upon another core via the 'other' register region.
- * Must be called after mips_cm_lock_other.
+ * Must be called after mips_cm_lock_other() once all required access to the
+ * redirect/other region has been completed.
  */
 extern void mips_cm_unlock_other(void);
 
 #else /* !CONFIG_MIPS_CM */
 
-static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { }
+static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+				      unsigned int vp, unsigned int block) { }
 static inline void mips_cm_unlock_other(void) { }
 
 #endif /* !CONFIG_MIPS_CM */
 
+/**
+ * mips_cm_lock_other_cpu - lock access to redirect/other region
+ * @cpu: the other CPU whose register we want to access
+ *
+ * Configure the redirect/other region for the local core/VP (depending upon
+ * the CM revision) to target the specified @cpu & register @block. This is
+ * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number
+ * for convenience.
+ */
+static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
+{
+	struct cpuinfo_mips *d = &cpu_data[cpu];
+
+	mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block);
+}
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
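For reference, a minimal usage sketch of the new helper; the function
other_cpu_config() is hypothetical, while read_gcr_co_config() &
CM_GCR_Cx_OTHER_BLOCK_LOCAL are used as elsewhere in this patch:

/* Hypothetical example: read another CPU's Cx_CONFIG register via the
 * redirect/other region, resolving cluster/core/VP from the CPU number. */
static u32 other_cpu_config(unsigned int cpu)
{
	u32 cfg;

	mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	cfg = read_gcr_co_config();
	mips_cm_unlock_other();

	return cfg;
}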
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 77ad5468ee93..47d64fd224ea 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -257,17 +257,28 @@ int mips_cm_probe(void)
 	return 0;
 }
 
-void mips_cm_lock_other(unsigned int core, unsigned int vp)
+void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+			unsigned int vp, unsigned int block)
 {
-	unsigned curr_core;
+	unsigned int curr_core, cm_rev;
 	u32 val;
 
+	cm_rev = mips_cm_revision();
 	preempt_disable();
 
-	if (mips_cm_revision() >= CM_REV_CM3) {
+	if (cm_rev >= CM_REV_CM3) {
 		val = core << __ffs(CM3_GCR_Cx_OTHER_CORE);
 		val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP);
 
+		if (cm_rev >= CM_REV_CM3_5) {
+			val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+			val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER);
+			val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK);
+		} else {
+			WARN_ON(cluster != 0);
+			WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+		}
+
 		/*
 		 * We need to disable interrupts in SMP systems in order to
 		 * ensure that we don't interrupt the caller with code which
@@ -280,7 +291,9 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
 		spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
 				  *this_cpu_ptr(&cm_core_lock_flags));
 	} else {
+		WARN_ON(cluster != 0);
 		WARN_ON(vp != 0);
+		WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
 		/*
 		 * We only have a GCR_CL_OTHER per core in systems with
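In terms of what callers may pass, the WARN_ON()s above reduce to: on
CM < 3.5 only cluster 0 & CM_GCR_Cx_OTHER_BLOCK_LOCAL are valid, while
CM >= 3.5 may target any cluster & block. A sketch with hypothetical
target values:

/* CM >= 3.5: cluster 1, core 2, VP 0 may be targeted directly. */
mips_cm_lock_other(1, 2, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/* ... access the selected core's redirect/other registers ... */
mips_cm_unlock_other();

/* CM < 3.5: anything other than cluster 0 &
 * CM_GCR_Cx_OTHER_BLOCK_LOCAL trips the WARN_ON()s above. */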
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 8cc508809466..7aac84ffc2af 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -52,7 +52,7 @@ static unsigned core_vpe_count(unsigned core)
 	    && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
 		return 1;
 
-	mips_cm_lock_other(core, 0);
+	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE;
 	mips_cm_unlock_other();
 	return cfg + 1;
@@ -214,7 +214,7 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
 	unsigned timeout;
 
 	/* Select the appropriate core */
-	mips_cm_lock_other(core, 0);
+	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
 	/* Set its reset vector */
 	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
@@ -313,7 +313,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
 	}
 
 	if (cpu_has_vp) {
-		mips_cm_lock_other(core, vpe_id);
+		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
 		write_gcr_co_reset_base(core_entry);
 		mips_cm_unlock_other();
@@ -518,7 +518,7 @@ static void cps_cpu_die(unsigned int cpu)
 	 */
 	fail_time = ktime_add_ms(ktime_get(), 2000);
 	do {
-		mips_cm_lock_other(core, 0);
+		mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 		mips_cpc_lock_other(core);
 		stat = read_cpc_co_stat_conf();
 		stat &= CPC_Cx_STAT_CONF_SEQSTATE;
@@ -562,7 +562,7 @@ static void cps_cpu_die(unsigned int cpu)
 			panic("Failed to call remote sibling CPU\n");
 	} else if (cpu_has_vp) {
 		do {
-			mips_cm_lock_other(core, vpe_id);
+			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 			stat = read_cpc_co_vp_running();
 			mips_cm_unlock_other();
 		} while (stat & (1 << vpe_id));
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 4cc43892b959..6248a5a3ec9e 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -190,7 +190,7 @@ void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 		core = cpu_core(&cpu_data[cpu]);
 
 		while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
-			mips_cm_lock_other(core, 0);
+			mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 			mips_cpc_lock_other(core);
 			write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
 			mips_cpc_unlock_other();
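The converted call above is equivalent to the open-coded form below (a
sketch using the accessors from the mips-cm.h hunk), but avoids pulling
the cluster, core & VP numbers out manually:

/* Open-coded equivalent of mips_cm_lock_other_cpu(cpu, ...): */
mips_cm_lock_other(cpu_cluster(&cpu_data[cpu]), cpu_core(&cpu_data[cpu]),
		   cpu_vpe_id(&cpu_data[cpu]), CM_GCR_Cx_OTHER_BLOCK_LOCAL);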