summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMasahiro Yamada <yamada.masahiro@socionext.com>2016-04-26 04:11:13 -0400
committerRussell King <rmk+kernel@armlinux.org.uk>2016-05-05 14:03:39 -0400
commit6427a840ff6aeaac36c59872b0b4b2040ed26c9b (patch)
tree09374292684b9d4d7fd4518854d028abd1398e7c
parent7274a69cd86f61602a49a3d0b64d29b465f46a15 (diff)
ARM: 8567/1: cache-uniphier: activate ways for secondary CPUs
This outer cache allows controlling active ways independently for each CPU, but currently nothing is done for secondary CPUs. In other words, all the ways are locked for secondary CPUs by default. This commit fixes it to fully bring out the performance of this outer cache. There would be two possible ways to achieve this: [1] Each CPU initializes active ways for itself. This can be done via the SSCLPDAWCR register. This is a banked register, so each CPU sees a different instance of the register for its own. [2] The master CPU initializes active ways for all the CPUs. This is available via SSCDAWCARMR(N) registers, where all instances of SSCLPDAWCR are mirrored. They are mapped at the address SSCDAWCARMR + 4 * N, where N is the CPU number. The outer cache framework does not support a per-CPU init callback. So this commit adopts [2]; the master CPU iterates over possible CPUs, setting up SSCDAWCARMR(N) registers. Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--arch/arm/mm/cache-uniphier.c26
1 files changed, 24 insertions, 2 deletions
diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
index a6fa7b73fbe0..c8e2f4947223 100644
--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c
@@ -96,6 +96,7 @@ struct uniphier_cache_data {
96 void __iomem *ctrl_base; 96 void __iomem *ctrl_base;
97 void __iomem *rev_base; 97 void __iomem *rev_base;
98 void __iomem *op_base; 98 void __iomem *op_base;
99 void __iomem *way_ctrl_base;
99 u32 way_present_mask; 100 u32 way_present_mask;
100 u32 way_locked_mask; 101 u32 way_locked_mask;
101 u32 nsets; 102 u32 nsets;
@@ -256,10 +257,13 @@ static void __init __uniphier_cache_set_locked_ways(
256 struct uniphier_cache_data *data, 257 struct uniphier_cache_data *data,
257 u32 way_mask) 258 u32 way_mask)
258{ 259{
260 unsigned int cpu;
261
259 data->way_locked_mask = way_mask & data->way_present_mask; 262 data->way_locked_mask = way_mask & data->way_present_mask;
260 263
261 writel_relaxed(~data->way_locked_mask & data->way_present_mask, 264 for_each_possible_cpu(cpu)
262 data->ctrl_base + UNIPHIER_SSCLPDAWCR); 265 writel_relaxed(~data->way_locked_mask & data->way_present_mask,
266 data->way_ctrl_base + 4 * cpu);
263} 267}
264 268
265static void uniphier_cache_maint_range(unsigned long start, unsigned long end, 269static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
@@ -459,6 +463,8 @@ static int __init __uniphier_cache_init(struct device_node *np,
459 goto err; 463 goto err;
460 } 464 }
461 465
466 data->way_ctrl_base = data->ctrl_base + 0xc00;
467
462 if (*cache_level == 2) { 468 if (*cache_level == 2) {
463 u32 revision = readl(data->rev_base + UNIPHIER_SSCID); 469 u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
464 /* 470 /*
@@ -467,6 +473,22 @@ static int __init __uniphier_cache_init(struct device_node *np,
467 */ 473 */
468 if (revision <= 0x16) 474 if (revision <= 0x16)
469 data->range_op_max_size = (u32)1 << 22; 475 data->range_op_max_size = (u32)1 << 22;
476
477 /*
478 * Unfortunately, the offset address of active way control base
479 * varies from SoC to SoC.
480 */
481 switch (revision) {
482 case 0x11: /* sLD3 */
483 data->way_ctrl_base = data->ctrl_base + 0x870;
484 break;
485 case 0x12: /* LD4 */
486 case 0x16: /* sld8 */
487 data->way_ctrl_base = data->ctrl_base + 0x840;
488 break;
489 default:
490 break;
491 }
470 } 492 }
471 493
472 data->range_op_max_size -= data->line_size; 494 data->range_op_max_size -= data->line_size;