author    Gregory CLEMENT <gregory.clement@free-electrons.com>	2012-11-05 19:58:07 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>	2012-11-06 14:47:35 -0500
commit    b8db6b886a1fecd6a5b1d13b190f3149247305ef (patch)
tree      382e1fc446b99ee32e3f64d399f27e3af32d5782 /arch/arm/mm
parent    c3545236e8740ab556022f87685d18503c86e187 (diff)
ARM: 7547/4: cache-l2x0: add support for Aurora L2 cache ctrl
The Aurora Cache Controller was designed to be compatible with the ARM
L2 Cache Controller. It comes with some differences and improvements:

- no cache id part number is available through hardware (it needs to
  be passed via the DT).
- an always-write-through mode is available.
- the controller comes in two flavors, outer cache and system cache
  (meaning maintenance operations on L1 are broadcast to the L2, and
  the L2 performs the same operation).
- in outer cache mode, the cache maintenance operations are improved:
  they can be done on a range inside a page and are not limited to a
  cache line.

Tested-and-Reviewed-by: Lior Amsalem <alior@marvell.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: Yehuda Yitschak <yehuday@marvell.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/cache-aurora-l2.h	 55
-rw-r--r--	arch/arm/mm/cache-l2x0.c	223
2 files changed, 265 insertions(+), 13 deletions(-)
diff --git a/arch/arm/mm/cache-aurora-l2.h b/arch/arm/mm/cache-aurora-l2.h
new file mode 100644
index 000000000000..c86124769831
--- /dev/null
+++ b/arch/arm/mm/cache-aurora-l2.h
@@ -0,0 +1,55 @@
+/*
+ * AURORA shared L2 cache controller support
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARM_HARDWARE_AURORA_L2_H
+#define __ASM_ARM_HARDWARE_AURORA_L2_H
+
+#define AURORA_SYNC_REG		    0x700
+#define AURORA_RANGE_BASE_ADDR_REG  0x720
+#define AURORA_FLUSH_PHY_ADDR_REG   0x7f0
+#define AURORA_INVAL_RANGE_REG	    0x774
+#define AURORA_CLEAN_RANGE_REG	    0x7b4
+#define AURORA_FLUSH_RANGE_REG	    0x7f4
+
+#define AURORA_ACR_REPLACEMENT_OFFSET	    27
+#define AURORA_ACR_REPLACEMENT_MASK	     \
+	(0x3 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_WAYRR    \
+	(0 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_LFSR     \
+	(1 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \
+	(3 << AURORA_ACR_REPLACEMENT_OFFSET)
+
+#define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET	0
+#define AURORA_ACR_FORCE_WRITE_POLICY_MASK	\
+	(0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_POLICY_DIS	\
+	(0 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_BACK_POLICY	\
+	(1 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_THRO_POLICY	\
+	(2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+
+#define MAX_RANGE_SIZE		1024
+
+#define AURORA_WAY_SIZE_SHIFT	2
+
+#define AURORA_CTRL_FW		0x100
+
+/* chose a number outside L2X0_CACHE_ID_PART_MASK to be sure to make
+ * the distinction between a number coming from hardware and a number
+ * coming from the device tree */
+#define AURORA_CACHE_ID	       0x100
+
+#endif /* __ASM_ARM_HARDWARE_AURORA_L2_H */
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index db55d18691ed..6911b8b2745c 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -25,6 +25,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
+#include "cache-aurora-l2.h"
 
 #define CACHE_LINE_SIZE		32
 
@@ -34,6 +35,10 @@ static u32 l2x0_way_mask; /* Bitmask of active ways */
 static u32 l2x0_size;
 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
+/* Aurora doesn't have the cache ID register available, so we have to
+ * pass it through the device tree */
+static u32 cache_id_part_number_from_dt;
+
 struct l2x0_regs l2x0_saved_regs;
 
 struct l2x0_of_data {
@@ -170,7 +175,7 @@ static void l2x0_inv_all(void)
 	/* invalidate all ways */
 	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	/* Invalidating when L2 is enabled is a nono */
-	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
+	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
@@ -294,11 +299,18 @@ static void l2x0_unlock(u32 cache_id)
 	int lockregs;
 	int i;
 
-	if (cache_id == L2X0_CACHE_ID_PART_L310)
+	switch (cache_id) {
+	case L2X0_CACHE_ID_PART_L310:
 		lockregs = 8;
-	else
+		break;
+	case AURORA_CACHE_ID:
+		lockregs = 4;
+		break;
+	default:
 		/* L210 and unknown types */
 		lockregs = 1;
+		break;
+	}
 
 	for (i = 0; i < lockregs; i++) {
 		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
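Not part of the patch: a stand-alone sketch of what the switch above changes, namely how many instruction/data lockdown register pairs the unlock loop clears per controller type. The L310 part-number value is an assumption here (it comes from asm/hardware/cache-l2x0.h, not this diff).

	#include <stdio.h>

	#define AURORA_CACHE_ID		0x100
	#define CACHE_ID_PART_L310	(3 << 6)	/* assumed L2X0_CACHE_ID_PART_L310 */

	static int lockdown_regs(unsigned int cache_id)
	{
		switch (cache_id) {
		case CACHE_ID_PART_L310:
			return 8;
		case AURORA_CACHE_ID:
			return 4;
		default:		/* L210 and unknown types */
			return 1;
		}
	}

	int main(void)
	{
		const unsigned int ids[] = { CACHE_ID_PART_L310, AURORA_CACHE_ID, 0 };
		int i;

		for (i = 0; i < 3; i++)
			/* the driver clears one D and one I register per pair */
			printf("cache_id 0x%03x: %d lockdown pair(s)\n",
			       ids[i], lockdown_regs(ids[i]));
		return 0;
	}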
@@ -314,18 +326,22 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	u32 cache_id;
 	u32 way_size = 0;
 	int ways;
+	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
 	const char *type;
 
 	l2x0_base = base;
-
-	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+	if (cache_id_part_number_from_dt)
+		cache_id = cache_id_part_number_from_dt;
+	else
+		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
+			& L2X0_CACHE_ID_PART_MASK;
 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
 
 	/* Determine the number of ways */
-	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+	switch (cache_id) {
 	case L2X0_CACHE_ID_PART_L310:
 		if (aux & (1 << 16))
 			ways = 16;
@@ -342,6 +358,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		ways = (aux >> 13) & 0xf;
 		type = "L210";
 		break;
+
+	case AURORA_CACHE_ID:
+		sync_reg_offset = AURORA_SYNC_REG;
+		ways = (aux >> 13) & 0xf;
+		ways = 2 << ((ways + 1) >> 2);
+		way_size_shift = AURORA_WAY_SIZE_SHIFT;
+		type = "Aurora";
+		break;
 	default:
 		/* Assume unknown chips have 8 ways */
 		ways = 8;
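Not part of the patch: the Aurora associativity decode above is dense, so here is a stand-alone trace of it. The field in aux[16:13] sits where the L210 keeps a direct way count; on Aurora it is an encoded value. With way_size_shift set to AURORA_WAY_SIZE_SHIFT (2), the later way-size computation also becomes 1 << (field + 2) KB instead of 1 << (field + 3) KB. The sample encodings below are illustrative; real values come from the hardware.

	#include <stdio.h>

	int main(void)
	{
		unsigned int enc;

		/* sample aux[16:13] encodings; actual values are hardware-defined */
		for (enc = 3; enc <= 15; enc += 4) {
			unsigned int ways = 2 << ((enc + 1) >> 2);
			printf("aux[16:13] = %2u -> %2u ways\n", enc, ways);
		}
		return 0;	/* prints 4, 8, 16, 32 ways */
	}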
@@ -355,7 +379,8 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	 * L2 cache Size =  Way size * Number of ways
 	 */
 	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
-	way_size = 1 << (way_size + 3);
+	way_size = 1 << (way_size + way_size_shift);
+
 	l2x0_size = ways * way_size * SZ_1K;
 
 	/*
@@ -363,7 +388,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
 	 */
-	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 		/* Make sure that I&D is not locked down when starting */
 		l2x0_unlock(cache_id);
 
@@ -373,7 +398,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		l2x0_inv_all();
 
 		/* enable L2X0 */
-		writel_relaxed(1, l2x0_base + L2X0_CTRL);
+		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
 	}
 
 	/* Re-read it in case some bits are reserved. */
@@ -398,6 +423,100 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 }
 
 #ifdef CONFIG_OF
+static int l2_wt_override;
+
+/*
+ * Note that the end addresses passed to Linux primitives are
+ * noninclusive, while the hardware cache range operations use
+ * inclusive start and end addresses.
+ */
+static unsigned long calc_range_end(unsigned long start, unsigned long end)
+{
+	/*
+	 * Limit the number of cache lines processed at once,
+	 * since cache range operations stall the CPU pipeline
+	 * until completion.
+	 */
+	if (end > start + MAX_RANGE_SIZE)
+		end = start + MAX_RANGE_SIZE;
+
+	/*
+	 * Cache range operations can't straddle a page boundary.
+	 */
+	if (end > PAGE_ALIGN(start+1))
+		end = PAGE_ALIGN(start+1);
+
+	return end;
+}
+
+/*
+ * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
+ * and range operations only do a TLB lookup on the start address.
+ */
+static void aurora_pa_range(unsigned long start, unsigned long end,
+			unsigned long offset)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
+	writel(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
+	writel(end, l2x0_base + offset);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+
+	cache_sync();
+}
+
+static void aurora_inv_range(unsigned long start, unsigned long end)
+{
+	/*
+	 * round start and end addresses up to cache line size
+	 */
+	start &= ~(CACHE_LINE_SIZE - 1);
+	end = ALIGN(end, CACHE_LINE_SIZE);
+
+	/*
+	 * Invalidate all full cache lines between 'start' and 'end'.
+	 */
+	while (start < end) {
+		unsigned long range_end = calc_range_end(start, end);
+		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
+				AURORA_INVAL_RANGE_REG);
+		start = range_end;
+	}
+}
+
+static void aurora_clean_range(unsigned long start, unsigned long end)
+{
+	/*
+	 * If L2 is forced to WT, the L2 will always be clean and we
+	 * don't need to do anything here.
+	 */
+	if (!l2_wt_override) {
+		start &= ~(CACHE_LINE_SIZE - 1);
+		end = ALIGN(end, CACHE_LINE_SIZE);
+		while (start != end) {
+			unsigned long range_end = calc_range_end(start, end);
+			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
+					AURORA_CLEAN_RANGE_REG);
+			start = range_end;
+		}
+	}
+}
+
+static void aurora_flush_range(unsigned long start, unsigned long end)
+{
+	if (!l2_wt_override) {
+		start &= ~(CACHE_LINE_SIZE - 1);
+		end = ALIGN(end, CACHE_LINE_SIZE);
+		while (start != end) {
+			unsigned long range_end = calc_range_end(start, end);
+			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
+					AURORA_FLUSH_RANGE_REG);
+			start = range_end;
+		}
+	}
+}
+
 static void __init l2x0_of_setup(const struct device_node *np,
 				 u32 *aux_val, u32 *aux_mask)
 {
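Not part of the patch: a user-space trace of calc_range_end() above, showing how an arbitrary range is split into chunks that stay within one page and within MAX_RANGE_SIZE, and how each chunk's inclusive hardware end is range_end - CACHE_LINE_SIZE. The 4 KB page size and the sample addresses are assumptions for illustration.

	#include <stdio.h>

	#define CACHE_LINE_SIZE	32
	#define MAX_RANGE_SIZE	1024
	#define PAGE_SIZE	4096UL		/* assumed */
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	static unsigned long calc_range_end(unsigned long start, unsigned long end)
	{
		if (end > start + MAX_RANGE_SIZE)
			end = start + MAX_RANGE_SIZE;
		if (end > PAGE_ALIGN(start + 1))
			end = PAGE_ALIGN(start + 1);
		return end;
	}

	int main(void)
	{
		/* 2.5 KB range starting 256 bytes below a page boundary */
		unsigned long start = 0x10f00, end = 0x11900;

		while (start < end) {
			unsigned long range_end = calc_range_end(start, end);
			printf("op [0x%lx .. 0x%lx] inclusive\n",
			       start, range_end - CACHE_LINE_SIZE);
			start = range_end;
		}
		/* prints four chunks: up to the page boundary, then two
		 * 1 KB chunks, then the 256-byte remainder */
		return 0;
	}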
@@ -495,9 +614,15 @@ static void __init pl310_save(void)
 	}
 }
 
+static void aurora_save(void)
+{
+	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
+	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+}
+
 static void l2x0_resume(void)
 {
-	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 		/* restore aux ctrl and enable l2 */
 		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
 
@@ -506,7 +631,7 @@ static void l2x0_resume(void)
 
 		l2x0_inv_all();
 
-		writel_relaxed(1, l2x0_base + L2X0_CTRL);
+		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
 	}
 }
 
@@ -514,7 +639,7 @@ static void pl310_resume(void)
 {
 	u32 l2x0_revision;
 
-	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 		/* restore pl310 setup */
 		writel_relaxed(l2x0_saved_regs.tag_latency,
 			l2x0_base + L2X0_TAG_LATENCY_CTRL);
@@ -540,6 +665,46 @@ static void pl310_resume(void)
 	l2x0_resume();
 }
 
+static void aurora_resume(void)
+{
+	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+		writel(l2x0_saved_regs.aux_ctrl, l2x0_base + L2X0_AUX_CTRL);
+		writel(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
+	}
+}
+
+static void __init aurora_broadcast_l2_commands(void)
+{
+	__u32 u;
+	/* Enable Broadcasting of cache commands to L2 */
+	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
+	u |= AURORA_CTRL_FW;		/* Set the FW bit */
+	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
+	isb();
+}
+
+static void __init aurora_of_setup(const struct device_node *np,
+				u32 *aux_val, u32 *aux_mask)
+{
+	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
+	u32 mask = AURORA_ACR_REPLACEMENT_MASK;
+
+	of_property_read_u32(np, "cache-id-part",
+			&cache_id_part_number_from_dt);
+
+	/* Determine and save the write policy */
+	l2_wt_override = of_property_read_bool(np, "wt-override");
+
+	if (l2_wt_override) {
+		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
+		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
+	}
+
+	*aux_val &= ~mask;
+	*aux_val |= val;
+	*aux_mask &= ~mask;
+}
+
 static const struct l2x0_of_data pl310_data = {
 	.setup = pl310_of_setup,
 	.save  = pl310_save,
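Not part of the patch: a stand-alone check of what aurora_of_setup() above feeds back into aux_val/aux_mask when the DT node carries "wt-override". The macro values are copied from cache-aurora-l2.h in this patch; the starting aux_val/aux_mask values and everything else are scaffolding.

	#include <stdio.h>

	#define REPLACEMENT_OFFSET	27
	#define REPLACEMENT_MASK	(0x3u << REPLACEMENT_OFFSET)
	#define REPLACEMENT_SEMIPLRU	(3u << REPLACEMENT_OFFSET)
	#define WRITE_POLICY_MASK	0x3u
	#define FORCE_WRITE_THRO	0x2u

	int main(void)
	{
		unsigned int val = REPLACEMENT_SEMIPLRU;
		unsigned int mask = REPLACEMENT_MASK;
		unsigned int aux_val = 0, aux_mask = 0xffffffffu;
		int wt_override = 1;		/* "wt-override" present in DT */

		if (wt_override) {
			val |= FORCE_WRITE_THRO;
			mask |= WRITE_POLICY_MASK;
		}

		/* same update the driver applies to the caller's values */
		aux_val &= ~mask;
		aux_val |= val;
		aux_mask &= ~mask;

		printf("aux_val  = 0x%08x\n", aux_val);		/* 0x18000002 */
		printf("aux_mask = 0x%08x\n", aux_mask);	/* 0xe7fffffc */
		return 0;
	}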
@@ -571,10 +736,37 @@ static const struct l2x0_of_data l2x0_data = {
 	},
 };
 
+static const struct l2x0_of_data aurora_with_outer_data = {
+	.setup = aurora_of_setup,
+	.save  = aurora_save,
+	.outer_cache = {
+		.resume      = aurora_resume,
+		.inv_range   = aurora_inv_range,
+		.clean_range = aurora_clean_range,
+		.flush_range = aurora_flush_range,
+		.sync        = l2x0_cache_sync,
+		.flush_all   = l2x0_flush_all,
+		.inv_all     = l2x0_inv_all,
+		.disable     = l2x0_disable,
+	},
+};
+
+static const struct l2x0_of_data aurora_no_outer_data = {
+	.setup = aurora_of_setup,
+	.save  = aurora_save,
+	.outer_cache = {
+		.resume      = aurora_resume,
+	},
+};
+
 static const struct of_device_id l2x0_ids[] __initconst = {
 	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
 	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
 	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
+	{ .compatible = "marvell,aurora-system-cache",
+	  .data = (void *)&aurora_no_outer_data},
+	{ .compatible = "marvell,aurora-outer-cache",
+	  .data = (void *)&aurora_with_outer_data},
 	{}
 };
 
@@ -600,9 +792,14 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	data = of_match_node(l2x0_ids, np)->data;
 
 	/* L2 configuration can only be changed if the cache is disabled */
-	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 		if (data->setup)
 			data->setup(np, &aux_val, &aux_mask);
+
+		/* For Aurora cache in no outer mode select the
+		 * correct mode using the coprocessor */
+		if (data == &aurora_no_outer_data)
+			aurora_broadcast_l2_commands();
 	}
 
 	if (data->save)