Diffstat (limited to 'arch/arm/mm/cache-l2x0.c')
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 278
1 file changed, 250 insertions(+), 28 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 8a97e6443c62..6911b8b2745c 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -25,6 +25,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
+#include "cache-aurora-l2.h"
 
 #define CACHE_LINE_SIZE		32
 
@@ -34,14 +35,20 @@ static u32 l2x0_way_mask; /* Bitmask of active ways */
 static u32 l2x0_size;
 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
+/* Aurora doesn't have the cache ID register available, so we have to
+ * pass it through the device tree */
+static u32 cache_id_part_number_from_dt;
+
 struct l2x0_regs l2x0_saved_regs;
 
 struct l2x0_of_data {
 	void (*setup)(const struct device_node *, u32 *, u32 *);
 	void (*save)(void);
-	void (*resume)(void);
+	struct outer_cache_fns outer_cache;
 };
 
+static bool of_init = false;
+
 static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
 	/* wait for cache operation by line or way to complete */
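The struct change above is the core of this patch: each l2x0_of_data now embeds a full outer_cache_fns instead of a single resume hook, so every DT-matched variant can supply its complete set of cache operations in one table. A minimal standalone sketch of the pattern, with reduced stand-in types (the field set here is illustrative, not the kernel's full outer_cache_fns):

#include <string.h>

/* Reduced stand-ins for the kernel types, for illustration only */
struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*resume)(void);
};

struct l2x0_of_data {
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

static struct outer_cache_fns outer_cache;

/* Mirrors what l2x0_of_init() does at the end of this patch: copy the
 * matched variant's whole ops table into the global outer_cache */
static void install_ops(const struct l2x0_of_data *data)
{
	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
}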
@@ -168,7 +175,7 @@ static void l2x0_inv_all(void)
 	/* invalidate all ways */
 	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	/* Invalidating when L2 is enabled is a nono */
-	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
+	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
@@ -292,11 +299,18 @@ static void l2x0_unlock(u32 cache_id)
 	int lockregs;
 	int i;
 
-	if (cache_id == L2X0_CACHE_ID_PART_L310)
+	switch (cache_id) {
+	case L2X0_CACHE_ID_PART_L310:
 		lockregs = 8;
-	else
+		break;
+	case AURORA_CACHE_ID:
+		lockregs = 4;
+		break;
+	default:
 		/* L210 and unknown types */
 		lockregs = 1;
+		break;
+	}
 
 	for (i = 0; i < lockregs; i++) {
 		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
@@ -312,18 +326,22 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	u32 cache_id;
 	u32 way_size = 0;
 	int ways;
+	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
 	const char *type;
 
 	l2x0_base = base;
-
-	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+	if (cache_id_part_number_from_dt)
+		cache_id = cache_id_part_number_from_dt;
+	else
+		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
+			& L2X0_CACHE_ID_PART_MASK;
 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
 
 	/* Determine the number of ways */
-	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+	switch (cache_id) {
 	case L2X0_CACHE_ID_PART_L310:
 		if (aux & (1 << 16))
 			ways = 16;
@@ -340,6 +358,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		ways = (aux >> 13) & 0xf;
 		type = "L210";
 		break;
+
+	case AURORA_CACHE_ID:
+		sync_reg_offset = AURORA_SYNC_REG;
+		ways = (aux >> 13) & 0xf;
+		ways = 2 << ((ways + 1) >> 2);
+		way_size_shift = AURORA_WAY_SIZE_SHIFT;
+		type = "Aurora";
+		break;
 	default:
 		/* Assume unknown chips have 8 ways */
 		ways = 8;
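Unlike the L2x0 parts, Aurora's associativity field is an encoded value rather than a literal way count, hence the extra shift. A quick standalone check of the decode above (the sample AUX_CTRL value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int aux = 0xA000;		/* hypothetical AUX_CTRL, bits [16:13] = 5 */
	unsigned int ways = (aux >> 13) & 0xf;	/* field value: 5 */

	ways = 2 << ((ways + 1) >> 2);		/* (5 + 1) >> 2 = 1, so 2 << 1 = 4 ways */
	printf("%u ways\n", ways);
	return 0;
}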
@@ -353,7 +379,8 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	 * L2 cache Size = Way size * Number of ways
 	 */
 	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
-	way_size = 1 << (way_size + 3);
+	way_size = 1 << (way_size + way_size_shift);
+
 	l2x0_size = ways * way_size * SZ_1K;
 
 	/*
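A worked example with the default L2X0_WAY_SIZE_SHIFT of 3 (the literal 3 in the removed line): a way-size field of 3 yields way_size = 1 << (3 + 3) = 64 KB per way, so a 16-way L310 reports l2x0_size = 16 * 64 * SZ_1K = 1 MB. Aurora substitutes AURORA_WAY_SIZE_SHIFT through the new way_size_shift variable.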
@@ -361,7 +388,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
 	 */
-	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 		/* Make sure that I&D is not locked down when starting */
 		l2x0_unlock(cache_id);
 
@@ -371,7 +398,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		l2x0_inv_all();
 
 		/* enable L2X0 */
-		writel_relaxed(1, l2x0_base + L2X0_CTRL);
+		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
 	}
 
 	/* Re-read it in case some bits are reserved. */
@@ -380,13 +407,15 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	/* Save the value for resuming. */
 	l2x0_saved_regs.aux_ctrl = aux;
 
-	outer_cache.inv_range = l2x0_inv_range;
-	outer_cache.clean_range = l2x0_clean_range;
-	outer_cache.flush_range = l2x0_flush_range;
-	outer_cache.sync = l2x0_cache_sync;
-	outer_cache.flush_all = l2x0_flush_all;
-	outer_cache.inv_all = l2x0_inv_all;
-	outer_cache.disable = l2x0_disable;
+	if (!of_init) {
+		outer_cache.inv_range = l2x0_inv_range;
+		outer_cache.clean_range = l2x0_clean_range;
+		outer_cache.flush_range = l2x0_flush_range;
+		outer_cache.sync = l2x0_cache_sync;
+		outer_cache.flush_all = l2x0_flush_all;
+		outer_cache.inv_all = l2x0_inv_all;
+		outer_cache.disable = l2x0_disable;
+	}
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
@@ -394,6 +423,100 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 }
 
 #ifdef CONFIG_OF
+static int l2_wt_override;
+
+/*
+ * Note that the end addresses passed to Linux primitives are
+ * noninclusive, while the hardware cache range operations use
+ * inclusive start and end addresses.
+ */
+static unsigned long calc_range_end(unsigned long start, unsigned long end)
+{
+	/*
+	 * Limit the number of cache lines processed at once,
+	 * since cache range operations stall the CPU pipeline
+	 * until completion.
+	 */
+	if (end > start + MAX_RANGE_SIZE)
+		end = start + MAX_RANGE_SIZE;
+
+	/*
+	 * Cache range operations can't straddle a page boundary.
+	 */
+	if (end > PAGE_ALIGN(start+1))
+		end = PAGE_ALIGN(start+1);
+
+	return end;
+}
+
+/*
+ * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
+ * and range operations only do a TLB lookup on the start address.
+ */
+static void aurora_pa_range(unsigned long start, unsigned long end,
+			unsigned long offset)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
+	writel(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
+	writel(end, l2x0_base + offset);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+
+	cache_sync();
+}
+
+static void aurora_inv_range(unsigned long start, unsigned long end)
+{
+	/*
+	 * round start and end addresses up to cache line size
+	 */
+	start &= ~(CACHE_LINE_SIZE - 1);
+	end = ALIGN(end, CACHE_LINE_SIZE);
+
+	/*
+	 * Invalidate all full cache lines between 'start' and 'end'.
+	 */
+	while (start < end) {
+		unsigned long range_end = calc_range_end(start, end);
+		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
+				AURORA_INVAL_RANGE_REG);
+		start = range_end;
+	}
+}
+
+static void aurora_clean_range(unsigned long start, unsigned long end)
+{
+	/*
+	 * If L2 is forced to WT, the L2 will always be clean and we
+	 * don't need to do anything here.
+	 */
+	if (!l2_wt_override) {
+		start &= ~(CACHE_LINE_SIZE - 1);
+		end = ALIGN(end, CACHE_LINE_SIZE);
+		while (start != end) {
+			unsigned long range_end = calc_range_end(start, end);
+			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
+					AURORA_CLEAN_RANGE_REG);
+			start = range_end;
+		}
+	}
+}
+
+static void aurora_flush_range(unsigned long start, unsigned long end)
+{
+	if (!l2_wt_override) {
+		start &= ~(CACHE_LINE_SIZE - 1);
+		end = ALIGN(end, CACHE_LINE_SIZE);
+		while (start != end) {
+			unsigned long range_end = calc_range_end(start, end);
+			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
+					AURORA_FLUSH_RANGE_REG);
+			start = range_end;
+		}
+	}
+}
+
 static void __init l2x0_of_setup(const struct device_node *np,
 				 u32 *aux_val, u32 *aux_mask)
 {
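The range-walk loops above depend on calc_range_end() never letting a single hardware operation cross a page boundary or exceed MAX_RANGE_SIZE. A userspace sketch exercising both limits (the PAGE_SIZE and MAX_RANGE_SIZE values are assumptions standing in for the kernel's and cache-aurora-l2.h's definitions):

#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed 4 KB pages */
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MAX_RANGE_SIZE	1024UL		/* assumed per-operation chunk limit */

static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;
	if (end > PAGE_ALIGN(start + 1))
		end = PAGE_ALIGN(start + 1);
	return end;
}

int main(void)
{
	/* range straddles a page: first chunk stops at the page boundary */
	printf("%#lx\n", calc_range_end(0xff80, 0x10100));	/* prints 0x10000 */
	/* large in-page range: capped at MAX_RANGE_SIZE bytes */
	printf("%#lx\n", calc_range_end(0x20000, 0x24000));	/* prints 0x20400 */
	return 0;
}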
@@ -491,9 +614,15 @@ static void __init pl310_save(void)
 	}
 }
 
+static void aurora_save(void)
+{
+	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
+	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+}
+
 static void l2x0_resume(void)
 {
-	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 		/* restore aux ctrl and enable l2 */
 		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
 
@@ -502,7 +631,7 @@ static void l2x0_resume(void)
 
 		l2x0_inv_all();
 
-		writel_relaxed(1, l2x0_base + L2X0_CTRL);
+		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
 	}
 }
 
@@ -510,7 +639,7 @@ static void pl310_resume(void)
 {
 	u32 l2x0_revision;
 
-	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 		/* restore pl310 setup */
 		writel_relaxed(l2x0_saved_regs.tag_latency,
 				l2x0_base + L2X0_TAG_LATENCY_CTRL);
@@ -536,22 +665,108 @@ static void pl310_resume(void)
 	l2x0_resume();
 }
 
+static void aurora_resume(void)
+{
+	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+		writel(l2x0_saved_regs.aux_ctrl, l2x0_base + L2X0_AUX_CTRL);
+		writel(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
+	}
+}
+
+static void __init aurora_broadcast_l2_commands(void)
+{
+	__u32 u;
+	/* Enable broadcasting of cache commands to L2 */
+	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
+	u |= AURORA_CTRL_FW;		/* Set the FW bit */
+	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
+	isb();
+}
+
+static void __init aurora_of_setup(const struct device_node *np,
+				u32 *aux_val, u32 *aux_mask)
+{
+	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
+	u32 mask = AURORA_ACR_REPLACEMENT_MASK;
+
+	of_property_read_u32(np, "cache-id-part",
+			&cache_id_part_number_from_dt);
+
+	/* Determine and save the write policy */
+	l2_wt_override = of_property_read_bool(np, "wt-override");
+
+	if (l2_wt_override) {
+		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
+		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
+	}
+
+	*aux_val &= ~mask;
+	*aux_val |= val;
+	*aux_mask &= ~mask;
+}
+
 static const struct l2x0_of_data pl310_data = {
-	pl310_of_setup,
-	pl310_save,
-	pl310_resume,
+	.setup = pl310_of_setup,
+	.save = pl310_save,
+	.outer_cache = {
+		.resume = pl310_resume,
+		.inv_range = l2x0_inv_range,
+		.clean_range = l2x0_clean_range,
+		.flush_range = l2x0_flush_range,
+		.sync = l2x0_cache_sync,
+		.flush_all = l2x0_flush_all,
+		.inv_all = l2x0_inv_all,
+		.disable = l2x0_disable,
+		.set_debug = pl310_set_debug,
+	},
 };
 
 static const struct l2x0_of_data l2x0_data = {
-	l2x0_of_setup,
-	NULL,
-	l2x0_resume,
+	.setup = l2x0_of_setup,
+	.save = NULL,
+	.outer_cache = {
+		.resume = l2x0_resume,
+		.inv_range = l2x0_inv_range,
+		.clean_range = l2x0_clean_range,
+		.flush_range = l2x0_flush_range,
+		.sync = l2x0_cache_sync,
+		.flush_all = l2x0_flush_all,
+		.inv_all = l2x0_inv_all,
+		.disable = l2x0_disable,
+	},
+};
+
+static const struct l2x0_of_data aurora_with_outer_data = {
+	.setup = aurora_of_setup,
+	.save = aurora_save,
+	.outer_cache = {
+		.resume = aurora_resume,
+		.inv_range = aurora_inv_range,
+		.clean_range = aurora_clean_range,
+		.flush_range = aurora_flush_range,
+		.sync = l2x0_cache_sync,
+		.flush_all = l2x0_flush_all,
+		.inv_all = l2x0_inv_all,
+		.disable = l2x0_disable,
+	},
+};
+
+static const struct l2x0_of_data aurora_no_outer_data = {
+	.setup = aurora_of_setup,
+	.save = aurora_save,
+	.outer_cache = {
+		.resume = aurora_resume,
+	},
 };
 
 static const struct of_device_id l2x0_ids[] __initconst = {
 	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
 	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
 	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
+	{ .compatible = "marvell,aurora-system-cache",
+	  .data = (void *)&aurora_no_outer_data},
+	{ .compatible = "marvell,aurora-outer-cache",
+	  .data = (void *)&aurora_with_outer_data},
 	{}
 };
 
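With the two new match entries, a device tree for an Aurora-equipped SoC (Marvell's Armada family) selects "marvell,aurora-outer-cache" or "marvell,aurora-system-cache" and supplies the cache-id-part and optional wt-override properties that aurora_of_setup() reads above.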
@@ -577,17 +792,24 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	data = of_match_node(l2x0_ids, np)->data;
 
 	/* L2 configuration can only be changed if the cache is disabled */
-	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 		if (data->setup)
 			data->setup(np, &aux_val, &aux_mask);
+
+		/* For the Aurora cache in no-outer mode, select the
+		 * correct mode using the coprocessor */
+		if (data == &aurora_no_outer_data)
+			aurora_broadcast_l2_commands();
 	}
 
 	if (data->save)
 		data->save();
 
+	of_init = true;
 	l2x0_init(l2x0_base, aux_val, aux_mask);
 
-	outer_cache.resume = data->resume;
+	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+
 	return 0;
 }
 #endif
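For context, a platform would typically hand control to all of this from its machine init along the lines of the call below; the zero value and all-ones mask are placeholders meaning "no AUX_CTRL overrides", with real per-SoC values substituted as needed:

/* hypothetical machine-init call site: no AUX_CTRL overrides */
l2x0_of_init(0, ~0UL);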