aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mm
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2014-03-15 17:29:28 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2014-05-29 19:48:24 -0400
commit733c6bbafdfac62a307ed5ca925889343c5635ac (patch)
treec8d6b72b38d8eda28ebc678c91ed2a9bfc7e84dd /arch/arm/mm
parentf777332ba7ae42c396b7aabc20bdbeeebb3a63c0 (diff)
ARM: l2c: add L2C-220 specific handlers
The L2C-220 is different from the L2C-210 and L2C-310 in that every operation is a background operation: this means we have to use spinlocks to protect all operations, and we have to wait for every operation to complete. Should a second operation be attempted while a previous operation is in progress, the response will be an imprecise abort. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--arch/arm/mm/cache-l2x0.c167
1 file changed, 157 insertions, 10 deletions
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 49ddff972cb3..751c3d7a22b3 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -491,6 +491,148 @@ static const struct l2c_init_data l2c210_data __initconst = {
491}; 491};
492 492
493/* 493/*
494 * L2C-220 specific code.
495 *
496 * All operations are background operations: they have to be waited for.
497 * Conflicting requests generate a slave error (which will cause an
498 * imprecise abort.) Never uses sync_reg_offset, so we hard-code the
499 * imprecise abort).  This code never uses sync_reg_offset, so the
499b * sync register offset is hard-coded here.
500 *
501 * However, we can re-use the l2c210_resume call.
502 */
/*
 * Issue a cache sync and wait for it to complete.  On the L2C-220 the
 * sync is a background operation, so poll bit 0 of the sync register
 * until the hardware clears it.  Every visible caller holds l2x0_lock
 * around this, as required by the conflicting-request rule above.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}
508
/*
 * Run a whole-cache background operation ('reg' is the offset of one of
 * the *_WAY registers) followed by a cache sync.  The entire sequence is
 * performed under l2x0_lock so no other L2C-220 operation can be issued
 * while this one is in flight.
 */
static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
518
/*
 * Issue a background PA-based line operation on [start, end) through the
 * operation register at 'reg'.  Each write is preceded by a wait for the
 * previous operation to complete (bit 0 clears), since starting a new
 * request while one is pending generates a slave error (see header
 * comment above).
 *
 * The range is processed in 4096-byte chunks; between chunks the
 * caller's l2x0_lock is dropped and re-acquired to bound IRQ-off
 * latency.  Caller must hold l2x0_lock on entry and receives the
 * (possibly updated) IRQ flags as the return value.
 *
 * NOTE(review): the final write is NOT waited for here - callers must
 * l2c_wait_mask() on the same register before issuing anything else.
 */
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			/* wait for the previous background op to finish */
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			/* breathe: let IRQs/other CPUs in between chunks */
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
541
/*
 * Invalidate [start, end).  Partial cache lines at either boundary must
 * not be plainly invalidated - that would discard valid data sharing the
 * line - so they are clean+invalidated instead, and only the fully
 * covered lines are invalidated.  The whole sequence runs under
 * l2x0_lock and ends with a cache sync.
 */
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		/* clean+inv the partial line at the start of the range */
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		/* clean+inv the partial line at the end of the range */
		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			/* the start-line op (if issued) must finish first */
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	/* invalidate the fully covered lines; may relax the lock inside */
	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	/* wait for the last invalidate before syncing */
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
568
569static void l2c220_clean_range(unsigned long start, unsigned long end)
570{
571 void __iomem *base = l2x0_base;
572 unsigned long flags;
573
574 start &= ~(CACHE_LINE_SIZE - 1);
575 if ((end - start) >= l2x0_size) {
576 l2c220_op_way(base, L2X0_CLEAN_WAY);
577 return;
578 }
579
580 raw_spin_lock_irqsave(&l2x0_lock, flags);
581 flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
582 start, end, flags);
583 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
584 __l2c220_cache_sync(base);
585 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
586}
587
588static void l2c220_flush_range(unsigned long start, unsigned long end)
589{
590 void __iomem *base = l2x0_base;
591 unsigned long flags;
592
593 start &= ~(CACHE_LINE_SIZE - 1);
594 if ((end - start) >= l2x0_size) {
595 l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
596 return;
597 }
598
599 raw_spin_lock_irqsave(&l2x0_lock, flags);
600 flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
601 start, end, flags);
602 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
603 __l2c220_cache_sync(base);
604 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
605}
606
/* Clean+invalidate the entire cache by way (locked + synced inside). */
static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}
611
612static void l2c220_sync(void)
613{
614 unsigned long flags;
615
616 raw_spin_lock_irqsave(&l2x0_lock, flags);
617 __l2c220_cache_sync(l2x0_base);
618 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
619}
620
621static const struct l2c_init_data l2c220_data = {
622 .num_lock = 1,
623 .enable = l2c_enable,
624 .outer_cache = {
625 .inv_range = l2c220_inv_range,
626 .clean_range = l2c220_clean_range,
627 .flush_range = l2c220_flush_range,
628 .flush_all = l2c220_flush_all,
629 .disable = l2c_disable,
630 .sync = l2c220_sync,
631 .resume = l2c210_resume,
632 },
633};
634
635/*
494 * L2C-310 specific code. 636 * L2C-310 specific code.
495 * 637 *
496 * Very similar to L2C-210, the PA, set/way and sync operations are atomic, 638 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
@@ -831,6 +973,10 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
831 data = &l2c210_data; 973 data = &l2c210_data;
832 break; 974 break;
833 975
976 case L2X0_CACHE_ID_PART_L220:
977 data = &l2c220_data;
978 break;
979
834 case L2X0_CACHE_ID_PART_L310: 980 case L2X0_CACHE_ID_PART_L310:
835 data = &l2c310_init_fns; 981 data = &l2c310_init_fns;
836 break; 982 break;
@@ -895,17 +1041,18 @@ static const struct l2c_init_data of_l2c210_data __initconst = {
895 }, 1041 },
896}; 1042};
897 1043
898static const struct l2c_init_data of_l2x0_data __initconst = { 1044static const struct l2c_init_data of_l2c220_data __initconst = {
1045 .num_lock = 1,
899 .of_parse = l2x0_of_parse, 1046 .of_parse = l2x0_of_parse,
900 .enable = l2x0_enable, 1047 .enable = l2c_enable,
901 .outer_cache = { 1048 .outer_cache = {
902 .inv_range = l2x0_inv_range, 1049 .inv_range = l2c220_inv_range,
903 .clean_range = l2x0_clean_range, 1050 .clean_range = l2c220_clean_range,
904 .flush_range = l2x0_flush_range, 1051 .flush_range = l2c220_flush_range,
905 .flush_all = l2x0_flush_all, 1052 .flush_all = l2c220_flush_all,
906 .disable = l2x0_disable, 1053 .disable = l2c_disable,
907 .sync = l2x0_cache_sync, 1054 .sync = l2c220_sync,
908 .resume = l2x0_resume, 1055 .resume = l2c210_resume,
909 }, 1056 },
910}; 1057};
911 1058
@@ -1342,7 +1489,7 @@ static const struct l2c_init_data of_tauros3_data __initconst = {
1342#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } 1489#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
1343static const struct of_device_id l2x0_ids[] __initconst = { 1490static const struct of_device_id l2x0_ids[] __initconst = {
1344 L2C_ID("arm,l210-cache", of_l2c210_data), 1491 L2C_ID("arm,l210-cache", of_l2c210_data),
1345 L2C_ID("arm,l220-cache", of_l2x0_data), 1492 L2C_ID("arm,l220-cache", of_l2c220_data),
1346 L2C_ID("arm,pl310-cache", of_l2c310_data), 1493 L2C_ID("arm,pl310-cache", of_l2c310_data),
1347 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1494 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
1348 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), 1495 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),