aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arc/mm/cache.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arc/mm/cache.c')
-rw-r--r--arch/arc/mm/cache.c50
1 files changed, 42 insertions, 8 deletions
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index a867575a758b..7db283b46ebd 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
665 static DEFINE_SPINLOCK(lock); 665 static DEFINE_SPINLOCK(lock);
666 unsigned long flags; 666 unsigned long flags;
667 unsigned int ctrl; 667 unsigned int ctrl;
668 phys_addr_t end;
668 669
669 spin_lock_irqsave(&lock, flags); 670 spin_lock_irqsave(&lock, flags);
670 671
@@ -694,8 +695,19 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
694 * END needs to be setup before START (latter triggers the operation) 695 * END needs to be setup before START (latter triggers the operation)
695 * END can't be same as START, so add (l2_line_sz - 1) to sz 696 * END can't be same as START, so add (l2_line_sz - 1) to sz
696 */ 697 */
697 write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1)); 698 end = paddr + sz + l2_line_sz - 1;
698 write_aux_reg(ARC_REG_SLC_RGN_START, paddr); 699 if (is_pae40_enabled())
700 write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
701
702 write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
703
704 if (is_pae40_enabled())
705 write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
706
707 write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
708
709 	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
710 read_aux_reg(ARC_REG_SLC_CTRL);
699 711
700 while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); 712 while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
701 713
@@ -1111,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
1111 __dc_enable(); 1123 __dc_enable();
1112} 1124}
1113 1125
1126/*
1127 * Cache related boot time checks/setups only needed on master CPU:
1128 * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
1129 * Assume SMP only, so all cores will have same cache config. A check on
1130 * one core suffices for all
1131 * - IOC setup / dma callbacks only need to be done once
1132 */
1114void __init arc_cache_init_master(void) 1133void __init arc_cache_init_master(void)
1115{ 1134{
1116 unsigned int __maybe_unused cpu = smp_processor_id(); 1135 unsigned int __maybe_unused cpu = smp_processor_id();
@@ -1190,12 +1209,27 @@ void __ref arc_cache_init(void)
1190 1209
1191 printk(arc_cache_mumbojumbo(0, str, sizeof(str))); 1210 printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
1192 1211
1193 /*
1194 * Only master CPU needs to execute rest of function:
1195 * - Assume SMP so all cores will have same cache config so
1196	 * any geometry checks will be same for all
1197 * - IOC setup / dma callbacks only need to be setup once
1198 */
1199 if (!cpu) 1212 if (!cpu)
1200 arc_cache_init_master(); 1213 arc_cache_init_master();
1214
1215 /*
1216 * In PAE regime, TLB and cache maintenance ops take wider addresses
1217 * And even if PAE is not enabled in kernel, the upper 32-bits still need
1218 * to be zeroed to keep the ops sane.
1219 * As an optimization for more common !PAE enabled case, zero them out
1220 * once at init, rather than checking/setting to 0 for every runtime op
1221 */
1222 if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
1223
1224 if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
1225 write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
1226
1227 if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
1228 write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
1229
1230 if (l2_line_sz) {
1231 write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
1232 write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
1233 }
1234 }
1201} 1235}