aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVineet Gupta <vgupta@synopsys.com>2017-08-03 08:15:44 -0400
committerVineet Gupta <vgupta@synopsys.com>2017-08-04 04:26:35 -0400
commitb5ddb6d54729d814356937572d6c9b599f10c29f (patch)
tree122ddb43a2f0a0472237e20114ea78b1db5ff7a3
parent7d79cee2c6540ea64dd917a14e2fd63d4ac3d3c0 (diff)
ARCv2: PAE40: set MSB even if !CONFIG_ARC_HAS_PAE40 but PAE exists in SoC
PAE40 configuration in hardware extends some of the address registers for TLB/cache ops to 2 words. So far kernel was NOT setting the higher word if feature was not enabled in software which is wrong. Those need to be set to 0 in such case.

Normally this would be done in the cache flush / tlb ops, however since these registers only exist conditionally, this would have to be conditional to a flag being set on boot which is expensive/ugly - specially for the more common case of PAE exists but not in use. Optimize that by zero'ing them once at boot - nobody will write to them afterwards.

Cc: stable@vger.kernel.org #4.4+
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
-rw-r--r--arch/arc/include/asm/mmu.h2
-rw-r--r--arch/arc/mm/cache.c34
-rw-r--r--arch/arc/mm/tlb.c12
3 files changed, 41 insertions, 7 deletions
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index db7319e9b506..efb79fafff1d 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void)
 	return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
 }
 
+extern int pae40_exist_but_not_enab(void);
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 874913b3e826..7db283b46ebd 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1123,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
 	__dc_enable();
 }
 
+/*
+ * Cache related boot time checks/setups only needed on master CPU:
+ *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
+ *    Assume SMP only, so all cores will have same cache config. A check on
+ *    one core suffices for all
+ *  - IOC setup / dma callbacks only need to be done once
+ */
 void __init arc_cache_init_master(void)
 {
 	unsigned int __maybe_unused cpu = smp_processor_id();
@@ -1202,12 +1209,27 @@ void __ref arc_cache_init(void)
 
 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
 
-	/*
-	 * Only master CPU needs to execute rest of function:
-	 *  - Assume SMP so all cores will have same cache config so
-	 *    any geomtry checks will be same for all
-	 *  - IOC setup / dma callbacks only need to be setup once
-	 */
 	if (!cpu)
 		arc_cache_init_master();
+
+	/*
+	 * In PAE regime, TLB and cache maintenance ops take wider addresses
+	 * And even if PAE is not enabled in kernel, the upper 32-bits still need
+	 * to be zeroed to keep the ops sane.
+	 * As an optimization for more common !PAE enabled case, zero them out
+	 * once at init, rather than checking/setting to 0 for every runtime op
+	 */
+	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
+
+		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
+			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
+
+		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
+			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
+
+		if (l2_line_sz) {
+			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
+			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
+		}
+	}
 }
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index d0126fdfe2d8..b181f3ee38aa 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -104,6 +104,8 @@
 /* A copy of the ASID from the PID reg is kept in asid_cache */
 DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
 
+static int __read_mostly pae_exists;
+
 /*
  * Utility Routine to erase a J-TLB entry
  * Caller needs to setup Index Reg (manually or via getIndex)
@@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void)
 		mmu->u_dtlb = mmu4->u_dtlb * 4;
 		mmu->u_itlb = mmu4->u_itlb * 4;
 		mmu->sasid = mmu4->sasid;
-		mmu->pae = mmu4->pae;
+		pae_exists = mmu->pae = mmu4->pae;
 	}
 }
 
@@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
 	return buf;
 }
 
+int pae40_exist_but_not_enab(void)
+{
+	return pae_exists && !is_pae40_enabled();
+}
+
 void arc_mmu_init(void)
 {
 	char str[256];
@@ -859,6 +866,9 @@ void arc_mmu_init(void)
 	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
 #endif
+
+	if (pae40_exist_but_not_enab())
+		write_aux_reg(ARC_REG_TLBPD1HI, 0);
 }
 
 /*