author		Vineet Gupta <vgupta@synopsys.com>	2016-12-19 14:24:08 -0500
committer	Vineet Gupta <vgupta@synopsys.com>	2016-12-19 14:54:41 -0500
commit		f64915be2d8c629e7b55ad37f90bd8db2713426e
tree		ac9446e343088fa29b48c777bcc4f9b5500d13f3	/arch/arc/mm/cache.c
parent		983eeba7d2a854b540bd25c9e2311778408d9730
ARC: mm: No need to save cache version in @cpuinfo
Historically, MMU revisions have been paired with cache revision updates, which are captured in the MMU and Cache Build Configuration Registers respectively. This was used in boot code to check for configuration mismatches, especially in simulations (such as running with non-existent caches, or with MMU and cache versions that don't pair up). This can instead be inferred from other cache parameters such as line size. So remove @ver from the post-processed @cpuinfo; the slot could later be used to save some other interesting info.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
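The gist of the change, as a minimal illustrative sketch (not the patch itself): cache presence is now judged from a non-zero geometry field such as line_len rather than from a stored hardware version. Field names below mirror those touched by the diff; the helper function is hypothetical.

/*
 * Illustrative sketch only: the patch drops the 'ver' field from the
 * post-processed per-cpu cache info and keys "does this cache exist?"
 * checks off a geometry field that is only non-zero when the cache is
 * actually present.
 */
struct cpuinfo_arc_cache {
	unsigned int sz_k;	/* cache size in KB, decoded from the BCR */
	unsigned int line_len;	/* line size in bytes; 0 => no cache fitted */
	/* ... other decoded fields; no 'ver' kept any more ... */
};

/* hypothetical helper showing the new presence check */
static inline int arc_cache_present(const struct cpuinfo_arc_cache *p)
{
	return p->line_len != 0;	/* previously: p->ver != 0 */
}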
Diffstat (limited to 'arch/arc/mm/cache.c')
-rw-r--r--	arch/arc/mm/cache.c	15
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 50d71695cd4e..a0ce8ff1e59c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -40,7 +40,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 	struct cpuinfo_arc_cache *p;
 
 #define PR_CACHE(p, cfg, str) \
-	if (!(p)->ver) \
+	if (!(p)->line_len) \
 		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
 	else \
 		n += scnprintf(buf + n, len - n, \
@@ -54,7 +54,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
 
 	p = &cpuinfo_arc700[c].slc;
-	if (p->ver)
+	if (p->line_len)
 		n += scnprintf(buf + n, len - n,
 			       "SLC\t\t: %uK, %uB Line%s\n",
 			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
@@ -104,7 +104,6 @@ static void read_decode_cache_bcr_arcv2(int cpu)
 	READ_BCR(ARC_REG_SLC_BCR, sbcr);
 	if (sbcr.ver) {
 		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
-		p_slc->ver = sbcr.ver;
 		p_slc->sz_k = 128 << slc_cfg.sz;
 		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
 	}
@@ -152,7 +151,6 @@ void read_decode_cache_bcr(void)
 
 	p_ic->line_len = 8 << ibcr.line_len;
 	p_ic->sz_k = 1 << (ibcr.sz - 1);
-	p_ic->ver = ibcr.ver;
 	p_ic->vipt = 1;
 	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
 
@@ -176,7 +174,6 @@ dc_chk:
 
 	p_dc->line_len = 16 << dbcr.line_len;
 	p_dc->sz_k = 1 << (dbcr.sz - 1);
-	p_dc->ver = dbcr.ver;
 
 slc_chk:
 	if (is_isa_arcv2())
@@ -945,17 +942,13 @@ void arc_cache_init(void)
 	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
 		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 
-		if (!ic->ver)
+		if (!ic->line_len)
 			panic("cache support enabled but non-existent cache\n");
 
 		if (ic->line_len != L1_CACHE_BYTES)
 			panic("ICache line [%d] != kernel Config [%d]",
 			      ic->line_len, L1_CACHE_BYTES);
 
-		if (ic->ver != CONFIG_ARC_MMU_VER)
-			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
-			      ic->ver, CONFIG_ARC_MMU_VER);
-
 		/*
 		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
 		 * pair to provide vaddr/paddr respectively, just as in MMU v3
@@ -969,7 +962,7 @@ void arc_cache_init(void)
 	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
 		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
 
-		if (!dc->ver)
+		if (!dc->line_len)
 			panic("cache support enabled but non-existent cache\n");
 
 		if (dc->line_len != L1_CACHE_BYTES)