Diffstat (limited to 'arch/arc/mm/cache_arc700.c')
 arch/arc/mm/cache_arc700.c | 155
 1 file changed, 65 insertions(+), 90 deletions(-)
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 5a1259cd948c..6b58c1de7577 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -182,7 +182,7 @@ void arc_cache_init(void)
 
 #ifdef CONFIG_ARC_HAS_ICACHE
         /* 1. Confirm some of I-cache params which Linux assumes */
-        if (ic->line_len != ARC_ICACHE_LINE_LEN)
+        if (ic->line_len != L1_CACHE_BYTES)
                 panic("Cache H/W doesn't match kernel Config");
 
         if (ic->ver != CONFIG_ARC_MMU_VER)
@@ -205,7 +205,7 @@ chk_dc:
                 return;
 
 #ifdef CONFIG_ARC_HAS_DCACHE
-        if (dc->line_len != ARC_DCACHE_LINE_LEN)
+        if (dc->line_len != L1_CACHE_BYTES)
                 panic("Cache H/W doesn't match kernel Config");
 
         /* check for D-Cache aliasing */
@@ -240,6 +240,67 @@ chk_dc:
 #define OP_INV          0x1
 #define OP_FLUSH        0x2
 #define OP_FLUSH_N_INV  0x3
+#define OP_INV_IC       0x4
+
+/*
+ * Common Helper for Line Operations on {I,D}-Cache
+ */
+static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
+                                     unsigned long sz, const int cacheop)
+{
+        unsigned int aux_cmd, aux_tag;
+        int num_lines;
+        const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+
+        if (cacheop == OP_INV_IC) {
+                aux_cmd = ARC_REG_IC_IVIL;
+                aux_tag = ARC_REG_IC_PTAG;
+        }
+        else {
+                /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
+                aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+                aux_tag = ARC_REG_DC_PTAG;
+        }
+
+        /* Ensure we properly floor/ceil the non-line aligned/sized requests
+         * and have @paddr aligned to a cache line and an integral @num_lines.
+         * This however can be avoided for page sized requests since:
+         *  -@paddr will be cache-line aligned already (being page aligned)
+         *  -@sz will be an integral multiple of line size (being page sized).
+         */
+        if (!full_page_op) {
+                sz += paddr & ~CACHE_LINE_MASK;
+                paddr &= CACHE_LINE_MASK;
+                vaddr &= CACHE_LINE_MASK;
+        }
+
+        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+        /* MMUv2 and before: paddr contains stuffed vaddr bits */
+        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#else
+        /* if V->P is constant for the loop, PTAG can be written once outside it */
+        if (full_page_op)
+                write_aux_reg(ARC_REG_DC_PTAG, paddr);
+#endif
+
+        while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+                /* MMUv3: cache ops require the paddr (tag) to be written separately */
+                if (!full_page_op) {
+                        write_aux_reg(aux_tag, paddr);
+                        paddr += L1_CACHE_BYTES;
+                }
+
+                write_aux_reg(aux_cmd, vaddr);
+                vaddr += L1_CACHE_BYTES;
+#else
+                write_aux_reg(aux_cmd, paddr);
+                paddr += L1_CACHE_BYTES;
+#endif
+        }
+}
 
 #ifdef CONFIG_ARC_HAS_DCACHE
 
@@ -289,53 +350,6 @@ static inline void __dc_entire_op(const int cacheop)
         write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
 }
 
-/*
- * Per Line Operation on D-Cache
- * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
- * It's sole purpose is to help gcc generate ZOL
- * (aliasing VIPT dcache flushing needs both vaddr and paddr)
- */
-static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
-                                  unsigned long sz, const int aux_reg)
-{
-        int num_lines;
-
-        /* Ensure we properly floor/ceil the non-line aligned/sized requests
-         * and have @paddr - aligned to cache line and integral @num_lines.
-         * This however can be avoided for page sized since:
-         * -@paddr will be cache-line aligned already (being page aligned)
-         * -@sz will be integral multiple of line size (being page sized).
-         */
-        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-                sz += paddr & ~DCACHE_LINE_MASK;
-                paddr &= DCACHE_LINE_MASK;
-                vaddr &= DCACHE_LINE_MASK;
-        }
-
-        num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
-
-#if (CONFIG_ARC_MMU_VER <= 2)
-        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#endif
-
-        while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-                /*
-                 * Just as for I$, in MMU v3, D$ ops also require
-                 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
-                 */
-                write_aux_reg(ARC_REG_DC_PTAG, paddr);
-
-                write_aux_reg(aux_reg, vaddr);
-                vaddr += ARC_DCACHE_LINE_LEN;
-#else
-                /* paddr contains stuffed vaddrs bits */
-                write_aux_reg(aux_reg, paddr);
-#endif
-                paddr += ARC_DCACHE_LINE_LEN;
-        }
-}
-
 /* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)       __dc_line_op(p, p, sz, op)
 
@@ -346,7 +360,6 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz, const int cacheop)
 {
         unsigned long flags, tmp = tmp;
-        int aux;
 
         local_irq_save(flags);
 
@@ -361,12 +374,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
                 write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
         }
 
-        if (cacheop & OP_INV)   /* Inv / flush-n-inv use same cmd reg */
-                aux = ARC_REG_DC_IVDL;
-        else
-                aux = ARC_REG_DC_FLDL;
-
-        __dc_line_loop(paddr, vaddr, sz, aux);
+        __cache_line_loop(paddr, vaddr, sz, cacheop);
 
         if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
                 wait_for_flush();
@@ -438,42 +446,9 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                 unsigned long sz)
 {
         unsigned long flags;
-        int num_lines;
-
-        /*
-         * Ensure we properly floor/ceil the non-line aligned/sized requests:
-         * However page sized flushes can be compile time optimised.
-         *  -@paddr will be cache-line aligned already (being page aligned)
-         *  -@sz will be integral multiple of line size (being page sized).
-         */
-        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-                sz += paddr & ~ICACHE_LINE_MASK;
-                paddr &= ICACHE_LINE_MASK;
-                vaddr &= ICACHE_LINE_MASK;
-        }
-
-        num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
-
-#if (CONFIG_ARC_MMU_VER <= 2)
-        /* bits 17:13 of vaddr go as bits 4:0 of paddr */
-        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#endif
 
         local_irq_save(flags);
-        while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-                /* tag comes from phy addr */
-                write_aux_reg(ARC_REG_IC_PTAG, paddr);
-
-                /* index bits come from vaddr */
-                write_aux_reg(ARC_REG_IC_IVIL, vaddr);
-                vaddr += ARC_ICACHE_LINE_LEN;
-#else
-                /* paddr contains stuffed vaddrs bits */
-                write_aux_reg(ARC_REG_IC_IVIL, paddr);
-#endif
-                paddr += ARC_ICACHE_LINE_LEN;
-        }
+        __cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
         local_irq_restore(flags);
 }
 
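For reference, the floor/ceil rounding that the consolidated __cache_line_loop() performs before its loop can be modelled outside the kernel. The sketch below is a minimal, standalone illustration only: the 64-byte line size and the example addresses are assumptions for demonstration, whereas the kernel takes the line size from L1_CACHE_BYTES and operates on real physical/virtual addresses.

/* Standalone model of the line rounding done by __cache_line_loop().
 * A 64-byte L1 line is assumed purely for illustration.
 */
#include <stdio.h>

#define L1_CACHE_BYTES          64UL
#define CACHE_LINE_MASK         (~(L1_CACHE_BYTES - 1))
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long paddr = 0x80000013UL;     /* not line aligned */
        unsigned long sz = 100;                 /* not a whole number of lines */
        int num_lines;

        /* floor the start address; grow sz by the bytes we backed up over,
         * so the original [paddr, paddr + sz) range stays covered */
        sz += paddr & ~CACHE_LINE_MASK;
        paddr &= CACHE_LINE_MASK;

        /* ceil to whole cache lines */
        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

        /* prints: start 0x80000000, 2 line(s) of 64 bytes */
        printf("start 0x%lx, %d line(s) of %lu bytes\n",
               paddr, num_lines, L1_CACHE_BYTES);
        return 0;
}

For a full-page operation the helper skips this rounding entirely, since a page-aligned, page-sized request is already an exact number of cache lines.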