author    Vineet Gupta <vgupta@synopsys.com>  2013-05-09 04:30:51 -0400
committer Vineet Gupta <vgupta@synopsys.com>  2013-05-09 04:48:50 -0400
commit    a690984d60abcb627ce1bcc0300a14043b5e224a (patch)
tree      07a4bc9e58bf8b7144cb4f6e0cb8a81efa03c8d6 /arch/arc/mm
parent    c917a36f5fe551748eb37bd1efdcccc1045a02ab (diff)
ARC: [mm] refactor the core (i|d)cache line ops loops
No semantic changes:
* simplify the alignment code by using the & operation only
* rename variables clearly as paddr

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
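The claim that the switch from slack arithmetic to pure masking is behaviour-preserving can be checked outside the kernel. Below is a minimal standalone sketch (illustrative, not the kernel code: LINE_LEN and LINE_MASK stand in for ARC_DCACHE_LINE_LEN and DCACHE_LINE_MASK, assuming 64-byte lines) comparing the two forms:

/*
 * Minimal standalone sketch (not the kernel code): shows that the old
 * slack-based rounding and the new mask-only form compute the same
 * (paddr, sz) pair.  LINE_MASK mirrors DCACHE_LINE_MASK for 64-byte lines.
 */
#include <assert.h>

#define LINE_LEN  64UL
#define LINE_MASK (~(LINE_LEN - 1))   /* like DCACHE_LINE_MASK */

static void old_way(unsigned long *paddr, unsigned long *sz)
{
        unsigned long slack = *paddr & ~LINE_MASK;  /* offset into the line */

        *sz += slack;      /* grow size by the floored-off bytes */
        *paddr -= slack;   /* floor start to the line boundary */
}

static void new_way(unsigned long *paddr, unsigned long *sz)
{
        *sz += *paddr & ~LINE_MASK;  /* same growth, no temporary */
        *paddr &= LINE_MASK;         /* same floor, via masking */
}

int main(void)
{
        unsigned long p1 = 0x1234, s1 = 100, p2 = 0x1234, s2 = 100;

        old_way(&p1, &s1);
        new_way(&p2, &s2);
        assert(p1 == p2 && s1 == s2);  /* identical: 0x1200 and 152 */
        return 0;
}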
Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/cache_arc700.c  |  53
1 file changed, 24 insertions(+), 29 deletions(-)
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index d48afebf9df5..8a760e2c0341 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -270,21 +270,20 @@ static inline void __dc_entire_op(const int cacheop)
  * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
  * It's sole purpose is to help gcc generate ZOL
  */
-static inline void __dc_line_loop(unsigned long start, unsigned long sz,
+static inline void __dc_line_loop(unsigned long paddr, unsigned long sz,
                                   int aux_reg)
 {
-        int num_lines, slack;
+        int num_lines;
 
         /* Ensure we properly floor/ceil the non-line aligned/sized requests
-         * and have @start - aligned to cache line and integral @num_lines.
+         * and have @paddr - aligned to cache line and integral @num_lines.
          * This however can be avoided for page sized since:
-         *  -@start will be cache-line aligned already (being page aligned)
+         *  -@paddr will be cache-line aligned already (being page aligned)
          *  -@sz will be integral multiple of line size (being page sized).
          */
         if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-                slack = start & ~DCACHE_LINE_MASK;
-                sz += slack;
-                start -= slack;
+                sz += paddr & ~DCACHE_LINE_MASK;
+                paddr &= DCACHE_LINE_MASK;
         }
 
         num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
@@ -298,17 +297,17 @@ static inline void __dc_line_loop(unsigned long start, unsigned long sz,
                  * doesn't support aliasing configs for D$, yet.
                  * Thus paddr is enough to provide both tag and index.
                  */
-                write_aux_reg(ARC_REG_DC_PTAG, start);
+                write_aux_reg(ARC_REG_DC_PTAG, paddr);
 #endif
-                write_aux_reg(aux_reg, start);
-                start += ARC_DCACHE_LINE_LEN;
+                write_aux_reg(aux_reg, paddr);
+                paddr += ARC_DCACHE_LINE_LEN;
         }
 }
 
 /*
  * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long start, unsigned long sz,
+static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
                                 const int cacheop)
 {
         unsigned long flags, tmp = tmp;
@@ -332,7 +331,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
         else
                 aux = ARC_REG_DC_FLDL;
 
-        __dc_line_loop(start, sz, aux);
+        __dc_line_loop(paddr, sz, aux);
 
         if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
                 wait_for_flush();
@@ -347,7 +346,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 #else
 
 #define __dc_entire_op(cacheop)
-#define __dc_line_op(start, sz, cacheop)
+#define __dc_line_op(paddr, sz, cacheop)
 
 #endif /* CONFIG_ARC_HAS_DCACHE */
 
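As a worked example of the floor/ceil logic in the D-cache hunk above (illustrative values, again assuming 64-byte lines): a 100-byte request starting 52 bytes into a line grows to 152 bytes, floors to the line boundary, and spans three lines, as the sketch below shows.

/*
 * Worked example (illustrative values, 64-byte D$ lines): a request for
 * 100 bytes starting 52 bytes into a line must touch three cache lines.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))  /* as in linux/kernel.h */
#define LINE_LEN            64UL
#define LINE_MASK           (~(LINE_LEN - 1))

int main(void)
{
        unsigned long paddr = 0x80001234, sz = 100;

        sz += paddr & ~LINE_MASK;   /* 100 + 52 = 152 */
        paddr &= LINE_MASK;         /* 0x80001200 */

        /* ceil(152 / 64) = 3 lines: 0x...1200, 0x...1240, 0x...1280 */
        printf("%lu lines from %#lx\n", DIV_ROUND_UP(sz, LINE_LEN), paddr);
        return 0;
}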
@@ -399,49 +398,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                 unsigned long sz)
 {
         unsigned long flags;
-        int num_lines, slack;
-        unsigned int addr;
+        int num_lines;
 
         /*
          * Ensure we properly floor/ceil the non-line aligned/sized requests:
          * However page sized flushes can be compile time optimised.
-         *  -@phy_start will be cache-line aligned already (being page aligned)
+         *  -@paddr will be cache-line aligned already (being page aligned)
          *  -@sz will be integral multiple of line size (being page sized).
          */
         if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-                slack = phy_start & ~ICACHE_LINE_MASK;
-                sz += slack;
-                phy_start -= slack;
+                sz += paddr & ~ICACHE_LINE_MASK;
+                paddr &= ICACHE_LINE_MASK;
+                vaddr &= ICACHE_LINE_MASK;
         }
 
         num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
 
-#if (CONFIG_ARC_MMU_VER > 2)
-        vaddr &= ICACHE_LINE_MASK;
-        addr = phy_start;
-#else
+#if (CONFIG_ARC_MMU_VER <= 2)
         /* bits 17:13 of vaddr go as bits 4:0 of paddr */
-        addr = phy_start | ((vaddr >> 13) & 0x1F);
+        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 #endif
 
         local_irq_save(flags);
         while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
                 /* tag comes from phy addr */
-                write_aux_reg(ARC_REG_IC_PTAG, addr);
+                write_aux_reg(ARC_REG_IC_PTAG, paddr);
 
                 /* index bits come from vaddr */
                 write_aux_reg(ARC_REG_IC_IVIL, vaddr);
                 vaddr += ARC_ICACHE_LINE_LEN;
 #else
                 /* paddr contains stuffed vaddrs bits */
-                write_aux_reg(ARC_REG_IC_IVIL, addr);
+                write_aux_reg(ARC_REG_IC_IVIL, paddr);
 #endif
-                addr += ARC_ICACHE_LINE_LEN;
+                paddr += ARC_ICACHE_LINE_LEN;
         }
         local_irq_restore(flags);
 }
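In the MMU v1/v2 branch, replacing the literal 13 with PAGE_SHIFT is likewise a rename rather than a change, because ARC700 uses 8 KB pages. A standalone sketch with example addresses (hypothetical values, not kernel code):

/*
 * Standalone sketch (example addresses, not kernel code): on MMU v1/v2
 * the I$ invalidate command wants vaddr bits 17:13 stuffed into paddr
 * bits 4:0.  With 8 KB pages, PAGE_SHIFT == 13, so the new
 * (vaddr >> PAGE_SHIFT) & 0x1F extracts exactly the bits the old
 * literal (vaddr >> 13) & 0x1F did.
 */
#include <assert.h>

#define PAGE_SHIFT 13  /* 8 KB pages, as on ARC700 */

int main(void)
{
        unsigned long paddr = 0x80154000;  /* line-aligned physical address */
        unsigned long vaddr = 0x2003a000;  /* example user virtual address */

        /* same extraction either way, because PAGE_SHIFT is 13 here */
        assert(((vaddr >> PAGE_SHIFT) & 0x1F) == ((vaddr >> 13) & 0x1F));

        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;  /* vaddr bits 17:13 -> 4:0 */
        assert(paddr == 0x8015401dUL);          /* stuffed index bits = 0x1d */
        return 0;
}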