author		Vineet Gupta <vgupta@synopsys.com>	2015-04-03 05:37:07 -0400
committer	Vineet Gupta <vgupta@synopsys.com>	2015-06-24 20:30:19 -0400
commit		795f4558562fd5318260d5d8144a2f8612aeda7b (patch)
tree		b4cb8211acf56f2f8acc7ef1429cee4e667f2834
parent		a5c8b52abe677977883655166796f167ef1e0084 (diff)
ARCv2: SLC: Handle explicit flush for DMA ops (w/o IO-coherency)
L2 cache on ARCHS processors is called SLC (System Level Cache).
For working DMA (in absence of hardware assisted IO Coherency) we need
to manage SLC explicitly when buffers transition between cpu and
controllers.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
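For context (not part of the commit): the contract the dma_cache_* hooks
below implement for a non-IO-coherent system can be sketched as follows.
The driver paths and dev_kick_tx()/dev_kick_rx() are hypothetical names
made up for this note; only dma_cache_wback() and dma_cache_inv() come
from this patch.

/* Hypothetical non-coherent driver paths; illustrative sketch only. */
extern void dev_kick_tx(unsigned long paddr, unsigned long len);  /* made up */
extern void dev_kick_rx(unsigned long paddr, unsigned long len);  /* made up */

static void example_to_device(unsigned long paddr, unsigned long len)
{
	/* CPU wrote the buffer: push dirty L1 (and now SLC) lines to DRAM */
	dma_cache_wback(paddr, len);
	dev_kick_tx(paddr, len);	/* device now reads current data */
}

static void example_from_device(unsigned long paddr, unsigned long len)
{
	/* Device will write the buffer: drop stale L1/SLC lines first */
	dma_cache_inv(paddr, len);
	dev_kick_rx(paddr, len);
	/* after DMA completion, CPU loads miss and fetch fresh data */
}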
-rw-r--r--	arch/arc/include/asm/cache.h	11
-rw-r--r--	arch/arc/mm/cache.c		64
-rw-r--r--	arch/arc/mm/dma.c		12
3 files changed, 85 insertions(+), 2 deletions(-)
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index d21c76d6b054..d67345d3e2d4 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -82,5 +82,16 @@ extern void read_decode_cache_bcr(void);
 
 /* System-level cache (L2 cache) related Auxiliary registers */
 #define ARC_REG_SLC_CFG		0x901
+#define ARC_REG_SLC_CTRL	0x903
+#define ARC_REG_SLC_FLUSH	0x904
+#define ARC_REG_SLC_INVALIDATE	0x905
+#define ARC_REG_SLC_RGN_START	0x914
+#define ARC_REG_SLC_RGN_END	0x916
+
+/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_IM		0x040
+#define SLC_CTRL_DISABLE	0x001
+#define SLC_CTRL_BUSY		0x100
+#define SLC_CTRL_RGN_OP_INV	0x200
 
 #endif /* _ASM_CACHE_H */
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 0eaaee60fd0b..b29d62ed4f7e 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -21,6 +21,8 @@
 #include <asm/cachectl.h>
 #include <asm/setup.h>
 
+static int l2_line_sz;
+
 void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
 			       unsigned long sz, const int cacheop);
 
@@ -120,13 +122,16 @@ dc_chk:
 	p_dc->ver = dbcr.ver;
 
 slc_chk:
+	if (!is_isa_arcv2())
+		return;
+
 	p_slc = &cpuinfo_arc700[cpu].slc;
 	READ_BCR(ARC_REG_SLC_BCR, sbcr);
 	if (sbcr.ver) {
 		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
 		p_slc->ver = sbcr.ver;
 		p_slc->sz_k = 128 << slc_cfg.sz;
-		p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
+		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
 	}
 }
 
@@ -460,6 +465,53 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 
 #endif /* CONFIG_ARC_HAS_ICACHE */
 
+noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
+{
+#ifdef CONFIG_ISA_ARCV2
+	unsigned long flags;
+	unsigned int ctrl;
+
+	local_irq_save(flags);
+
+	/*
+	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
+	 *  - b'000 (default) is Flush,
+	 *  - b'001 is Invalidate if CTRL.IM == 0
+	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
+	 */
+	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
+
+	/* Don't rely on default value of IM bit */
+	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
+		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
+	else
+		ctrl |= SLC_CTRL_IM;
+
+	if (op & OP_INV)
+		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
+	else
+		ctrl &= ~SLC_CTRL_RGN_OP_INV;
+
+	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
+
+	/*
+	 * Lower bits are ignored, no need to clip
+	 * END needs to be setup before START (latter triggers the operation)
+	 * END can't be same as START, so add (l2_line_sz - 1) to sz
+	 */
+	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
+	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
+
+	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+	local_irq_restore(flags);
+#endif
+}
+
+static inline int need_slc_flush(void)
+{
+	return is_isa_arcv2() && l2_line_sz;
+}
+
 
 /***********************************************************
  * Exported APIs
@@ -509,22 +561,30 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
-
 void dma_cache_wback_inv(unsigned long start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
+
+	if (need_slc_flush())
+		slc_op(start, sz, OP_FLUSH_N_INV);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
 void dma_cache_inv(unsigned long start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_INV);
+
+	if (need_slc_flush())
+		slc_op(start, sz, OP_INV);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
 void dma_cache_wback(unsigned long start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH);
+
+	if (need_slc_flush())
+		slc_op(start, sz, OP_FLUSH);
 }
 EXPORT_SYMBOL(dma_cache_wback);
 
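A worked instance of the END/START arithmetic in slc_op() above may help.
The concrete addresses are made up for the example, and the 64-byte line
size is just one of the two values the probe can report (64 or 128):

/* Illustrative numbers only; mirrors the slc_op() region setup above. */
unsigned long l2_line_sz = 64;		/* probed as 64 or 128 */
unsigned long paddr = 0x80000100, sz = 64;

unsigned long end   = paddr + sz + l2_line_sz - 1;	/* 0x8000017f */
unsigned long start = paddr;				/* 0x80000100 */

/*
 * The SLC ignores sub-line address bits, so [start, end] covers every
 * line holding [paddr, paddr + sz), and end != start holds even when
 * sz < l2_line_sz -- at worst one extra trailing line is operated on.
 */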
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 2cfe81dca92a..74a637a1cfc4 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -66,6 +66,18 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	/* This is bus address, platform dependent */
 	*dma_handle = (dma_addr_t)paddr;
 
+	/*
+	 * Evict any existing L1 and/or L2 lines for the backing page
+	 * in case it was used earlier as a normal "cached" page.
+	 * Yeah this bit us - STAR 9000898266
+	 *
+	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
+	 * can't be used to efficiently flush L1 and/or L2 which need paddr.
+	 * Currently flush_cache_vmap nukes the L1 cache completely which
+	 * will be optimized as a separate commit
+	 */
+	dma_cache_wback_inv((unsigned long)paddr, size);
+
 	return kvaddr;
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
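To see why the eviction matters, here is a hypothetical caller's view;
my_dev and the descriptor-ring scenario are illustrative, not from the
patch, and only dma_alloc_coherent()/dma_cache_wback_inv() are real:

/* Hypothetical caller; sketch of the STAR 9000898266 failure mode. */
dma_addr_t ring_dma;
void *ring = dma_alloc_coherent(my_dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);

/*
 * The backing page may earlier have been mapped cacheable, leaving valid
 * (possibly dirty) L1/SLC lines keyed by its paddr. Without the
 * dma_cache_wback_inv() added above, such a dirty line could be evicted
 * later and overwrite descriptors the device already updated via DMA.
 */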