author		Catalin Marinas <catalin.marinas@arm.com>	2010-07-28 17:01:25 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-07-29 09:04:36 -0400
commit		6775a558fece413376e1dacd435adb5fbe225f40 (patch)
tree		e4428d3915151d3718a9687f843ecec115aeb546 /arch
parent		e936771a76a7b61ca55a5142a3de835c2e196871 (diff)
ARM: 6272/1: Convert L2x0 to use the IO relaxed operations
This patch is in preparation for a subsequent patch which adds barriers
to the I/O accessors. Since the mandatory barriers may do an L2 cache
sync, this patch avoids a recursive call into l2x0_cache_sync() via the
write*() accessors and wmb(), and a call into l2x0_cache_sync() with
the l2x0_lock held.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
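For context, a minimal sketch of the recursion being avoided, assuming the shape the follow-up barrier patch gives the I/O accessors (condensed, illustrative definitions, not the kernel's literal macro bodies):

/*
 * Illustrative sketch, assuming the follow-up patch's shape.  With
 * mandatory barriers, a plain writel() becomes a barrier followed by
 * the relaxed store, and the write barrier may sync the outer cache:
 */
#define wmb()		do { dsb(); outer_sync(); } while (0)
#define writel(v, c)	do { wmb(); writel_relaxed(v, c); } while (0)
/*
 * outer_sync() lands in l2x0_cache_sync(), which takes l2x0_lock and
 * issues a sync write.  If that write were a plain writel(), its
 * wmb() would call back into l2x0_cache_sync() with the lock held;
 * writel_relaxed() issues only the MMIO store and breaks the cycle.
 */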
Diffstat (limited to 'arch')
 arch/arm/mm/cache-l2x0.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index df4955885b21..9982eb385c0f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,14 +32,14 @@ static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
 	/* wait for the operation to complete */
-	while (readl(reg) & mask)
+	while (readl_relaxed(reg) & mask)
 		;
 }
 
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
-	writel(0, base + L2X0_CACHE_SYNC);
+	writel_relaxed(0, base + L2X0_CACHE_SYNC);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -47,14 +47,14 @@ static inline void l2x0_clean_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 }
 
 static inline void l2x0_inv_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -75,9 +75,9 @@ static inline void l2x0_flush_line(unsigned long addr)
 
 	/* Clean by PA followed by Invalidate by PA */
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 #else
 
@@ -90,7 +90,7 @@ static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
 }
 #endif
 
@@ -109,7 +109,7 @@ static inline void l2x0_inv_all(void)
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
@@ -215,8 +215,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
 	l2x0_base = base;
 
-	cache_id = readl(l2x0_base + L2X0_CACHE_ID);
-	aux = readl(l2x0_base + L2X0_AUX_CTRL);
+	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
@@ -248,15 +248,15 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
 	 */
-	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
 
 		/* l2x0 controller is disabled */
-		writel(aux, l2x0_base + L2X0_AUX_CTRL);
+		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
 		l2x0_inv_all();
 
 		/* enable L2X0 */
-		writel(1, l2x0_base + L2X0_CTRL);
+		writel_relaxed(1, l2x0_base + L2X0_CTRL);
 	}
 
 	outer_cache.inv_range = l2x0_inv_range;
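As a usage note, the relaxed accessors remain ordered with respect to other accesses to the same device, which is why they are sufficient here: every maintenance write above is bracketed by a cache_wait() poll of the same register, and operations that must be externally visible end in an explicit cache_sync(). A minimal sketch of that pattern, using a hypothetical poll_then_write() helper (not a kernel function) to mirror the cache_wait()+writel_relaxed() pairs:

/*
 * Hypothetical helper illustrating the pattern above: completion is
 * established by polling the device, not by an implicit barrier in
 * the accessor, so the relaxed MMIO operations are enough.
 */
static inline void poll_then_write(void __iomem *reg, unsigned long val)
{
	while (readl_relaxed(reg) & 1)	/* wait until the unit is idle */
		;
	writel_relaxed(val, reg);	/* kick off the next operation */
}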