author    Thomas Gleixner <tglx@linutronix.de>  2009-07-03 09:44:46 -0400
committer Ingo Molnar <mingo@elte.hu>           2011-09-13 05:12:14 -0400
commit    bd31b85960a7fcb2d7ede216460b8da71a88411c
tree      f2ab1a1105705856c5cdfc71bcf3f7b5f897d30d /arch/arm/mm/cache-l2x0.c
parent    a1741e7fcbc19a67520115df480ab17012cc3d0b
locking, ARM: Annotate low level hw locks as raw
Annotate the low level hardware locks which must not be preempted.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
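The conversion is mechanical: the lock definition and every acquire/release call move to the raw_ variants, and the critical sections themselves are untouched. The motivation is PREEMPT_RT (not mainline, as the message notes), where an ordinary spinlock becomes a sleeping lock and so must not wrap these non-preemptible low level cache maintenance operations; a raw spinlock always busy-waits with interrupts disabled. A minimal sketch of the pattern, modeled on l2x0_cache_sync() from the patch below (cache_sync() is the file's own helper):

	#include <linux/spinlock.h>

	/* was: static DEFINE_SPINLOCK(l2x0_lock); */
	static DEFINE_RAW_SPINLOCK(l2x0_lock);

	static void l2x0_cache_sync(void)
	{
		unsigned long flags;

		/* was: spin_lock_irqsave() / spin_unlock_irqrestore() */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		cache_sync();
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}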
Diffstat (limited to 'arch/arm/mm/cache-l2x0.c')
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 46 +++++++++++++++++++++-----------------------
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9ecfdb511951..3255c51e3e35 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -26,7 +26,7 @@
 #define CACHE_LINE_SIZE 32
 
 static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask; /* Bitmask of active ways */
 static uint32_t l2x0_size;
 
@@ -115,9 +115,9 @@ static void l2x0_cache_sync(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __l2x0_flush_all(void)
@@ -134,9 +134,9 @@ static void l2x0_flush_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_all(void)
@@ -144,11 +144,11 @@ static void l2x0_clean_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_all(void)
@@ -156,13 +156,13 @@ static void l2x0_inv_all(void)
 	unsigned long flags;
 
 	/* invalidate all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	/* Invalidating when L2 is enabled is a nono */
 	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -170,7 +170,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
 		debug_writel(0x03);
@@ -195,13 +195,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -214,7 +214,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -225,13 +225,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -244,7 +244,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -257,24 +257,24 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		debug_writel(0x00);
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_disable(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
 	writel_relaxed(0, l2x0_base + L2X0_CTRL);
 	dsb();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __init l2x0_unlock(__u32 cache_id)
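Worth noting in the range functions above: the lock is deliberately dropped and immediately re-acquired after each 4096 byte block, so interrupts are never kept disabled across an arbitrarily long range, and the raw conversion preserves that latency bound. A condensed sketch of the loop structure, with a hypothetical clean_one_line() standing in for the file's per-line operation:

	static void l2x0_clean_range(unsigned long start, unsigned long end)
	{
		void __iomem *base = l2x0_base;
		unsigned long flags;

		raw_spin_lock_irqsave(&l2x0_lock, flags);
		start &= ~(CACHE_LINE_SIZE - 1);
		while (start < end) {
			/* hold the lock for at most one 4 KiB block */
			unsigned long blk_end = start + min(end - start, 4096UL);

			while (start < blk_end) {
				clean_one_line(base, start);	/* hypothetical per-line op */
				start += CACHE_LINE_SIZE;
			}

			if (blk_end < end) {
				/* let pending interrupts run between blocks */
				raw_spin_unlock_irqrestore(&l2x0_lock, flags);
				raw_spin_lock_irqsave(&l2x0_lock, flags);
			}
		}
		cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
		cache_sync();
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}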