Diffstat (limited to 'arch/arm/mm/cache-l2x0.c')

 arch/arm/mm/cache-l2x0.c | 103 ++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 88 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 44c086710d2..0dddb54ea98 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -29,6 +29,16 @@ static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static uint32_t l2x0_size;
+static u32 l2x0_cache_id;
+static unsigned int l2x0_sets;
+static unsigned int l2x0_ways;
+
+static inline bool is_pl310_rev(int rev)
+{
+	return (l2x0_cache_id &
+		(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
+		(L2X0_CACHE_ID_PART_L310 | rev);
+}
 
 static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
@@ -120,6 +130,23 @@ static void l2x0_cache_sync(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+#ifdef CONFIG_PL310_ERRATA_727915
+static void l2x0_for_each_set_way(void __iomem *reg)
+{
+	int set;
+	int way;
+	unsigned long flags;
+
+	for (way = 0; way < l2x0_ways; way++) {
+		spin_lock_irqsave(&l2x0_lock, flags);
+		for (set = 0; set < l2x0_sets; set++)
+			writel_relaxed((way << 28) | (set << 5), reg);
+		cache_sync();
+		spin_unlock_irqrestore(&l2x0_lock, flags);
+	}
+}
+#endif
+
 static void __l2x0_flush_all(void)
 {
 	debug_writel(0x03);
@@ -133,6 +160,13 @@ static void l2x0_flush_all(void)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_PL310_ERRATA_727915
+	if (is_pl310_rev(REV_PL310_R2P0)) {
+		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
+		return;
+	}
+#endif
+
 	/* clean all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
@@ -143,11 +177,20 @@ static void l2x0_clean_all(void)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_PL310_ERRATA_727915
+	if (is_pl310_rev(REV_PL310_R2P0)) {
+		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
+		return;
+	}
+#endif
+
 	/* clean all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
+	debug_writel(0x03);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
 	cache_sync();
+	debug_writel(0x00);
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
@@ -266,6 +309,16 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+/* enables l2x0 after l2x0_disable, does not invalidate */
+void l2x0_enable(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(1, l2x0_base + L2X0_CTRL);
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 static void l2x0_disable(void)
 {
 	unsigned long flags;
@@ -277,50 +330,68 @@ static void l2x0_disable(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+static void __init l2x0_unlock(__u32 cache_id)
+{
+	int lockregs;
+	int i;
+
+	if (cache_id == L2X0_CACHE_ID_PART_L310)
+		lockregs = 8;
+	else
+		/* L210 and unknown types */
+		lockregs = 1;
+
+	for (i = 0; i < lockregs; i++) {
+		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+			       i * L2X0_LOCKDOWN_STRIDE);
+		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+			       i * L2X0_LOCKDOWN_STRIDE);
+	}
+}
+
+void l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
-	__u32 cache_id;
 	__u32 way_size = 0;
-	int ways;
 	const char *type;
 
 	l2x0_base = base;
 
-	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+	l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
 
 	/* Determine the number of ways */
-	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		if (aux & (1 << 16))
-			ways = 16;
+			l2x0_ways = 16;
 		else
-			ways = 8;
+			l2x0_ways = 8;
 		type = "L310";
 		break;
 	case L2X0_CACHE_ID_PART_L210:
-		ways = (aux >> 13) & 0xf;
+		l2x0_ways = (aux >> 13) & 0xf;
 		type = "L210";
 		break;
 	default:
 		/* Assume unknown chips have 8 ways */
-		ways = 8;
+		l2x0_ways = 8;
 		type = "L2x0 series";
 		break;
 	}
 
-	l2x0_way_mask = (1 << ways) - 1;
+	l2x0_way_mask = (1 << l2x0_ways) - 1;
 
 	/*
 	 * L2 cache Size =  Way size * Number of ways
 	 */
 	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
-	way_size = 1 << (way_size + 3);
-	l2x0_size = ways * way_size * SZ_1K;
+	way_size = SZ_1K << (way_size + 3);
+	l2x0_size = l2x0_ways * way_size;
+	l2x0_sets = way_size / CACHE_LINE_SIZE;
 
 	/*
 	 * Check if l2x0 controller is already enabled.
@@ -328,6 +399,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	 * accessing the below registers will fault.
 	 */
 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* Make sure that I&D is not locked down when starting */
+		l2x0_unlock(l2x0_cache_id);
 
 		/* l2x0 controller is disabled */
 		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -347,7 +420,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.disable = l2x0_disable;
 	outer_cache.set_debug = l2x0_set_debug;
 
-	printk(KERN_INFO "%s cache controller enabled\n", type);
-	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
-			ways, cache_id, aux, l2x0_size);
+	pr_info_once("%s cache controller enabled\n", type);
+	pr_info_once("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+			l2x0_ways, l2x0_cache_id, aux, l2x0_size);
 }
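
The hunks above add a non-invalidating l2x0_enable() alongside the existing init and disable paths. For context, here is a rough usage sketch of how board-support code might wire these up; it is not part of the patch, the base address and the plat_* names are placeholders, and the l2x0_enable() prototype is assumed to be exported via asm/hardware/cache-l2x0.h:

/*
 * Illustration only: bring the L2 up once with l2x0_init(), then re-enable
 * it without invalidating after a retention low-power state.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <asm/hardware/cache-l2x0.h>

#define PLAT_L2X0_PHYS_BASE	0xfff12000UL	/* placeholder address */

static void __iomem *plat_l2x0_base;

static int __init plat_l2x0_setup(void)
{
	plat_l2x0_base = ioremap(PLAT_L2X0_PHYS_BASE, 0x1000);
	if (!plat_l2x0_base)
		return -ENOMEM;

	/* aux_val = 0, aux_mask = ~0: keep AUX_CTRL as programmed by firmware */
	l2x0_init(plat_l2x0_base, 0, 0xffffffff);
	return 0;
}
early_initcall(plat_l2x0_setup);

/* Hypothetical resume hook, called when L2 contents were retained. */
static void plat_l2x0_resume(void)
{
	/*
	 * l2x0_enable() only sets the control register under the lock and
	 * deliberately skips the invalidate done on the cold-boot path.
	 */
	l2x0_enable();
}

The design point is that l2x0_enable() is only safe when the cache kept its contents across the low-power state; a cold boot must still go through l2x0_init(), which unlocks the ways and invalidates before enabling.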