Diffstat (limited to 'arch/arm/mm/cache-l2x0.c')
 -rw-r--r--  arch/arm/mm/cache-l2x0.c | 78
 1 file changed, 72 insertions(+), 6 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9982eb385c0f..170c9bb95866 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,14 +28,24 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
+static uint32_t l2x0_size;
 
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
-	/* wait for the operation to complete */
+	/* wait for cache operation by line or way to complete */
 	while (readl_relaxed(reg) & mask)
 		;
 }
 
+#ifdef CONFIG_CACHE_PL310
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait	cache_wait_way
+#endif
+
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
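The split above reflects the two completion models of the L2x0 family: background operations by way keep a status bit set in the operation register for every way still pending and must be polled, while on the PL310 an operation on a single line completes atomically, so the per-line wait can compile away. A minimal standalone sketch of the same compile-time selection, with illustrative names standing in for the kernel's readl_relaxed() plumbing:

#include <stdint.h>

/* Poll until the controller clears every bit in 'mask'; the same
 * pattern as cache_wait_way() above. */
static void poll_until_clear(volatile uint32_t *reg, uint32_t mask)
{
	while (*reg & mask)
		;
}

#ifdef HAVE_ATOMIC_LINE_OPS	/* stands in for CONFIG_CACHE_PL310 */
static inline void wait_line_op(volatile uint32_t *reg, uint32_t mask)
{
	/* line operations complete atomically: nothing to poll */
	(void)reg;
	(void)mask;
}
#else
#define wait_line_op	poll_until_clear
#endif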
@@ -103,14 +113,40 @@ static void l2x0_cache_sync(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static inline void l2x0_inv_all(void)
+static void l2x0_flush_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_clean_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_inv_all(void)
 {
 	unsigned long flags;
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
+	/* Invalidating when L2 is enabled is a nono */
+	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
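l2x0_flush_all(), l2x0_clean_all() and l2x0_inv_all() share one background-operation recipe under the spinlock: write the full way mask to the operation register, spin in cache_wait_way() until the controller has cleared every way's bit, then cache_sync() to drain the controller's buffers. The BUG_ON() added to l2x0_inv_all() guards the dangerous case: invalidating while the L2 is enabled can throw away dirty lines, so it is only legal with the controller disabled. A self-contained sketch of the shared sequence, with a fake register standing in for the hardware:

#include <stdint.h>

static volatile uint32_t fake_op_reg;	/* stand-in for one L2X0_*_WAY register */
static const uint32_t way_mask = 0xff;	/* e.g. 8 active ways */

static void op_all_ways(void)
{
	fake_op_reg = way_mask;		/* kick the operation on all ways */
	while (fake_op_reg & way_mask)
		/* real hardware clears each way's bit as it finishes;
		 * simulate one way completing so the sketch terminates */
		fake_op_reg &= fake_op_reg - 1;
	/* cache_sync() would follow here, still under the lock */
}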
@@ -159,6 +195,11 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_clean_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
@@ -184,6 +225,11 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_flush_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
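Both range hooks now short-circuit to the whole-cache operation once the requested span is at least l2x0_size. The trade-off is easy to see: the per-line loop issues one register write per CACHE_LINE_SIZE bytes (dropping and retaking the lock every so often in the full function), while the by-way path is a single write that covers every line. A worked example of the arithmetic, assuming an illustrative 256 KiB L2 with 32-byte lines:

#include <stdio.h>

int main(void)
{
	const unsigned long l2_size = 256 * 1024;	/* assumed l2x0_size */
	const unsigned long line_size = 32;		/* CACHE_LINE_SIZE */

	/* Per-line path: one operation-register write per line. */
	printf("line ops for a cache-sized range: %lu\n",
	       l2_size / line_size);			/* prints 8192 */

	/* By-way path: one write to L2X0_CLEAN_WAY (or L2X0_CLEAN_INV_WAY)
	 * covers the whole cache, so any range >= l2x0_size is cheaper
	 * to handle by way. */
	return 0;
}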
@@ -206,10 +252,20 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void l2x0_disable(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0, l2x0_base + L2X0_CTRL);
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
 	__u32 cache_id;
+	__u32 way_size = 0;
 	int ways;
 	const char *type;
 
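Note that l2x0_disable() only clears the control register; it does not write back the cache first. A caller that needs the L2 contents to survive (a kexec or power-down path, say) has to sequence the flush itself. A sketch of the expected call order, with hypothetical stubs for the outer_cache wrappers so it stays self-contained:

/* Hypothetical stubs; in the kernel these dispatch through the
 * outer_cache hooks this patch registers. */
static void outer_flush_all(void) { /* -> l2x0_flush_all() */ }
static void outer_disable(void)   { /* -> l2x0_disable()   */ }

static void l2_shutdown_sketch(void)
{
	outer_flush_all();	/* write back dirty lines first */
	outer_disable();	/* then it is safe to turn the L2 off */
}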
@@ -244,6 +300,13 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	l2x0_way_mask = (1 << ways) - 1;
 
 	/*
+	 * L2 cache Size = Way size * Number of ways
+	 */
+	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
+	way_size = 1 << (way_size + 3);
+	l2x0_size = ways * way_size * SZ_1K;
+
+	/*
 	 * Check if l2x0 controller is already enabled.
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
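The way-size field lives in AUX_CTRL bits [19:17] (hence the >> 17) and encodes the per-way size as 1 << (field + 3) KiB, so a field value of 2 means 32 KiB per way. A standalone check of the decode; the mask value is an assumption taken from the L2x0 TRM rather than from this patch:

#include <stdio.h>
#include <stdint.h>

#define AUX_CTRL_WAY_SIZE_MASK	(0x7 << 17)	/* bits [19:17], assumed per TRM */

int main(void)
{
	uint32_t aux = 2 << 17;		/* way-size field = 2, illustrative */
	unsigned int ways = 8;		/* e.g. an 8-way L2C-310 */

	/* Same decode as the patch body above. */
	uint32_t way_size = (aux & AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);			/* 32 KiB per way */
	uint32_t l2_size = ways * way_size * 1024;	/* 262144 B = 256 KiB */

	printf("way size: %u KiB, total: %u B\n",
	       (unsigned)way_size, (unsigned)l2_size);
	return 0;
}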
@@ -263,8 +326,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
 	outer_cache.sync = l2x0_cache_sync;
+	outer_cache.flush_all = l2x0_flush_all;
+	outer_cache.inv_all = l2x0_inv_all;
+	outer_cache.disable = l2x0_disable;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
-	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
-			ways, cache_id, aux);
+	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+			ways, cache_id, aux, l2x0_size);
 }
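Registering flush_all, inv_all and disable makes the whole-cache operations and the shutdown path reachable from generic ARM code, which dispatches through NULL-checked wrappers. A sketch of what the arch/arm/include/asm/outer_cache.h side looks like in this era; treat the exact struct layout and wrapper bodies as an assumption:

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
	void (*flush_all)(void);
	void (*inv_all)(void);
	void (*disable)(void);
	void (*sync)(void);
};

extern struct outer_cache_fns outer_cache;

static inline void outer_flush_all(void)
{
	if (outer_cache.flush_all)
		outer_cache.flush_all();
}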