author		Thomas Gleixner <tglx@linutronix.de>		2010-07-31 11:35:24 -0400
committer	Santosh Shilimkar <santosh.shilimkar@ti.com>	2010-10-26 02:09:58 -0400
commit		2fd8658931193599c867fd6974fa184ec34af16c (patch)
tree		7d9a05dbd6996eec6ad38aab93d4cc6a1c187a23
parent		ae360a78f41164e7f9c4cf846696b5b6d8dae5c8 (diff)
arm: Implement l2x0 cache disable functions
Add flush_all, inv_all and disable functions to the l2x0 code. These
functions are called from kexec code to prevent random crashes in the
new kernel.
Platforms like OMAP, which control L2 enable/disable via SMI mode, can
override the outer_cache.disable() function with their own implementation.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Linus Walleij <linus.walleij@stericsson.com>
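For context: these hooks are meant to be driven from the ARM kexec shutdown
path (the parent commit listed above wires that up). A condensed sketch of
that call sequence follows; it is illustrative only, not part of this patch,
and the helper name kexec_cache_shutdown() is hypothetical:

#include <linux/irqflags.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>

static void kexec_cache_shutdown(void)
{
	/* No interrupts once we start tearing the caches down */
	local_irq_disable();
	local_fiq_disable();

	flush_cache_all();	/* clean/invalidate L1 */
	outer_flush_all();	/* push dirty L2 lines out to RAM */
	outer_disable();	/* turn L2 off; platforms may override this hook */
	outer_inv_all();	/* requires L2 off, see the BUG_ON in l2x0_inv_all() below */
}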
-rw-r--r--	arch/arm/mm/cache-l2x0.c | 28
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index edb43ff7aeef..9310d618070b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -112,12 +112,26 @@ static void l2x0_cache_sync(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static inline void l2x0_inv_all(void)
+static void l2x0_flush_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_inv_all(void)
 {
 	unsigned long flags;
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
+	/* Invalidating when L2 is enabled is a nono */
+	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
@@ -215,6 +229,15 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void l2x0_disable(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0, l2x0_base + L2X0_CTRL);
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
@@ -272,6 +295,9 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
 	outer_cache.sync = l2x0_cache_sync;
+	outer_cache.flush_all = l2x0_flush_all;
+	outer_cache.inv_all = l2x0_inv_all;
+	outer_cache.disable = l2x0_disable;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
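As the commit message notes, a platform whose L2X0_CTRL enable bit is only
writable from secure mode can replace the generic MMIO-based hook after
l2x0_init() has installed it. A minimal sketch of such an override is below;
the secure-monitor helper omap_smc1(), its command ID, and the aux_ctrl
values are assumptions for illustration, not taken from this patch:

#include <linux/types.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/outercache.h>

static void __iomem *l2cache_base;	/* assumed ioremap()ed during boot */

/* Hypothetical secure-monitor call; name and command ID are assumptions */
extern void omap_smc1(u32 fn, u32 arg);

static void board_l2x0_disable(void)
{
	/*
	 * The controller's enable bit is writable only from secure mode
	 * on this kind of platform, so go through the monitor, not MMIO.
	 */
	omap_smc1(0x102, 0x0);
}

static int __init board_l2x0_setup(void)
{
	/* aux_val/aux_mask are illustrative, not real board values */
	l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff);

	/* Replace the generic l2x0_disable() hook this patch installs */
	outer_cache.disable = board_l2x0_disable;
	return 0;
}
early_initcall(board_l2x0_setup);

Because outer_disable() simply dispatches through outer_cache.disable, the
kexec path above needs no platform-specific knowledge; the override is
transparent to callers.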