author		srinidhi kasagar <srinidhi.kasagar@stericsson.com>	2012-01-17 00:59:39 -0500
committer	Linus Walleij <linus.walleij@linaro.org>	2012-01-20 07:12:48 -0500
commit		dd821823fad54d6ccc328e2f1b9698a6f9bd8f3e (patch)
tree		97c0593108e543475223cf7c96a50db5cdc3fa29
parent		a5fea953bbcff57f30cf5027ae63069a869f8e31 (diff)
mach-ux500: do not override outer.inv_all
Invalidating the outer cache without disabling it first is a big no-no, so remove the machine-specific outer.inv_all override. At the same time this does not stop us from overriding outer.disable, since we have no secure SMI service to perform the disable when kexec turns the outer cache off.

Signed-off-by: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
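With the machine-specific override gone, outer_cache.inv_all stays whatever l2x0_init() installed, i.e. the generic driver's locked way invalidation. Roughly, as a simplified sketch of arch/arm/mm/cache-l2x0.c from this era (helper names such as cache_wait_way and cache_sync, and the exact BUG_ON, are recalled from memory rather than quoted):

/*
 * Simplified sketch of the generic l2x0_inv_all() the platform now
 * falls back to; not a verbatim copy of cache-l2x0.c.
 */
static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways, under the lock the ux500 copy had to skip */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* invalidating while the L2 is still enabled is a no-no */
	BUG_ON(readl_relaxed(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}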
 arch/arm/mach-ux500/cache-l2x0.c | 48
 1 file changed, 7 insertions(+), 41 deletions(-)
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 122ddde00ba7..da5569d83d58 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -12,44 +12,6 @@
 
 static void __iomem *l2x0_base;
 
-static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
-{
-	/* wait for the operation to complete */
-	while (readl_relaxed(reg) & mask)
-		cpu_relax();
-}
-
-static inline void ux500_cache_sync(void)
-{
-	writel_relaxed(0, l2x0_base + L2X0_CACHE_SYNC);
-	ux500_cache_wait(l2x0_base + L2X0_CACHE_SYNC, 1);
-}
-
-/*
- * The L2 cache cannot be turned off in the non-secure world.
- * Dummy until a secure service is in place.
- */
-static void ux500_l2x0_disable(void)
-{
-}
-
-/*
- * This is only called when doing a kexec, just after turning off the L2
- * and L1 cache, and it is surrounded by a spinlock in the generic version.
- * However, we're not really turning off the L2 cache right now and the
- * PL310 does not support exclusive accesses (used to implement the spinlock).
- * So, the invalidation needs to be done without the spinlock.
- */
-static void ux500_l2x0_inv_all(void)
-{
-	uint32_t l2x0_way_mask = (1<<16) - 1;	/* Bitmask of active ways */
-
-	/* invalidate all ways */
-	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
-	ux500_cache_sync();
-}
-
 static int __init ux500_l2x0_unlock(void)
 {
 	int i;
@@ -85,9 +47,13 @@ static int __init ux500_l2x0_init(void)
 	/* 64KB way size, 8 way associativity, force WA */
 	l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
 
-	/* Override invalidate function */
-	outer_cache.disable = ux500_l2x0_disable;
-	outer_cache.inv_all = ux500_l2x0_inv_all;
+	/*
+	 * We can't disable l2 as we are in non secure mode, currently
+	 * this seems be called only during kexec path. So let's
+	 * override outer.disable with nasty assignment until we have
+	 * some SMI service available.
+	 */
+	outer_cache.disable = NULL;
 
 	return 0;
 }
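The NULL assignment works because the outer-cache wrappers check the callback before calling it; a sketch along the lines of arch/arm/include/asm/outercache.h of this era (reconstructed from memory, not quoted verbatim):

extern struct outer_cache_fns outer_cache;

/* each wrapper tolerates an unset callback, so a NULL disable is a no-op */
static inline void outer_disable(void)
{
	if (outer_cache.disable)
		outer_cache.disable();
}

static inline void outer_inv_all(void)
{
	if (outer_cache.inv_all)
		outer_cache.inv_all();
}

So on ux500, outer_disable() simply does nothing until a secure SMI service exists to actually turn the PL310 off, while inv_all falls back to the generic, spinlocked l2x0_inv_all().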