diff options
| -rw-r--r-- | arch/arm/mach-ux500/cpu.c | 45 |
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c index e0fd747e447a..73fb1a551ec6 100644 --- a/arch/arm/mach-ux500/cpu.c +++ b/arch/arm/mach-ux500/cpu.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/io.h> | 10 | #include <linux/io.h> |
| 11 | #include <linux/clk.h> | 11 | #include <linux/clk.h> |
| 12 | 12 | ||
| 13 | #include <asm/cacheflush.h> | ||
| 13 | #include <asm/hardware/cache-l2x0.h> | 14 | #include <asm/hardware/cache-l2x0.h> |
| 14 | #include <asm/hardware/gic.h> | 15 | #include <asm/hardware/gic.h> |
| 15 | #include <asm/mach/map.h> | 16 | #include <asm/mach/map.h> |
| @@ -71,6 +72,46 @@ void __init ux500_init_irq(void) | |||
| 71 | } | 72 | } |
| 72 | 73 | ||
| 73 | #ifdef CONFIG_CACHE_L2X0 | 74 | #ifdef CONFIG_CACHE_L2X0 |
| 75 | static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask) | ||
| 76 | { | ||
| 77 | /* wait for the operation to complete */ | ||
| 78 | while (readl(reg) & mask) | ||
| 79 | ; | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline void ux500_cache_sync(void) | ||
| 83 | { | ||
| 84 | void __iomem *base = __io_address(UX500_L2CC_BASE); | ||
| 85 | writel(0, base + L2X0_CACHE_SYNC); | ||
| 86 | ux500_cache_wait(base + L2X0_CACHE_SYNC, 1); | ||
| 87 | } | ||
| 88 | |||
/*
 * Intentional no-op: the L2 cache cannot be disabled from the
 * non-secure world on this platform.  Stub until a secure firmware
 * service for it is available.
 */
static void ux500_l2x0_disable(void)
{
}
| 96 | |||
| 97 | /* | ||
| 98 | * This is only called when doing a kexec, just after turning off the L2 | ||
| 99 | * and L1 cache, and it is surrounded by a spinlock in the generic version. | ||
| 100 | * However, we're not really turning off the L2 cache right now and the | ||
| 101 | * PL310 does not support exclusive accesses (used to implement the spinlock). | ||
| 102 | * So, the invalidation needs to be done without the spinlock. | ||
| 103 | */ | ||
| 104 | static void ux500_l2x0_inv_all(void) | ||
| 105 | { | ||
| 106 | void __iomem *l2x0_base = __io_address(UX500_L2CC_BASE); | ||
| 107 | uint32_t l2x0_way_mask = (1<<16) - 1; /* Bitmask of active ways */ | ||
| 108 | |||
| 109 | /* invalidate all ways */ | ||
| 110 | writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); | ||
| 111 | ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); | ||
| 112 | ux500_cache_sync(); | ||
| 113 | } | ||
| 114 | |||
| 74 | static int ux500_l2x0_init(void) | 115 | static int ux500_l2x0_init(void) |
| 75 | { | 116 | { |
| 76 | void __iomem *l2x0_base; | 117 | void __iomem *l2x0_base; |
| @@ -80,6 +121,10 @@ static int ux500_l2x0_init(void) | |||
| 80 | /* 64KB way size, 8 way associativity, force WA */ | 121 | /* 64KB way size, 8 way associativity, force WA */ |
| 81 | l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff); | 122 | l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff); |
| 82 | 123 | ||
| 124 | /* Override invalidate function */ | ||
| 125 | outer_cache.disable = ux500_l2x0_disable; | ||
| 126 | outer_cache.inv_all = ux500_l2x0_inv_all; | ||
| 127 | |||
| 83 | return 0; | 128 | return 0; |
| 84 | } | 129 | } |
| 85 | early_initcall(ux500_l2x0_init); | 130 | early_initcall(ux500_l2x0_init); |
