diff options
author | Catalin Marinas <catalin.marinas@arm.com> | 2010-08-31 08:05:22 -0400 |
---|---|---|
committer | Santosh Shilimkar <santosh.shilimkar@ti.com> | 2010-10-26 02:09:54 -0400 |
commit | 9a6655e49fd98f3748bb80da20705448aad9ee57 (patch) | |
tree | db5aba3a886712f54f4816137c4cea08b954f5c6 /arch/arm/mm/cache-l2x0.c | |
parent | 899611ee7d373e5eeda08e9a8632684e1ebbbf00 (diff) |
ARM: Improve the L2 cache performance when PL310 is used
With this L2 cache controller, the cache maintenance by PA and sync
operations are atomic and do not require a "wait" loop. This patch
conditionally defines the cache_wait() function.
Since L2x0 cache controllers do not work with ARMv7 CPUs, the patch
automatically enables CACHE_PL310 when only CPU_V7 is defined.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm/mm/cache-l2x0.c')
-rw-r--r-- | arch/arm/mm/cache-l2x0.c | 15 |
1 file changed, 12 insertions, 3 deletions
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 9982eb385c0f..edb43ff7aeef 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -29,13 +29,22 @@ static void __iomem *l2x0_base; | |||
29 | static DEFINE_SPINLOCK(l2x0_lock); | 29 | static DEFINE_SPINLOCK(l2x0_lock); |
30 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ | 30 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ |
31 | 31 | ||
32 | static inline void cache_wait(void __iomem *reg, unsigned long mask) | 32 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) |
33 | { | 33 | { |
34 | /* wait for the operation to complete */ | 34 | /* wait for cache operation by line or way to complete */ |
35 | while (readl_relaxed(reg) & mask) | 35 | while (readl_relaxed(reg) & mask) |
36 | ; | 36 | ; |
37 | } | 37 | } |
38 | 38 | ||
39 | #ifdef CONFIG_CACHE_PL310 | ||
40 | static inline void cache_wait(void __iomem *reg, unsigned long mask) | ||
41 | { | ||
42 | /* cache operations by line are atomic on PL310 */ | ||
43 | } | ||
44 | #else | ||
45 | #define cache_wait cache_wait_way | ||
46 | #endif | ||
47 | |||
39 | static inline void cache_sync(void) | 48 | static inline void cache_sync(void) |
40 | { | 49 | { |
41 | void __iomem *base = l2x0_base; | 50 | void __iomem *base = l2x0_base; |
@@ -110,7 +119,7 @@ static inline void l2x0_inv_all(void) | |||
110 | /* invalidate all ways */ | 119 | /* invalidate all ways */ |
111 | spin_lock_irqsave(&l2x0_lock, flags); | 120 | spin_lock_irqsave(&l2x0_lock, flags); |
112 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); | 121 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); |
113 | cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); | 122 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); |
114 | cache_sync(); | 123 | cache_sync(); |
115 | spin_unlock_irqrestore(&l2x0_lock, flags); | 124 | spin_unlock_irqrestore(&l2x0_lock, flags); |
116 | } | 125 | } |