author      Catalin Marinas <catalin.marinas@arm.com>        2010-08-31 08:05:22 -0400
committer   Santosh Shilimkar <santosh.shilimkar@ti.com>     2010-10-26 02:09:54 -0400
commit      9a6655e49fd98f3748bb80da20705448aad9ee57 (patch)
tree        db5aba3a886712f54f4816137c4cea08b954f5c6 /arch/arm/mm
parent      899611ee7d373e5eeda08e9a8632684e1ebbbf00 (diff)
ARM: Improve the L2 cache performance when PL310 is used
With this L2 cache controller, the cache maintenance by PA and sync
operations are atomic and do not require a "wait" loop. This patch
conditionally defines the cache_wait() function.
Since L2x0 cache controllers do not work with ARMv7 CPUs, the patch
automatically enables CACHE_PL310 when only CPU_V7 is defined.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
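For readers following the idea outside the kernel tree, the pattern the patch introduces can be reproduced in a small standalone sketch: when the build knows the outer cache is a PL310, the by-line wait helper compiles away to nothing, while the by-way polling loop is kept. This is only an illustration under assumed names; the plain volatile word below stands in for the memory-mapped L2x0 register that the kernel reads with readl_relaxed().

/*
 * Standalone sketch, not the kernel code: models how CONFIG_CACHE_PL310
 * turns the by-line wait into a no-op while keeping the by-way polling
 * loop.
 */
#include <stdio.h>

/* #define CONFIG_CACHE_PL310 */	/* define to model a PL310-only build */

static inline void cache_wait_way(volatile unsigned long *reg,
				  unsigned long mask)
{
	/* background (by-way) operations always need polling */
	while (*reg & mask)
		;
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(volatile unsigned long *reg,
			      unsigned long mask)
{
	/* by-line operations are atomic on PL310, nothing to poll */
}
#else
#define cache_wait	cache_wait_way
#endif

int main(void)
{
	volatile unsigned long mock_reg = 0;	/* pretend the operation already finished */

	cache_wait(&mock_reg, 1UL);		/* compiles to nothing on PL310 builds */
	cache_wait_way(&mock_reg, 1UL);		/* always polls until the mask clears */
	puts("done");
	return 0;
}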
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/Kconfig        |  8
-rw-r--r--   arch/arm/mm/cache-l2x0.c   | 15
2 files changed, 20 insertions, 3 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index a0a2928ae4dd..4414a01e1e8a 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -779,6 +779,14 @@ config CACHE_L2X0
 	help
 	  This option enables the L2x0 PrimeCell.
 
+config CACHE_PL310
+	bool
+	depends on CACHE_L2X0
+	default y if CPU_V7 && !CPU_V6
+	help
+	  This option enables optimisations for the PL310 cache
+	  controller.
+
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
 	depends on (ARCH_DOVE || ARCH_MMP)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9982eb385c0f..edb43ff7aeef 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -29,13 +29,22 @@ static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
-	/* wait for the operation to complete */
+	/* wait for cache operation by line or way to complete */
 	while (readl_relaxed(reg) & mask)
 		;
 }
 
+#ifdef CONFIG_CACHE_PL310
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait	cache_wait_way
+#endif
+
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
@@ -110,7 +119,7 @@ static inline void l2x0_inv_all(void)
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
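One way to read the second hunk, as a hedged note rather than anything stated in the commit: l2x0_inv_all() is switched to the renamed cache_wait_way() because invalidating by way is a background operation, so its way bits still have to be polled even on PL310; only the by-line (by PA) operations become atomic. Reusing the mock helpers from the sketch above, a rough model of that call site could look as follows; the "mock_" names are invented for illustration, while L2X0_INV_WAY and l2x0_way_mask are the kernel's own, and the spinlock and cache sync are omitted here.

/*
 * Sketch continuing the mock above: invalidating all ways starts a
 * background operation, so the caller keeps polling the way bits.
 */
static volatile unsigned long mock_inv_way_reg;
static unsigned long mock_way_mask = 0xff;	/* e.g. an 8-way cache */

static void mock_l2x0_inv_all(void)
{
	mock_inv_way_reg = mock_way_mask;	/* kick off invalidate of every active way */
	mock_inv_way_reg = 0;			/* pretend the hardware has finished */

	/* by-way maintenance is not atomic, so always use the polling helper */
	cache_wait_way(&mock_inv_way_reg, mock_way_mask);
}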