author    Santosh Shilimkar <santosh.shilimkar@ti.com>    2010-07-11 05:28:41 -0400
committer Santosh Shilimkar <santosh.shilimkar@ti.com>    2010-10-26 02:10:05 -0400
commit    444457c1f59d58bc48acf5b4fc585225106c11ff (patch)
tree      87d0b8262f776680a6dfc8c48c74fd5eea1f7b2c /arch
parent    5ba70372289a1fb378b95cee2cf46b0203d65291 (diff)
ARM: l2x0: Optimise the range based operations
For big buffers in excess of the cache size, the maintenance operations by PA
(physical address) are very slow. For such buffers the maintenance operations
can be sped up by using the way-based method.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Linus Walleij <linus.walleij@stericsson.com>
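To make the "very slow" claim concrete, the sketch below counts how many
maintenance operations each path issues for a buffer larger than the cache. It
is a rough, illustrative cost model only, not code from the patch: the 32-byte
line size, the 256 KB / 8-way cache geometry and the buffer size are assumptions
chosen for the example.

	/*
	 * Illustrative cost model (assumptions, not kernel code): counts the
	 * cache-maintenance operations each path issues for a given range.
	 */
	#include <stdio.h>

	#define CACHE_LINE_SIZE 32UL			/* assumed L2 line size */

	int main(void)
	{
		unsigned long l2_size = 256 * 1024;	/* assumed 256 KB L2 */
		unsigned long ways = 8;			/* assumed 8-way cache */
		unsigned long range = 1024 * 1024;	/* 1 MB buffer, > cache size */

		/* PA-based path: one operation per cache line in the range */
		unsigned long pa_ops = range / CACHE_LINE_SIZE;	/* 32768 */

		/* way-based path: one background operation per way, plus a sync */
		unsigned long way_ops = ways + 1;		/* 9 */

		printf("PA-based: %lu ops, way-based: %lu ops\n", pa_ops, way_ops);
		return 0;
	}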
Diffstat (limited to 'arch')
-rw-r--r--   arch/arm/mm/cache-l2x0.c   22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 262c7529bcdb..170c9bb95866 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -125,6 +125,18 @@ static void l2x0_flush_all(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void l2x0_clean_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 static void l2x0_inv_all(void)
 {
 	unsigned long flags;
@@ -183,6 +195,11 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_clean_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
@@ -208,6 +225,11 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_flush_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
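For readers who want to poke at the control flow outside the kernel, here is a
minimal user-space model of the dispatch the patch adds: ranges at least as
large as the cache fall back to a single whole-cache ("way based") operation,
while smaller ranges keep the line-by-line PA loop. The cache size, addresses
and helper names other than the threshold test are illustrative assumptions,
not the kernel's API.

	#include <stdio.h>

	#define CACHE_LINE_SIZE 32UL			/* assumed line size */

	static unsigned long cache_size = 256 * 1024;	/* stand-in for l2x0_size */

	static void clean_all(void)
	{
		/* models the way-based L2X0_CLEAN_WAY path: one op per way */
		puts("clean entire cache by way");
	}

	static void clean_line(unsigned long addr)
	{
		/* models one write to the clean-by-PA register for this line */
		(void)addr;
	}

	static void clean_range(unsigned long start, unsigned long end)
	{
		/* same threshold check the patch adds to l2x0_clean_range() */
		if ((end - start) >= cache_size) {
			clean_all();
			return;
		}
		start &= ~(CACHE_LINE_SIZE - 1);
		while (start < end) {
			clean_line(start);
			start += CACHE_LINE_SIZE;
		}
		puts("cleaned range line by line");
	}

	int main(void)
	{
		unsigned long base = 0x80000000UL;

		clean_range(base, base + 4096);		/* small: line by line */
		clean_range(base, base + 1024 * 1024);	/* >= cache size: by way */
		return 0;
	}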