author	Russell King <rmk+kernel@arm.linux.org.uk>	2009-11-19 06:12:15 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-12-14 08:34:58 -0500
commit	0eb948dd7f7c3cec37440c16a6c738c8e75efcda (patch)
tree	96ecc9b1a3b5326f7de82cd2055b448a7e43c9f8 /arch/arm/mm/cache-l2x0.c
parent	22763c5cf3690a681551162c15d34d935308c8d7 (diff)
ARM: cache-l2x0: avoid taking spinlock for every iteration
Taking the spinlock for every iteration is very expensive; instead, batch iterations up into 4K blocks, releasing and reacquiring the spinlock between each block.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
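For reference, the locking idiom this commit introduces can be sketched outside the kernel. The sketch below is not part of the patch: a pthread mutex stands in for l2x0_lock, and the stub cache_op() stands in for the per-line sync_writel() maintenance write; the names BLOCK_SIZE, cache_op and op_range_batched are illustrative only. It shows the shape of the 4K batching loop: process at most one block of cache lines per lock hold, then drop and retake the lock if more work remains.

/* Hypothetical user-space sketch of the batching idiom used by this patch.
 * A pthread mutex stands in for l2x0_lock, and cache_op() for the per-line
 * L2X0 maintenance write; neither name exists in the kernel source.
 */
#include <pthread.h>

#define CACHE_LINE_SIZE 32UL
#define BLOCK_SIZE      4096UL

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void cache_op(unsigned long addr)
{
	(void)addr;		/* placeholder for sync_writel(addr, ..., 1) */
}

static void op_range_batched(unsigned long start, unsigned long end)
{
	pthread_mutex_lock(&lock);
	while (start < end) {
		/* handle at most one 4K block per lock hold */
		unsigned long blk_end = start + (end - start < BLOCK_SIZE ?
						 end - start : BLOCK_SIZE);

		while (start < blk_end) {
			cache_op(start);
			start += CACHE_LINE_SIZE;
		}

		/* more blocks left: briefly release so waiters can get in */
		if (blk_end < end) {
			pthread_mutex_unlock(&lock);
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
}

In the actual patch the lock is taken with spin_lock_irqsave(), so dropping and retaking it between blocks also bounds how long interrupts stay disabled and how long other CPUs can spin waiting on the lock during a large range operation.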
Diffstat (limited to 'arch/arm/mm/cache-l2x0.c')
-rw-r--r--	arch/arm/mm/cache-l2x0.c	| 65
1 files changed, 52 insertions(+), 13 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b480f1d3591f..c1b7bfff47f4 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -31,14 +31,10 @@ static DEFINE_SPINLOCK(l2x0_lock);
 static inline void sync_writel(unsigned long val, unsigned long reg,
 			       unsigned long complete_mask)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&l2x0_lock, flags);
 	writel(val, l2x0_base + reg);
 	/* wait for the operation to complete */
 	while (readl(l2x0_base + reg) & complete_mask)
 		;
-	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void cache_sync(void)
@@ -48,15 +44,20 @@ static inline void cache_sync(void)
 
 static inline void l2x0_inv_all(void)
 {
+	unsigned long flags;
+
 	/* invalidate all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
 	sync_writel(0xff, L2X0_INV_WAY, 0xff);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
 		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
@@ -68,29 +69,67 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
 	}
 
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			sync_writel(start, L2X0_INV_LINE_PA, 1);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			sync_writel(start, L2X0_CLEAN_LINE_PA, 1);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)