Diffstat (limited to 'arch/arm/mm/cache-l2x0.c')
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 180
1 file changed, 150 insertions(+), 30 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b480f1d3591f..21ad68ba22ba 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,27 +28,88 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 
-static inline void sync_writel(unsigned long val, unsigned long reg,
-                               unsigned long complete_mask)
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&l2x0_lock, flags);
-        writel(val, l2x0_base + reg);
         /* wait for the operation to complete */
-        while (readl(l2x0_base + reg) & complete_mask)
+        while (readl(reg) & mask)
                 ;
-        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void cache_sync(void)
 {
-        sync_writel(0, L2X0_CACHE_SYNC, 1);
+        void __iomem *base = l2x0_base;
+        writel(0, base + L2X0_CACHE_SYNC);
+        cache_wait(base + L2X0_CACHE_SYNC, 1);
+}
+
+static inline void l2x0_clean_line(unsigned long addr)
+{
+        void __iomem *base = l2x0_base;
+        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+        writel(addr, base + L2X0_CLEAN_LINE_PA);
+}
+
+static inline void l2x0_inv_line(unsigned long addr)
+{
+        void __iomem *base = l2x0_base;
+        cache_wait(base + L2X0_INV_LINE_PA, 1);
+        writel(addr, base + L2X0_INV_LINE_PA);
+}
+
+#ifdef CONFIG_PL310_ERRATA_588369
+static void debug_writel(unsigned long val)
+{
+        extern void omap_smc1(u32 fn, u32 arg);
+
+        /*
+         * Texas Instruments secure monitor API to modify the
+         * PL310 Debug Control Register.
+         */
+        omap_smc1(0x100, val);
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+        void __iomem *base = l2x0_base;
+
+        /* Clean by PA followed by Invalidate by PA */
+        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+        writel(addr, base + L2X0_CLEAN_LINE_PA);
+        cache_wait(base + L2X0_INV_LINE_PA, 1);
+        writel(addr, base + L2X0_INV_LINE_PA);
+}
+#else
+
+/* Optimised out for non-errata case */
+static inline void debug_writel(unsigned long val)
+{
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+        void __iomem *base = l2x0_base;
+        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+        writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+}
+#endif
+
+static void l2x0_cache_sync(void)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&l2x0_lock, flags);
+        cache_sync();
+        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void l2x0_inv_all(void)
 {
+        unsigned long flags;
+
         /* invalidate all ways */
-        sync_writel(0xff, L2X0_INV_WAY, 0xff);
+        spin_lock_irqsave(&l2x0_lock, flags);
+        writel(0xff, l2x0_base + L2X0_INV_WAY);
+        cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
         cache_sync();
+        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
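
The hunk above inverts the old sync_writel() pattern: locking moves out to the callers, and cache_wait() polls the busy bit *before* the next operation is issued rather than after, so the wait for one background operation overlaps with setting up the next. A minimal userspace model of the difference, not driver code: mmio_read()/mmio_write() and fake_reg are stand-ins for the kernel's readl()/writel() on an ioremap()ed PL310 register.

#include <stdint.h>

/* stand-in for a memory-mapped PL310 operation register */
static volatile uint32_t fake_reg;

static inline uint32_t mmio_read(volatile uint32_t *reg) { return *reg; }
static inline void mmio_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

/* old sync_writel() shape: write, then stall for the full operation */
static void kick_and_wait(volatile uint32_t *reg, uint32_t addr, uint32_t busy)
{
        mmio_write(reg, addr);
        while (mmio_read(reg) & busy)
                ;
}

/* new cache_wait()+writel() shape: the wait for operation N overlaps with
 * computing the address of operation N+1, so back-to-back line operations
 * pipeline instead of serialising */
static void wait_and_kick(volatile uint32_t *reg, uint32_t addr, uint32_t busy)
{
        while (mmio_read(reg) & busy)
                ;
        mmio_write(reg, addr);
}

int main(void)
{
        /* issue a few "operations" both ways; fake_reg is never busy here */
        kick_and_wait(&fake_reg, 0x100, 1);
        wait_and_kick(&fake_reg, 0x200, 1);
        wait_and_kick(&fake_reg, 0x300, 1);
        return 0;
}

On the errata path (CONFIG_PL310_ERRATA_588369), l2x0_flush_line() issues the clean and the invalidate as two separate by-PA operations because the controller's atomic Clean & Invalidate by PA is affected by the erratum; the debug_writel(0x03)/debug_writel(0x00) pairs in the next hunk set and clear PL310 Debug Control Register bits around those sequences via the TI secure monitor call.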
@@ -55,42 +116,93 @@
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
-        unsigned long addr;
+        void __iomem *base = l2x0_base;
+        unsigned long flags;
 
+        spin_lock_irqsave(&l2x0_lock, flags);
         if (start & (CACHE_LINE_SIZE - 1)) {
                 start &= ~(CACHE_LINE_SIZE - 1);
-                sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+                debug_writel(0x03);
+                l2x0_flush_line(start);
+                debug_writel(0x00);
                 start += CACHE_LINE_SIZE;
         }
 
         if (end & (CACHE_LINE_SIZE - 1)) {
                 end &= ~(CACHE_LINE_SIZE - 1);
-                sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+                debug_writel(0x03);
+                l2x0_flush_line(end);
+                debug_writel(0x00);
         }
 
-        for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-                sync_writel(addr, L2X0_INV_LINE_PA, 1);
+        while (start < end) {
+                unsigned long blk_end = start + min(end - start, 4096UL);
+
+                while (start < blk_end) {
+                        l2x0_inv_line(start);
+                        start += CACHE_LINE_SIZE;
+                }
+
+                if (blk_end < end) {
+                        spin_unlock_irqrestore(&l2x0_lock, flags);
+                        spin_lock_irqsave(&l2x0_lock, flags);
+                }
+        }
+        cache_wait(base + L2X0_INV_LINE_PA, 1);
         cache_sync();
+        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
 {
-        unsigned long addr;
+        void __iomem *base = l2x0_base;
+        unsigned long flags;
 
+        spin_lock_irqsave(&l2x0_lock, flags);
         start &= ~(CACHE_LINE_SIZE - 1);
-        for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-                sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+        while (start < end) {
+                unsigned long blk_end = start + min(end - start, 4096UL);
+
+                while (start < blk_end) {
+                        l2x0_clean_line(start);
+                        start += CACHE_LINE_SIZE;
+                }
+
+                if (blk_end < end) {
+                        spin_unlock_irqrestore(&l2x0_lock, flags);
+                        spin_lock_irqsave(&l2x0_lock, flags);
+                }
+        }
+        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
         cache_sync();
+        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
 {
-        unsigned long addr;
+        void __iomem *base = l2x0_base;
+        unsigned long flags;
 
+        spin_lock_irqsave(&l2x0_lock, flags);
         start &= ~(CACHE_LINE_SIZE - 1);
-        for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-                sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+        while (start < end) {
+                unsigned long blk_end = start + min(end - start, 4096UL);
+
+                debug_writel(0x03);
+                while (start < blk_end) {
+                        l2x0_flush_line(start);
+                        start += CACHE_LINE_SIZE;
+                }
+                debug_writel(0x00);
+
+                if (blk_end < end) {
+                        spin_unlock_irqrestore(&l2x0_lock, flags);
+                        spin_lock_irqsave(&l2x0_lock, flags);
+                }
+        }
+        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
         cache_sync();
+        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
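
All three range operations in the hunk above share one chunking pattern: the spinlock is taken once per call, the range is walked in blocks of at most 4096 bytes, and between blocks the lock is dropped and immediately retaken so pending interrupts (and other CPUs contending for l2x0_lock) are not held off for the duration of a large operation. A sketch of that pattern in plain C, with a pthread mutex standing in for the kernel spinlock and a hypothetical touch_line() in place of the real per-line MMIO writes:

#include <pthread.h>

#define LINE_SZ 32UL    /* CACHE_LINE_SIZE in the driver */
#define BLK_SZ  4096UL  /* bound on work done per lock hold */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

/* stand-in for l2x0_clean_line()/l2x0_inv_line(): one operation per line */
static void touch_line(unsigned long addr)
{
        (void)addr;
}

static void op_range(unsigned long start, unsigned long end)
{
        start &= ~(LINE_SZ - 1);        /* align down, as l2x0_clean_range() does */

        pthread_mutex_lock(&lock);
        while (start < end) {
                unsigned long blk_end = start + min_ul(end - start, BLK_SZ);

                while (start < blk_end) {
                        touch_line(start);
                        start += LINE_SZ;
                }

                /* brief window for contenders (pending IRQs in the kernel case) */
                if (blk_end < end) {
                        pthread_mutex_unlock(&lock);
                        pthread_mutex_lock(&lock);
                }
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        op_range(0x8000100UL, 0x8010000UL);     /* arbitrary physical-looking range */
        return 0;
}

Note the final cache_wait() before cache_sync() in each driver function: because the line helpers only wait before issuing the *next* write, the last line operation may still be in flight, and it must drain before the sync is requested.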
@@ -99,22 +211,30 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
         l2x0_base = base;
 
-        /* disable L2X0 */
-        writel(0, l2x0_base + L2X0_CTRL);
+        /*
+         * Check if the l2x0 controller is already enabled.
+         * If we are booting in non-secure mode, accessing
+         * the registers below will fault.
+         */
+        if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
+
+                /* l2x0 controller is disabled */
 
-        aux = readl(l2x0_base + L2X0_AUX_CTRL);
-        aux &= aux_mask;
-        aux |= aux_val;
-        writel(aux, l2x0_base + L2X0_AUX_CTRL);
+                aux = readl(l2x0_base + L2X0_AUX_CTRL);
+                aux &= aux_mask;
+                aux |= aux_val;
+                writel(aux, l2x0_base + L2X0_AUX_CTRL);
 
-        l2x0_inv_all();
+                l2x0_inv_all();
 
-        /* enable L2X0 */
-        writel(1, l2x0_base + L2X0_CTRL);
+                /* enable L2X0 */
+                writel(1, l2x0_base + L2X0_CTRL);
+        }
 
         outer_cache.inv_range = l2x0_inv_range;
         outer_cache.clean_range = l2x0_clean_range;
         outer_cache.flush_range = l2x0_flush_range;
+        outer_cache.sync = l2x0_cache_sync;
 
         printk(KERN_INFO "L2X0 cache controller enabled\n");
 }
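
With the enable check in place, l2x0_init() can be called whether or not secure firmware has already switched the L2 on: if the controller is live, the potentially faulting AUX_CTRL and CTRL writes are skipped and only the outer_cache hooks are installed. A hypothetical board-file sketch of the call; board_l2_init, the base address 0x48242000, and the aux values are illustrative placeholders, not taken from this patch:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <asm/hardware/cache-l2x0.h>

static int __init board_l2_init(void)
{
        void __iomem *l2_base;

        /* assumed PL310 base; take it from the SoC's memory map */
        l2_base = ioremap(0x48242000, 0x1000);
        if (!l2_base)
                return -ENOMEM;

        /* aux_val = 0, aux_mask = ~0: leave AUX_CTRL as firmware/reset set it */
        l2x0_init(l2_base, 0, 0xffffffff);
        return 0;
}
early_initcall(board_l2_init);

Since l2x0_init() computes aux as (aux & aux_mask) | aux_val, an all-ones mask with a zero value leaves the controller's AUX_CTRL register untouched; platforms that need to force, say, the way-size field would clear those mask bits and OR in their value.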