author     Paul Mundt <lethal@linux-sh.org>    2009-09-01 08:21:36 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2009-09-01 08:21:36 -0400
commit     6f3795788b030c3c190fa063adfe519e016cc6fd (patch)
tree       ee7ed849f0d616bf6c89cbee4637c4c6d2866a5a /arch/sh/mm/cache.c
parent     983f4c514c4c9ddac1077a2c805fd16cbe3f7487 (diff)
sh: Fix up UP deadlock with SMP-aware cache ops.
This builds on top of the previous reversion and implements a special
on_each_cpu() variant that simply disables preemption across the call
while leaving the interrupt state to the function itself. There were
some unintended consequences with IRQ disabling in some of these paths
on UP that ran into a deadlock scenario with IRQs being missed.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
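For quick reference, a minimal sketch of the pattern this patch introduces. The helper body and the example caller are taken from the diff below; the #include lines and the stand-in declaration of local_flush_cache_all() are assumptions added only to make the sketch self-contained, not part of the patch.

#include <linux/smp.h>		/* smp_call_function() */
#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */

/* Stand-in declaration for this sketch only; the real SH-local flush
 * hook lives elsewhere in the SH cache code. */
extern void local_flush_cache_all(void *args);

/*
 * Run a cache op on all CPUs without forcing IRQs off on the local CPU.
 * smp_call_function() targets the remote CPUs (and is a no-op on UP
 * builds), while the local invocation runs with only preemption
 * disabled, leaving any interrupt masking to the cache op itself.
 * That is what avoids the UP deadlock described in the commit message.
 */
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
	preempt_disable();
	smp_call_function(func, info, wait);
	func(info);
	preempt_enable();
}

/* Example caller, as converted by this patch. */
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}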
Diffstat (limited to 'arch/sh/mm/cache.c')
-rw-r--r--  arch/sh/mm/cache.c  27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 411fe6058429..db2b1c5beffd 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -34,6 +34,15 @@ static inline void noop__flush_region(void *start, int size)
 {
 }
 
+static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
+                                       int wait)
+{
+	preempt_disable();
+	smp_call_function(func, info, wait);
+	func(info);
+	preempt_enable();
+}
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
@@ -149,17 +158,17 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 void flush_cache_all(void)
 {
-	on_each_cpu(local_flush_cache_all, NULL, 1);
+	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
-	on_each_cpu(local_flush_cache_mm, mm, 1);
+	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
 }
 
 void flush_cache_dup_mm(struct mm_struct *mm)
 {
-	on_each_cpu(local_flush_cache_dup_mm, mm, 1);
+	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
@@ -171,7 +180,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
 	data.addr1 = addr;
 	data.addr2 = pfn;
 
-	on_each_cpu(local_flush_cache_page, (void *)&data, 1);
+	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
@@ -183,12 +192,12 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	data.addr1 = start;
 	data.addr2 = end;
 
-	on_each_cpu(local_flush_cache_range, (void *)&data, 1);
+	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
 }
 
 void flush_dcache_page(struct page *page)
 {
-	on_each_cpu(local_flush_dcache_page, page, 1);
+	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
 }
 
 void flush_icache_range(unsigned long start, unsigned long end)
@@ -199,18 +208,18 @@ void flush_icache_range(unsigned long start, unsigned long end)
 	data.addr1 = start;
 	data.addr2 = end;
 
-	on_each_cpu(local_flush_icache_range, (void *)&data, 1);
+	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
 }
 
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
 	/* Nothing uses the VMA, so just pass the struct page along */
-	on_each_cpu(local_flush_icache_page, page, 1);
+	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
 }
 
 void flush_cache_sigtramp(unsigned long address)
 {
-	on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
+	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
 }
 
 static void compute_alias(struct cache_info *c)