about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2012-05-13 23:49:31 -0400
committerDavid S. Miller <davem@davemloft.net>2012-05-13 23:49:31 -0400
commit5d83d66635bb1642f3c6a3690c28ff4afdf1ae5f (patch)
treefb3f20377d8567af11be07c383ff21bf5fc6850a
parentb25e74b1be321613bf33492cd9d2e5dd0924562d (diff)
sparc32: Move cache and TLB flushes over to method ops.
This eliminated most of the remaining users of btfixup. There are some complications because of the special cases we have for sun4d, leon, and some flavors of viking. It was found that there are no cases where a flush_page_for_dma method was not hooked up to something, so the "noflush" iommu methods were removed. Add some documentation to the viking_sun4d_smp_ops to describe exactly the hardware bug which causes us to need special TLB flushing on sun4d. Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--arch/sparc/include/asm/cacheflush_32.h73
-rw-r--r--arch/sparc/include/asm/cachetlb_32.h29
-rw-r--r--arch/sparc/include/asm/tlbflush_32.h56
-rw-r--r--arch/sparc/kernel/leon_kernel.c2
-rw-r--r--arch/sparc/kernel/leon_smp.c26
-rw-r--r--arch/sparc/kernel/smp_32.c122
-rw-r--r--arch/sparc/kernel/sun4d_irq.c3
-rw-r--r--arch/sparc/kernel/sun4d_smp.c26
-rw-r--r--arch/sparc/kernel/sun4m_irq.c2
-rw-r--r--arch/sparc/kernel/sun4m_smp.c22
-rw-r--r--arch/sparc/mm/btfixup.c11
-rw-r--r--arch/sparc/mm/iommu.c38
-rw-r--r--arch/sparc/mm/srmmu.c534
13 files changed, 466 insertions, 478 deletions
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index 68431b47a22a..bb014c24f318 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -1,56 +1,18 @@
1#ifndef _SPARC_CACHEFLUSH_H 1#ifndef _SPARC_CACHEFLUSH_H
2#define _SPARC_CACHEFLUSH_H 2#define _SPARC_CACHEFLUSH_H
3 3
4#include <linux/mm.h> /* Common for other includes */ 4#include <asm/cachetlb_32.h>
5// #include <linux/kernel.h> from pgalloc.h 5
6// #include <linux/sched.h> from pgalloc.h 6#define flush_cache_all() \
7 7 sparc32_cachetlb_ops->cache_all()
8// #include <asm/page.h> 8#define flush_cache_mm(mm) \
9#include <asm/btfixup.h> 9 sparc32_cachetlb_ops->cache_mm(mm)
10 10#define flush_cache_dup_mm(mm) \
11/* 11 sparc32_cachetlb_ops->cache_mm(mm)
12 * Fine grained cache flushing. 12#define flush_cache_range(vma,start,end) \
13 */ 13 sparc32_cachetlb_ops->cache_range(vma, start, end)
14#ifdef CONFIG_SMP 14#define flush_cache_page(vma,addr,pfn) \
15 15 sparc32_cachetlb_ops->cache_page(vma, addr)
16BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
17BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
18BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
19BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
20
21#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
22#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
23#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
24#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
25
26BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
27BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
28
29#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
30#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
31
32extern void smp_flush_cache_all(void);
33extern void smp_flush_cache_mm(struct mm_struct *mm);
34extern void smp_flush_cache_range(struct vm_area_struct *vma,
35 unsigned long start,
36 unsigned long end);
37extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
38
39extern void smp_flush_page_to_ram(unsigned long page);
40extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
41
42#endif /* CONFIG_SMP */
43
44BTFIXUPDEF_CALL(void, flush_cache_all, void)
45BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
46BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
47BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
48
49#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
50#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
51#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
52#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
53#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
54#define flush_icache_range(start, end) do { } while (0) 16#define flush_icache_range(start, end) do { } while (0)
55#define flush_icache_page(vma, pg) do { } while (0) 17#define flush_icache_page(vma, pg) do { } while (0)
56 18
@@ -67,11 +29,12 @@ BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
67 memcpy(dst, src, len); \ 29 memcpy(dst, src, len); \
68 } while (0) 30 } while (0)
69 31
70BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long) 32#define __flush_page_to_ram(addr) \
71BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long) 33 sparc32_cachetlb_ops->page_to_ram(addr)
72 34#define flush_sig_insns(mm,insn_addr) \
73#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr) 35 sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
74#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr) 36#define flush_page_for_dma(addr) \
37 sparc32_cachetlb_ops->page_for_dma(addr)
75 38
76extern void sparc_flush_page_to_ram(struct page *page); 39extern void sparc_flush_page_to_ram(struct page *page);
77 40
diff --git a/arch/sparc/include/asm/cachetlb_32.h b/arch/sparc/include/asm/cachetlb_32.h
new file mode 100644
index 000000000000..efb19889a083
--- /dev/null
+++ b/arch/sparc/include/asm/cachetlb_32.h
@@ -0,0 +1,29 @@
1#ifndef _SPARC_CACHETLB_H
2#define _SPARC_CACHETLB_H
3
4struct mm_struct;
5struct vm_area_struct;
6
7struct sparc32_cachetlb_ops {
8 void (*cache_all)(void);
9 void (*cache_mm)(struct mm_struct *);
10 void (*cache_range)(struct vm_area_struct *, unsigned long,
11 unsigned long);
12 void (*cache_page)(struct vm_area_struct *, unsigned long);
13
14 void (*tlb_all)(void);
15 void (*tlb_mm)(struct mm_struct *);
16 void (*tlb_range)(struct vm_area_struct *, unsigned long,
17 unsigned long);
18 void (*tlb_page)(struct vm_area_struct *, unsigned long);
19
20 void (*page_to_ram)(unsigned long);
21 void (*sig_insns)(struct mm_struct *, unsigned long);
22 void (*page_for_dma)(unsigned long);
23};
24extern const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
25#ifdef CONFIG_SMP
26extern const struct sparc32_cachetlb_ops *local_ops;
27#endif
28
29#endif /* SPARC_CACHETLB_H */
diff --git a/arch/sparc/include/asm/tlbflush_32.h b/arch/sparc/include/asm/tlbflush_32.h
index fe0a71abc9bb..a5c4142130f5 100644
--- a/arch/sparc/include/asm/tlbflush_32.h
+++ b/arch/sparc/include/asm/tlbflush_32.h
@@ -1,52 +1,16 @@
1#ifndef _SPARC_TLBFLUSH_H 1#ifndef _SPARC_TLBFLUSH_H
2#define _SPARC_TLBFLUSH_H 2#define _SPARC_TLBFLUSH_H
3 3
4#include <linux/mm.h> 4#include <asm/cachetlb_32.h>
5// #include <asm/processor.h> 5
6 6#define flush_tlb_all() \
7/* 7 sparc32_cachetlb_ops->tlb_all()
8 * TLB flushing: 8#define flush_tlb_mm(mm) \
9 * 9 sparc32_cachetlb_ops->tlb_mm(mm)
10 * - flush_tlb() flushes the current mm struct TLBs XXX Exists? 10#define flush_tlb_range(vma, start, end) \
11 * - flush_tlb_all() flushes all processes TLBs 11 sparc32_cachetlb_ops->tlb_range(vma, start, end)
12 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 12#define flush_tlb_page(vma, addr) \
13 * - flush_tlb_page(vma, vmaddr) flushes one page 13 sparc32_cachetlb_ops->tlb_page(vma, addr)
14 * - flush_tlb_range(vma, start, end) flushes a range of pages
15 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
16 */
17
18#ifdef CONFIG_SMP
19
20BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
21BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
22BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
23BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
24
25#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
26#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
27#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
28#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
29
30extern void smp_flush_tlb_all(void);
31extern void smp_flush_tlb_mm(struct mm_struct *mm);
32extern void smp_flush_tlb_range(struct vm_area_struct *vma,
33 unsigned long start,
34 unsigned long end);
35extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
36
37#endif /* CONFIG_SMP */
38
39BTFIXUPDEF_CALL(void, flush_tlb_all, void)
40BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
41BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
42BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
43
44#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
45#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
46#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
47#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
48
49// #define flush_tlb() flush_tlb_mm(current->active_mm) /* XXX Sure? */
50 14
51/* 15/*
52 * This is a kludge, until I know better. --zaitcev XXX 16 * This is a kludge, until I know better. --zaitcev XXX
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index e57435f314fe..aeb411cd3927 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -427,7 +427,7 @@ void __init leon_init_timers(void)
427 */ 427 */
428 local_irq_save(flags); 428 local_irq_save(flags);
429 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ 429 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
430 local_flush_cache_all(); 430 local_ops->cache_all();
431 local_irq_restore(flags); 431 local_irq_restore(flags);
432 } 432 }
433#endif 433#endif
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 356dfc45cdd0..f3e3630e31a3 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -75,8 +75,8 @@ void __cpuinit leon_callin(void)
75{ 75{
76 int cpuid = hard_smpleon_processor_id(); 76 int cpuid = hard_smpleon_processor_id();
77 77
78 local_flush_cache_all(); 78 local_ops->cache_all();
79 local_flush_tlb_all(); 79 local_ops->tlb_all();
80 leon_configure_cache_smp(); 80 leon_configure_cache_smp();
81 81
82 notify_cpu_starting(cpuid); 82 notify_cpu_starting(cpuid);
@@ -87,8 +87,8 @@ void __cpuinit leon_callin(void)
87 calibrate_delay(); 87 calibrate_delay();
88 smp_store_cpu_info(cpuid); 88 smp_store_cpu_info(cpuid);
89 89
90 local_flush_cache_all(); 90 local_ops->cache_all();
91 local_flush_tlb_all(); 91 local_ops->tlb_all();
92 92
93 /* 93 /*
94 * Unblock the master CPU _only_ when the scheduler state 94 * Unblock the master CPU _only_ when the scheduler state
@@ -99,8 +99,8 @@ void __cpuinit leon_callin(void)
99 */ 99 */
100 do_swap(&cpu_callin_map[cpuid], 1); 100 do_swap(&cpu_callin_map[cpuid], 1);
101 101
102 local_flush_cache_all(); 102 local_ops->cache_all();
103 local_flush_tlb_all(); 103 local_ops->tlb_all();
104 104
105 /* Fix idle thread fields. */ 105 /* Fix idle thread fields. */
106 __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid]) 106 __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
@@ -143,8 +143,8 @@ void __init leon_configure_cache_smp(void)
143 } 143 }
144 } 144 }
145 145
146 local_flush_cache_all(); 146 local_ops->cache_all();
147 local_flush_tlb_all(); 147 local_ops->tlb_all();
148} 148}
149 149
150void leon_smp_setbroadcast(unsigned int mask) 150void leon_smp_setbroadcast(unsigned int mask)
@@ -199,7 +199,7 @@ void __init leon_boot_cpus(void)
199 leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER); 199 leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
200 200
201 leon_configure_cache_smp(); 201 leon_configure_cache_smp();
202 local_flush_cache_all(); 202 local_ops->cache_all();
203 203
204} 204}
205 205
@@ -226,7 +226,7 @@ int __cpuinit leon_boot_one_cpu(int i)
226 /* whirrr, whirrr, whirrrrrrrrr... */ 226 /* whirrr, whirrr, whirrrrrrrrr... */
227 printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i, 227 printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i,
228 (unsigned int)&leon3_irqctrl_regs->mpstatus); 228 (unsigned int)&leon3_irqctrl_regs->mpstatus);
229 local_flush_cache_all(); 229 local_ops->cache_all();
230 230
231 /* Make sure all IRQs are of from the start for this new CPU */ 231 /* Make sure all IRQs are of from the start for this new CPU */
232 LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0); 232 LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);
@@ -251,7 +251,7 @@ int __cpuinit leon_boot_one_cpu(int i)
251 leon_enable_irq_cpu(leon_ipi_irq, i); 251 leon_enable_irq_cpu(leon_ipi_irq, i);
252 } 252 }
253 253
254 local_flush_cache_all(); 254 local_ops->cache_all();
255 return 0; 255 return 0;
256} 256}
257 257
@@ -271,7 +271,7 @@ void __init leon_smp_done(void)
271 } 271 }
272 } 272 }
273 *prev = first; 273 *prev = first;
274 local_flush_cache_all(); 274 local_ops->cache_all();
275 275
276 /* Free unneeded trap tables */ 276 /* Free unneeded trap tables */
277 if (!cpu_present(1)) { 277 if (!cpu_present(1)) {
@@ -337,7 +337,7 @@ static void __init leon_ipi_init(void)
337 local_irq_save(flags); 337 local_irq_save(flags);
338 trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)]; 338 trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
339 trap_table->inst_three += smpleon_ipi - real_irq_entry; 339 trap_table->inst_three += smpleon_ipi - real_irq_entry;
340 local_flush_cache_all(); 340 local_ops->cache_all();
341 local_irq_restore(flags); 341 local_irq_restore(flags);
342 342
343 for_each_possible_cpu(cpu) { 343 for_each_possible_cpu(cpu) {
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 7c11439b44a1..8cd5c79f6193 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -171,128 +171,6 @@ void smp_call_function_interrupt(void)
171 irq_exit(); 171 irq_exit();
172} 172}
173 173
174void smp_flush_cache_all(void)
175{
176 xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
177 local_flush_cache_all();
178}
179
180void smp_flush_tlb_all(void)
181{
182 xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
183 local_flush_tlb_all();
184}
185
186void smp_flush_cache_mm(struct mm_struct *mm)
187{
188 if(mm->context != NO_CONTEXT) {
189 cpumask_t cpu_mask;
190 cpumask_copy(&cpu_mask, mm_cpumask(mm));
191 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
192 if (!cpumask_empty(&cpu_mask))
193 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
194 local_flush_cache_mm(mm);
195 }
196}
197
198void smp_flush_tlb_mm(struct mm_struct *mm)
199{
200 if(mm->context != NO_CONTEXT) {
201 cpumask_t cpu_mask;
202 cpumask_copy(&cpu_mask, mm_cpumask(mm));
203 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
204 if (!cpumask_empty(&cpu_mask)) {
205 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
206 if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
207 cpumask_copy(mm_cpumask(mm),
208 cpumask_of(smp_processor_id()));
209 }
210 local_flush_tlb_mm(mm);
211 }
212}
213
214void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
215 unsigned long end)
216{
217 struct mm_struct *mm = vma->vm_mm;
218
219 if (mm->context != NO_CONTEXT) {
220 cpumask_t cpu_mask;
221 cpumask_copy(&cpu_mask, mm_cpumask(mm));
222 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
223 if (!cpumask_empty(&cpu_mask))
224 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
225 local_flush_cache_range(vma, start, end);
226 }
227}
228
229void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
230 unsigned long end)
231{
232 struct mm_struct *mm = vma->vm_mm;
233
234 if (mm->context != NO_CONTEXT) {
235 cpumask_t cpu_mask;
236 cpumask_copy(&cpu_mask, mm_cpumask(mm));
237 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
238 if (!cpumask_empty(&cpu_mask))
239 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
240 local_flush_tlb_range(vma, start, end);
241 }
242}
243
244void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
245{
246 struct mm_struct *mm = vma->vm_mm;
247
248 if(mm->context != NO_CONTEXT) {
249 cpumask_t cpu_mask;
250 cpumask_copy(&cpu_mask, mm_cpumask(mm));
251 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
252 if (!cpumask_empty(&cpu_mask))
253 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
254 local_flush_cache_page(vma, page);
255 }
256}
257
258void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
259{
260 struct mm_struct *mm = vma->vm_mm;
261
262 if(mm->context != NO_CONTEXT) {
263 cpumask_t cpu_mask;
264 cpumask_copy(&cpu_mask, mm_cpumask(mm));
265 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
266 if (!cpumask_empty(&cpu_mask))
267 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
268 local_flush_tlb_page(vma, page);
269 }
270}
271
272void smp_flush_page_to_ram(unsigned long page)
273{
274 /* Current theory is that those who call this are the one's
275 * who have just dirtied their cache with the pages contents
276 * in kernel space, therefore we only run this on local cpu.
277 *
278 * XXX This experiment failed, research further... -DaveM
279 */
280#if 1
281 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
282#endif
283 local_flush_page_to_ram(page);
284}
285
286void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
287{
288 cpumask_t cpu_mask;
289 cpumask_copy(&cpu_mask, mm_cpumask(mm));
290 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
291 if (!cpumask_empty(&cpu_mask))
292 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
293 local_flush_sig_insns(mm, insn_addr);
294}
295
296int setup_profiling_timer(unsigned int multiplier) 174int setup_profiling_timer(unsigned int multiplier)
297{ 175{
298 return -EINVAL; 176 return -EINVAL;
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 15593ee1c120..b2fdb3d78c19 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -15,6 +15,7 @@
15#include <asm/sbi.h> 15#include <asm/sbi.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <asm/setup.h> 17#include <asm/setup.h>
18#include <asm/oplib.h>
18 19
19#include "kernel.h" 20#include "kernel.h"
20#include "irq.h" 21#include "irq.h"
@@ -411,7 +412,7 @@ static void __init sun4d_fixup_trap_table(void)
411 trap_table->inst_two = lvl14_save[1]; 412 trap_table->inst_two = lvl14_save[1];
412 trap_table->inst_three = lvl14_save[2]; 413 trap_table->inst_three = lvl14_save[2];
413 trap_table->inst_four = lvl14_save[3]; 414 trap_table->inst_four = lvl14_save[3];
414 local_flush_cache_all(); 415 local_ops->cache_all();
415 local_irq_restore(flags); 416 local_irq_restore(flags);
416#endif 417#endif
417} 418}
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 576fe74d226b..f17fd287bf7d 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -10,12 +10,14 @@
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include <linux/profile.h> 11#include <linux/profile.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/sched.h>
13#include <linux/cpu.h> 14#include <linux/cpu.h>
14 15
15#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
16#include <asm/switch_to.h> 17#include <asm/switch_to.h>
17#include <asm/tlbflush.h> 18#include <asm/tlbflush.h>
18#include <asm/timer.h> 19#include <asm/timer.h>
20#include <asm/oplib.h>
19#include <asm/sbi.h> 21#include <asm/sbi.h>
20#include <asm/mmu.h> 22#include <asm/mmu.h>
21 23
@@ -60,8 +62,8 @@ void __cpuinit smp4d_callin(void)
60 /* Enable level15 interrupt, disable level14 interrupt for now */ 62 /* Enable level15 interrupt, disable level14 interrupt for now */
61 cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000); 63 cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
62 64
63 local_flush_cache_all(); 65 local_ops->cache_all();
64 local_flush_tlb_all(); 66 local_ops->tlb_all();
65 67
66 notify_cpu_starting(cpuid); 68 notify_cpu_starting(cpuid);
67 /* 69 /*
@@ -75,13 +77,13 @@ void __cpuinit smp4d_callin(void)
75 77
76 calibrate_delay(); 78 calibrate_delay();
77 smp_store_cpu_info(cpuid); 79 smp_store_cpu_info(cpuid);
78 local_flush_cache_all(); 80 local_ops->cache_all();
79 local_flush_tlb_all(); 81 local_ops->tlb_all();
80 82
81 /* Allow master to continue. */ 83 /* Allow master to continue. */
82 sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1); 84 sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
83 local_flush_cache_all(); 85 local_ops->cache_all();
84 local_flush_tlb_all(); 86 local_ops->tlb_all();
85 87
86 while ((unsigned long)current_set[cpuid] < PAGE_OFFSET) 88 while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
87 barrier(); 89 barrier();
@@ -101,8 +103,8 @@ void __cpuinit smp4d_callin(void)
101 atomic_inc(&init_mm.mm_count); 103 atomic_inc(&init_mm.mm_count);
102 current->active_mm = &init_mm; 104 current->active_mm = &init_mm;
103 105
104 local_flush_cache_all(); 106 local_ops->cache_all();
105 local_flush_tlb_all(); 107 local_ops->tlb_all();
106 108
107 local_irq_enable(); /* We don't allow PIL 14 yet */ 109 local_irq_enable(); /* We don't allow PIL 14 yet */
108 110
@@ -124,7 +126,7 @@ void __init smp4d_boot_cpus(void)
124 smp4d_ipi_init(); 126 smp4d_ipi_init();
125 if (boot_cpu_id) 127 if (boot_cpu_id)
126 current_set[0] = NULL; 128 current_set[0] = NULL;
127 local_flush_cache_all(); 129 local_ops->cache_all();
128} 130}
129 131
130int __cpuinit smp4d_boot_one_cpu(int i) 132int __cpuinit smp4d_boot_one_cpu(int i)
@@ -150,7 +152,7 @@ int __cpuinit smp4d_boot_one_cpu(int i)
150 152
151 /* whirrr, whirrr, whirrrrrrrrr... */ 153 /* whirrr, whirrr, whirrrrrrrrr... */
152 printk(KERN_INFO "Starting CPU %d at %p\n", i, entry); 154 printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
153 local_flush_cache_all(); 155 local_ops->cache_all();
154 prom_startcpu(cpu_node, 156 prom_startcpu(cpu_node,
155 &smp_penguin_ctable, 0, (char *)entry); 157 &smp_penguin_ctable, 0, (char *)entry);
156 158
@@ -168,7 +170,7 @@ int __cpuinit smp4d_boot_one_cpu(int i)
168 return -ENODEV; 170 return -ENODEV;
169 171
170 } 172 }
171 local_flush_cache_all(); 173 local_ops->cache_all();
172 return 0; 174 return 0;
173} 175}
174 176
@@ -185,7 +187,7 @@ void __init smp4d_smp_done(void)
185 prev = &cpu_data(i).next; 187 prev = &cpu_data(i).next;
186 } 188 }
187 *prev = first; 189 *prev = first;
188 local_flush_cache_all(); 190 local_ops->cache_all();
189 191
190 /* Ok, they are spinning and ready to go. */ 192 /* Ok, they are spinning and ready to go. */
191 smp_processors_ready = 1; 193 smp_processors_ready = 1;
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 93f46035ce7c..32d3a5ce50f3 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -431,7 +431,7 @@ static void __init sun4m_init_timers(void)
431 trap_table->inst_two = lvl14_save[1]; 431 trap_table->inst_two = lvl14_save[1];
432 trap_table->inst_three = lvl14_save[2]; 432 trap_table->inst_three = lvl14_save[2];
433 trap_table->inst_four = lvl14_save[3]; 433 trap_table->inst_four = lvl14_save[3];
434 local_flush_cache_all(); 434 local_ops->cache_all();
435 local_irq_restore(flags); 435 local_irq_restore(flags);
436 } 436 }
437#endif 437#endif
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 29f8ace10b59..afcf6743f0eb 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -8,12 +8,14 @@
8#include <linux/interrupt.h> 8#include <linux/interrupt.h>
9#include <linux/profile.h> 9#include <linux/profile.h>
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/sched.h>
11#include <linux/cpu.h> 12#include <linux/cpu.h>
12 13
13#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
14#include <asm/switch_to.h> 15#include <asm/switch_to.h>
15#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
16#include <asm/timer.h> 17#include <asm/timer.h>
18#include <asm/oplib.h>
17 19
18#include "irq.h" 20#include "irq.h"
19#include "kernel.h" 21#include "kernel.h"
@@ -38,8 +40,8 @@ void __cpuinit smp4m_callin(void)
38{ 40{
39 int cpuid = hard_smp_processor_id(); 41 int cpuid = hard_smp_processor_id();
40 42
41 local_flush_cache_all(); 43 local_ops->cache_all();
42 local_flush_tlb_all(); 44 local_ops->tlb_all();
43 45
44 notify_cpu_starting(cpuid); 46 notify_cpu_starting(cpuid);
45 47
@@ -48,8 +50,8 @@ void __cpuinit smp4m_callin(void)
48 calibrate_delay(); 50 calibrate_delay();
49 smp_store_cpu_info(cpuid); 51 smp_store_cpu_info(cpuid);
50 52
51 local_flush_cache_all(); 53 local_ops->cache_all();
52 local_flush_tlb_all(); 54 local_ops->tlb_all();
53 55
54 /* 56 /*
55 * Unblock the master CPU _only_ when the scheduler state 57 * Unblock the master CPU _only_ when the scheduler state
@@ -61,8 +63,8 @@ void __cpuinit smp4m_callin(void)
61 swap_ulong(&cpu_callin_map[cpuid], 1); 63 swap_ulong(&cpu_callin_map[cpuid], 1);
62 64
63 /* XXX: What's up with all the flushes? */ 65 /* XXX: What's up with all the flushes? */
64 local_flush_cache_all(); 66 local_ops->cache_all();
65 local_flush_tlb_all(); 67 local_ops->tlb_all();
66 68
67 /* Fix idle thread fields. */ 69 /* Fix idle thread fields. */
68 __asm__ __volatile__("ld [%0], %%g6\n\t" 70 __asm__ __volatile__("ld [%0], %%g6\n\t"
@@ -88,7 +90,7 @@ void __init smp4m_boot_cpus(void)
88{ 90{
89 smp4m_ipi_init(); 91 smp4m_ipi_init();
90 sun4m_unmask_profile_irq(); 92 sun4m_unmask_profile_irq();
91 local_flush_cache_all(); 93 local_ops->cache_all();
92} 94}
93 95
94int __cpuinit smp4m_boot_one_cpu(int i) 96int __cpuinit smp4m_boot_one_cpu(int i)
@@ -117,7 +119,7 @@ int __cpuinit smp4m_boot_one_cpu(int i)
117 119
118 /* whirrr, whirrr, whirrrrrrrrr... */ 120 /* whirrr, whirrr, whirrrrrrrrr... */
119 printk(KERN_INFO "Starting CPU %d at %p\n", i, entry); 121 printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
120 local_flush_cache_all(); 122 local_ops->cache_all();
121 prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry); 123 prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
122 124
123 /* wheee... it's going... */ 125 /* wheee... it's going... */
@@ -132,7 +134,7 @@ int __cpuinit smp4m_boot_one_cpu(int i)
132 return -ENODEV; 134 return -ENODEV;
133 } 135 }
134 136
135 local_flush_cache_all(); 137 local_ops->cache_all();
136 return 0; 138 return 0;
137} 139}
138 140
@@ -149,7 +151,7 @@ void __init smp4m_smp_done(void)
149 prev = &cpu_data(i).next; 151 prev = &cpu_data(i).next;
150 } 152 }
151 *prev = first; 153 *prev = first;
152 local_flush_cache_all(); 154 local_ops->cache_all();
153 155
154 /* Ok, they are spinning and ready to go. */ 156 /* Ok, they are spinning and ready to go. */
155} 157}
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 1b7aa565497e..dcbb7ffcc82e 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -38,7 +38,6 @@ static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]
38static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n"; 38static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
39static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n"; 39static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
40static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n"; 40static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
41static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
42static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n"; 41static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
43 42
44#ifdef BTFIXUP_OPTIMIZE_OTHER 43#ifdef BTFIXUP_OPTIMIZE_OTHER
@@ -75,7 +74,6 @@ void __init btfixup(void)
75 unsigned insn; 74 unsigned insn;
76 unsigned *addr; 75 unsigned *addr;
77 int fmangled = 0; 76 int fmangled = 0;
78 void (*flush_cacheall)(void);
79 77
80 if (!visited) { 78 if (!visited) {
81 visited++; 79 visited++;
@@ -311,13 +309,8 @@ void __init btfixup(void)
311 p = q + count; 309 p = q + count;
312 } 310 }
313#ifdef CONFIG_SMP 311#ifdef CONFIG_SMP
314 flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all); 312 local_ops->cache_all();
315#else 313#else
316 flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all); 314 sparc32_cachetlb_ops->cache_all();
317#endif 315#endif
318 if (!flush_cacheall) {
319 prom_printf(fca_und);
320 prom_halt();
321 }
322 (*flush_cacheall)();
323} 316}
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index c64f81e370aa..720bea2c7fdd 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -39,8 +39,6 @@
39 39
40/* srmmu.c */ 40/* srmmu.c */
41extern int viking_mxcc_present; 41extern int viking_mxcc_present;
42BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
43#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
44extern int flush_page_for_dma_global; 42extern int flush_page_for_dma_global;
45static int viking_flush; 43static int viking_flush;
46/* viking.S */ 44/* viking.S */
@@ -216,11 +214,6 @@ static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
216 return busa + off; 214 return busa + off;
217} 215}
218 216
219static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
220{
221 return iommu_get_scsi_one(dev, vaddr, len);
222}
223
224static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len) 217static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
225{ 218{
226 flush_page_for_dma(0); 219 flush_page_for_dma(0);
@@ -238,19 +231,6 @@ static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned
238 return iommu_get_scsi_one(dev, vaddr, len); 231 return iommu_get_scsi_one(dev, vaddr, len);
239} 232}
240 233
241static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
242{
243 int n;
244
245 while (sz != 0) {
246 --sz;
247 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
248 sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
249 sg->dma_length = sg->length;
250 sg = sg_next(sg);
251 }
252}
253
254static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz) 234static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
255{ 235{
256 int n; 236 int n;
@@ -426,17 +406,6 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len
426} 406}
427#endif 407#endif
428 408
429static const struct sparc32_dma_ops iommu_dma_noflush_ops = {
430 .get_scsi_one = iommu_get_scsi_one_noflush,
431 .get_scsi_sgl = iommu_get_scsi_sgl_noflush,
432 .release_scsi_one = iommu_release_scsi_one,
433 .release_scsi_sgl = iommu_release_scsi_sgl,
434#ifdef CONFIG_SBUS
435 .map_dma_area = iommu_map_dma_area,
436 .unmap_dma_area = iommu_unmap_dma_area,
437#endif
438};
439
440static const struct sparc32_dma_ops iommu_dma_gflush_ops = { 409static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
441 .get_scsi_one = iommu_get_scsi_one_gflush, 410 .get_scsi_one = iommu_get_scsi_one_gflush,
442 .get_scsi_sgl = iommu_get_scsi_sgl_gflush, 411 .get_scsi_sgl = iommu_get_scsi_sgl_gflush,
@@ -461,12 +430,7 @@ static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
461 430
462void __init ld_mmu_iommu(void) 431void __init ld_mmu_iommu(void)
463{ 432{
464 viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page); 433 if (flush_page_for_dma_global) {
465
466 if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
467 /* IO coherent chip */
468 sparc32_dma_ops = &iommu_dma_noflush_ops;
469 } else if (flush_page_for_dma_global) {
470 /* flush_page_for_dma flushes everything, no matter of what page is it */ 434 /* flush_page_for_dma flushes everything, no matter of what page is it */
471 sparc32_dma_ops = &iommu_dma_gflush_ops; 435 sparc32_dma_ops = &iommu_dma_gflush_ops;
472 } else { 436 } else {
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index dc398e5c71a4..cba05fa3fbc7 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -65,24 +65,20 @@ extern unsigned long last_valid_pfn;
65 65
66static pgd_t *srmmu_swapper_pg_dir; 66static pgd_t *srmmu_swapper_pg_dir;
67 67
68const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
69
68#ifdef CONFIG_SMP 70#ifdef CONFIG_SMP
71const struct sparc32_cachetlb_ops *local_ops;
72
69#define FLUSH_BEGIN(mm) 73#define FLUSH_BEGIN(mm)
70#define FLUSH_END 74#define FLUSH_END
71#else 75#else
72#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) { 76#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
73#define FLUSH_END } 77#define FLUSH_END }
74#endif 78#endif
75 79
76BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
77#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
78
79int flush_page_for_dma_global = 1; 80int flush_page_for_dma_global = 1;
80 81
81#ifdef CONFIG_SMP
82BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
83#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
84#endif
85
86char *srmmu_name; 82char *srmmu_name;
87 83
88ctxd_t *srmmu_ctx_table_phys; 84ctxd_t *srmmu_ctx_table_phys;
@@ -1126,7 +1122,7 @@ void __init srmmu_paging_init(void)
1126 srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys); 1122 srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
1127#ifdef CONFIG_SMP 1123#ifdef CONFIG_SMP
1128 /* Stop from hanging here... */ 1124 /* Stop from hanging here... */
1129 local_flush_tlb_all(); 1125 local_ops->tlb_all();
1130#else 1126#else
1131 flush_tlb_all(); 1127 flush_tlb_all();
1132#endif 1128#endif
@@ -1284,6 +1280,20 @@ static void __cpuinit poke_hypersparc(void)
1284 clear = srmmu_get_fstatus(); 1280 clear = srmmu_get_fstatus();
1285} 1281}
1286 1282
1283static const struct sparc32_cachetlb_ops hypersparc_ops = {
1284 .cache_all = hypersparc_flush_cache_all,
1285 .cache_mm = hypersparc_flush_cache_mm,
1286 .cache_page = hypersparc_flush_cache_page,
1287 .cache_range = hypersparc_flush_cache_range,
1288 .tlb_all = hypersparc_flush_tlb_all,
1289 .tlb_mm = hypersparc_flush_tlb_mm,
1290 .tlb_page = hypersparc_flush_tlb_page,
1291 .tlb_range = hypersparc_flush_tlb_range,
1292 .page_to_ram = hypersparc_flush_page_to_ram,
1293 .sig_insns = hypersparc_flush_sig_insns,
1294 .page_for_dma = hypersparc_flush_page_for_dma,
1295};
1296
1287static void __init init_hypersparc(void) 1297static void __init init_hypersparc(void)
1288{ 1298{
1289 srmmu_name = "ROSS HyperSparc"; 1299 srmmu_name = "ROSS HyperSparc";
@@ -1292,21 +1302,7 @@ static void __init init_hypersparc(void)
1292 init_vac_layout(); 1302 init_vac_layout();
1293 1303
1294 is_hypersparc = 1; 1304 is_hypersparc = 1;
1295 1305 sparc32_cachetlb_ops = &hypersparc_ops;
1296 BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
1297 BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
1298 BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
1299 BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
1300
1301 BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
1302 BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
1303 BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
1304 BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
1305
1306 BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
1307 BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
1308 BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
1309
1310 1306
1311 poke_srmmu = poke_hypersparc; 1307 poke_srmmu = poke_hypersparc;
1312 1308
@@ -1352,25 +1348,24 @@ static void __cpuinit poke_cypress(void)
1352 srmmu_set_mmureg(mreg); 1348 srmmu_set_mmureg(mreg);
1353} 1349}
1354 1350
1351static const struct sparc32_cachetlb_ops cypress_ops = {
1352 .cache_all = cypress_flush_cache_all,
1353 .cache_mm = cypress_flush_cache_mm,
1354 .cache_page = cypress_flush_cache_page,
1355 .cache_range = cypress_flush_cache_range,
1356 .tlb_all = cypress_flush_tlb_all,
1357 .tlb_mm = cypress_flush_tlb_mm,
1358 .tlb_page = cypress_flush_tlb_page,
1359 .tlb_range = cypress_flush_tlb_range,
1360 .page_to_ram = cypress_flush_page_to_ram,
1361 .sig_insns = cypress_flush_sig_insns,
1362 .page_for_dma = cypress_flush_page_for_dma,
1363};
1364
1355static void __init init_cypress_common(void) 1365static void __init init_cypress_common(void)
1356{ 1366{
1357 init_vac_layout(); 1367 init_vac_layout();
1358 1368 sparc32_cachetlb_ops = &cypress_ops;
1359 BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
1360 BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
1361 BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
1362 BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
1363
1364 BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
1365 BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
1366 BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
1367 BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
1368
1369
1370 BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
1371 BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
1372 BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
1373
1374 poke_srmmu = poke_cypress; 1369 poke_srmmu = poke_cypress;
1375} 1370}
1376 1371
@@ -1421,6 +1416,20 @@ static void __cpuinit poke_swift(void)
1421 srmmu_set_mmureg(mreg); 1416 srmmu_set_mmureg(mreg);
1422} 1417}
1423 1418
1419static const struct sparc32_cachetlb_ops swift_ops = {
1420 .cache_all = swift_flush_cache_all,
1421 .cache_mm = swift_flush_cache_mm,
1422 .cache_page = swift_flush_cache_page,
1423 .cache_range = swift_flush_cache_range,
1424 .tlb_all = swift_flush_tlb_all,
1425 .tlb_mm = swift_flush_tlb_mm,
1426 .tlb_page = swift_flush_tlb_page,
1427 .tlb_range = swift_flush_tlb_range,
1428 .page_to_ram = swift_flush_page_to_ram,
1429 .sig_insns = swift_flush_sig_insns,
1430 .page_for_dma = swift_flush_page_for_dma,
1431};
1432
1424#define SWIFT_MASKID_ADDR 0x10003018 1433#define SWIFT_MASKID_ADDR 0x10003018
1425static void __init init_swift(void) 1434static void __init init_swift(void)
1426{ 1435{
@@ -1471,21 +1480,7 @@ static void __init init_swift(void)
1471 break; 1480 break;
1472 } 1481 }
1473 1482
1474 BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM); 1483 sparc32_cachetlb_ops = &swift_ops;
1475 BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
1476 BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
1477 BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
1478
1479
1480 BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
1481 BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
1482 BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
1483 BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
1484
1485 BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
1486 BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
1487 BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
1488
1489 flush_page_for_dma_global = 0; 1484 flush_page_for_dma_global = 0;
1490 1485
1491 /* 1486 /*
@@ -1618,26 +1613,25 @@ static void __cpuinit poke_turbosparc(void)
1618 srmmu_set_mmureg(mreg); 1613 srmmu_set_mmureg(mreg);
1619} 1614}
1620 1615
1616static const struct sparc32_cachetlb_ops turbosparc_ops = {
1617 .cache_all = turbosparc_flush_cache_all,
1618 .cache_mm = turbosparc_flush_cache_mm,
1619 .cache_page = turbosparc_flush_cache_page,
1620 .cache_range = turbosparc_flush_cache_range,
1621 .tlb_all = turbosparc_flush_tlb_all,
1622 .tlb_mm = turbosparc_flush_tlb_mm,
1623 .tlb_page = turbosparc_flush_tlb_page,
1624 .tlb_range = turbosparc_flush_tlb_range,
1625 .page_to_ram = turbosparc_flush_page_to_ram,
1626 .sig_insns = turbosparc_flush_sig_insns,
1627 .page_for_dma = turbosparc_flush_page_for_dma,
1628};
1629
1621static void __init init_turbosparc(void) 1630static void __init init_turbosparc(void)
1622{ 1631{
1623 srmmu_name = "Fujitsu TurboSparc"; 1632 srmmu_name = "Fujitsu TurboSparc";
1624 srmmu_modtype = TurboSparc; 1633 srmmu_modtype = TurboSparc;
1625 1634 sparc32_cachetlb_ops = &turbosparc_ops;
1626 BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
1627 BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
1628 BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
1629 BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
1630
1631 BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
1632 BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
1633 BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
1634 BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
1635
1636 BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
1637
1638 BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
1639 BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
1640
1641 poke_srmmu = poke_turbosparc; 1635 poke_srmmu = poke_turbosparc;
1642} 1636}
1643 1637
@@ -1652,6 +1646,20 @@ static void __cpuinit poke_tsunami(void)
1652 srmmu_set_mmureg(mreg); 1646 srmmu_set_mmureg(mreg);
1653} 1647}
1654 1648
1649static const struct sparc32_cachetlb_ops tsunami_ops = {
1650 .cache_all = tsunami_flush_cache_all,
1651 .cache_mm = tsunami_flush_cache_mm,
1652 .cache_page = tsunami_flush_cache_page,
1653 .cache_range = tsunami_flush_cache_range,
1654 .tlb_all = tsunami_flush_tlb_all,
1655 .tlb_mm = tsunami_flush_tlb_mm,
1656 .tlb_page = tsunami_flush_tlb_page,
1657 .tlb_range = tsunami_flush_tlb_range,
1658 .page_to_ram = tsunami_flush_page_to_ram,
1659 .sig_insns = tsunami_flush_sig_insns,
1660 .page_for_dma = tsunami_flush_page_for_dma,
1661};
1662
1655static void __init init_tsunami(void) 1663static void __init init_tsunami(void)
1656{ 1664{
1657 /* 1665 /*
@@ -1662,22 +1670,7 @@ static void __init init_tsunami(void)
1662 1670
1663 srmmu_name = "TI Tsunami"; 1671 srmmu_name = "TI Tsunami";
1664 srmmu_modtype = Tsunami; 1672 srmmu_modtype = Tsunami;
1665 1673 sparc32_cachetlb_ops = &tsunami_ops;
1666 BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
1667 BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
1668 BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
1669 BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
1670
1671
1672 BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
1673 BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
1674 BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
1675 BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
1676
1677 BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
1678 BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
1679 BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
1680
1681 poke_srmmu = poke_tsunami; 1674 poke_srmmu = poke_tsunami;
1682 1675
1683 tsunami_setup_blockops(); 1676 tsunami_setup_blockops();
@@ -1688,7 +1681,7 @@ static void __cpuinit poke_viking(void)
1688 unsigned long mreg = srmmu_get_mmureg(); 1681 unsigned long mreg = srmmu_get_mmureg();
1689 static int smp_catch; 1682 static int smp_catch;
1690 1683
1691 if(viking_mxcc_present) { 1684 if (viking_mxcc_present) {
1692 unsigned long mxcc_control = mxcc_get_creg(); 1685 unsigned long mxcc_control = mxcc_get_creg();
1693 1686
1694 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE); 1687 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
@@ -1725,6 +1718,52 @@ static void __cpuinit poke_viking(void)
1725 srmmu_set_mmureg(mreg); 1718 srmmu_set_mmureg(mreg);
1726} 1719}
1727 1720
1721static struct sparc32_cachetlb_ops viking_ops = {
1722 .cache_all = viking_flush_cache_all,
1723 .cache_mm = viking_flush_cache_mm,
1724 .cache_page = viking_flush_cache_page,
1725 .cache_range = viking_flush_cache_range,
1726 .tlb_all = viking_flush_tlb_all,
1727 .tlb_mm = viking_flush_tlb_mm,
1728 .tlb_page = viking_flush_tlb_page,
1729 .tlb_range = viking_flush_tlb_range,
1730 .page_to_ram = viking_flush_page_to_ram,
1731 .sig_insns = viking_flush_sig_insns,
1732 .page_for_dma = viking_flush_page_for_dma,
1733};
1734
1735#ifdef CONFIG_SMP
1736/* On sun4d the cpu broadcasts local TLB flushes, so we can just
1737 * perform the local TLB flush and all the other cpus will see it.
1738 * But, unfortunately, there is a bug in the sun4d XBUS backplane
1739 * that requires that we add some synchronization to these flushes.
1740 *
1741 * The bug is that the fifo which keeps track of all the pending TLB
1742 * broadcasts in the system is an entry or two too small, so if we
1743 * have too many going at once we'll overflow that fifo and lose a TLB
1744 * flush resulting in corruption.
1745 *
1746 * Our workaround is to take a global spinlock around the TLB flushes,
1747 * which guarentees we won't ever have too many pending. It's a big
1748 * hammer, but a semaphore like system to make sure we only have N TLB
1749 * flushes going at once will require SMP locking anyways so there's
1750 * no real value in trying any harder than this.
1751 */
1752static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
1753 .cache_all = viking_flush_cache_all,
1754 .cache_mm = viking_flush_cache_mm,
1755 .cache_page = viking_flush_cache_page,
1756 .cache_range = viking_flush_cache_range,
1757 .tlb_all = sun4dsmp_flush_tlb_all,
1758 .tlb_mm = sun4dsmp_flush_tlb_mm,
1759 .tlb_page = sun4dsmp_flush_tlb_page,
1760 .tlb_range = sun4dsmp_flush_tlb_range,
1761 .page_to_ram = viking_flush_page_to_ram,
1762 .sig_insns = viking_flush_sig_insns,
1763 .page_for_dma = viking_flush_page_for_dma,
1764};
1765#endif
1766
1728static void __init init_viking(void) 1767static void __init init_viking(void)
1729{ 1768{
1730 unsigned long mreg = srmmu_get_mmureg(); 1769 unsigned long mreg = srmmu_get_mmureg();
@@ -1742,76 +1781,101 @@ static void __init init_viking(void)
1742 * This is only necessary because of the new way in 1781 * This is only necessary because of the new way in
1743 * which we use the IOMMU. 1782 * which we use the IOMMU.
1744 */ 1783 */
1745 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM); 1784 viking_ops.page_for_dma = viking_flush_page;
1746 1785#ifdef CONFIG_SMP
1786 viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1787#endif
1747 flush_page_for_dma_global = 0; 1788 flush_page_for_dma_global = 0;
1748 } else { 1789 } else {
1749 srmmu_name = "TI Viking/MXCC"; 1790 srmmu_name = "TI Viking/MXCC";
1750 viking_mxcc_present = 1; 1791 viking_mxcc_present = 1;
1751
1752 srmmu_cache_pagetables = 1; 1792 srmmu_cache_pagetables = 1;
1753
1754 /* MXCC vikings lack the DMA snooping bug. */
1755 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
1756 } 1793 }
1757 1794
1758 BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM); 1795 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1759 BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM); 1796 &viking_ops;
1760 BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
1761 BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
1762
1763#ifdef CONFIG_SMP 1797#ifdef CONFIG_SMP
1764 if (sparc_cpu_model == sun4d) { 1798 if (sparc_cpu_model == sun4d)
1765 BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM); 1799 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1766 BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM); 1800 &viking_sun4d_smp_ops;
1767 BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
1768 BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
1769 } else
1770#endif 1801#endif
1771 {
1772 BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
1773 BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
1774 BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
1775 BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
1776 }
1777
1778 BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
1779 BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
1780 1802
1781 poke_srmmu = poke_viking; 1803 poke_srmmu = poke_viking;
1782} 1804}
1783 1805
1784#ifdef CONFIG_SPARC_LEON 1806#ifdef CONFIG_SPARC_LEON
1807static void leon_flush_cache_mm(struct mm_struct *mm)
1808{
1809 leon_flush_cache_all();
1810}
1785 1811
1786void __init poke_leonsparc(void) 1812static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1787{ 1813{
1814 leon_flush_pcache_all(vma, page);
1788} 1815}
1789 1816
1790void __init init_leon(void) 1817static void leon_flush_cache_range(struct vm_area_struct *vma,
1818 unsigned long start,
1819 unsigned long end)
1791{ 1820{
1821 leon_flush_cache_all();
1822}
1792 1823
1793 srmmu_name = "LEON"; 1824static void leon_flush_tlb_mm(struct mm_struct *mm)
1825{
1826 leon_flush_tlb_all();
1827}
1828
1829static void leon_flush_tlb_page(struct vm_area_struct *vma,
1830 unsigned long page)
1831{
1832 leon_flush_tlb_all();
1833}
1794 1834
1795 BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all, 1835static void leon_flush_tlb_range(struct vm_area_struct *vma,
1796 BTFIXUPCALL_NORM); 1836 unsigned long start,
1797 BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all, 1837 unsigned long end)
1798 BTFIXUPCALL_NORM); 1838{
1799 BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all, 1839 leon_flush_tlb_all();
1800 BTFIXUPCALL_NORM); 1840}
1801 BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all, 1841
1802 BTFIXUPCALL_NORM); 1842static void leon_flush_page_to_ram(unsigned long page)
1803 BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all, 1843{
1804 BTFIXUPCALL_NORM); 1844 leon_flush_cache_all();
1805 1845}
1806 BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM); 1846
1807 BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM); 1847static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
1808 BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM); 1848{
1809 BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM); 1849 leon_flush_cache_all();
1810 1850}
1811 BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all, 1851
1812 BTFIXUPCALL_NOP); 1852static void leon_flush_page_for_dma(unsigned long page)
1813 BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP); 1853{
1854 leon_flush_dcache_all();
1855}
1856
1857void __init poke_leonsparc(void)
1858{
1859}
1860
1861static const struct sparc32_cachetlb_ops leon_ops = {
1862 .cache_all = leon_flush_cache_all,
1863 .cache_mm = leon_flush_cache_mm,
1864 .cache_page = leon_flush_cache_page,
1865 .cache_range = leon_flush_cache_range,
1866 .tlb_all = leon_flush_tlb_all,
1867 .tlb_mm = leon_flush_tlb_mm,
1868 .tlb_page = leon_flush_tlb_page,
1869 .tlb_range = leon_flush_tlb_range,
1870 .page_to_ram = leon_flush_page_to_ram,
1871 .sig_insns = leon_flush_sig_insns,
1872 .page_for_dma = leon_flush_page_for_dma,
1873};
1814 1874
1875void __init init_leon(void)
1876{
1877 srmmu_name = "LEON";
1878 sparc32_cachetlb_ops = &leon_ops;
1815 poke_srmmu = poke_leonsparc; 1879 poke_srmmu = poke_leonsparc;
1816 1880
1817 srmmu_cache_pagetables = 0; 1881 srmmu_cache_pagetables = 0;
@@ -1925,10 +1989,152 @@ static void __init get_srmmu_type(void)
1925/* Local cross-calls. */ 1989/* Local cross-calls. */
1926static void smp_flush_page_for_dma(unsigned long page) 1990static void smp_flush_page_for_dma(unsigned long page)
1927{ 1991{
1928 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page); 1992 xc1((smpfunc_t) local_ops->page_for_dma, page);
1929 local_flush_page_for_dma(page); 1993 local_ops->page_for_dma(page);
1994}
1995
1996static void smp_flush_cache_all(void)
1997{
1998 xc0((smpfunc_t) local_ops->cache_all);
1999 local_ops->cache_all();
2000}
2001
2002static void smp_flush_tlb_all(void)
2003{
2004 xc0((smpfunc_t) local_ops->tlb_all);
2005 local_ops->tlb_all();
2006}
2007
2008static void smp_flush_cache_mm(struct mm_struct *mm)
2009{
2010 if (mm->context != NO_CONTEXT) {
2011 cpumask_t cpu_mask;
2012 cpumask_copy(&cpu_mask, mm_cpumask(mm));
2013 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
2014 if (!cpumask_empty(&cpu_mask))
2015 xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
2016 local_ops->cache_mm(mm);
2017 }
2018}
2019
2020static void smp_flush_tlb_mm(struct mm_struct *mm)
2021{
2022 if (mm->context != NO_CONTEXT) {
2023 cpumask_t cpu_mask;
2024 cpumask_copy(&cpu_mask, mm_cpumask(mm));
2025 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
2026 if (!cpumask_empty(&cpu_mask)) {
2027 xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
2028 if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
2029 cpumask_copy(mm_cpumask(mm),
2030 cpumask_of(smp_processor_id()));
2031 }
2032 local_ops->tlb_mm(mm);
2033 }
2034}
2035
2036static void smp_flush_cache_range(struct vm_area_struct *vma,
2037 unsigned long start,
2038 unsigned long end)
2039{
2040 struct mm_struct *mm = vma->vm_mm;
2041
2042 if (mm->context != NO_CONTEXT) {
2043 cpumask_t cpu_mask;
2044 cpumask_copy(&cpu_mask, mm_cpumask(mm));
2045 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
2046 if (!cpumask_empty(&cpu_mask))
2047 xc3((smpfunc_t) local_ops->cache_range,
2048 (unsigned long) vma, start, end);
2049 local_ops->cache_range(vma, start, end);
2050 }
2051}
2052
2053static void smp_flush_tlb_range(struct vm_area_struct *vma,
2054 unsigned long start,
2055 unsigned long end)
2056{
2057 struct mm_struct *mm = vma->vm_mm;
2058
2059 if (mm->context != NO_CONTEXT) {
2060 cpumask_t cpu_mask;
2061 cpumask_copy(&cpu_mask, mm_cpumask(mm));
2062 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
2063 if (!cpumask_empty(&cpu_mask))
2064 xc3((smpfunc_t) local_ops->tlb_range,
2065 (unsigned long) vma, start, end);
2066 local_ops->tlb_range(vma, start, end);
2067 }
1930} 2068}
1931 2069
2070static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
2071{
2072 struct mm_struct *mm = vma->vm_mm;
2073
2074 if (mm->context != NO_CONTEXT) {
2075 cpumask_t cpu_mask;
2076 cpumask_copy(&cpu_mask, mm_cpumask(mm));
2077 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
2078 if (!cpumask_empty(&cpu_mask))
2079 xc2((smpfunc_t) local_ops->cache_page,
2080 (unsigned long) vma, page);
2081 local_ops->cache_page(vma, page);
2082 }
2083}
2084
2085static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
2086{
2087 struct mm_struct *mm = vma->vm_mm;
2088
2089 if (mm->context != NO_CONTEXT) {
2090 cpumask_t cpu_mask;
2091 cpumask_copy(&cpu_mask, mm_cpumask(mm));
2092 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
2093 if (!cpumask_empty(&cpu_mask))
2094 xc2((smpfunc_t) local_ops->tlb_page,
2095 (unsigned long) vma, page);
2096 local_ops->tlb_page(vma, page);
2097 }
2098}
2099
2100static void smp_flush_page_to_ram(unsigned long page)
2101{
2102 /* Current theory is that those who call this are the one's
2103 * who have just dirtied their cache with the pages contents
2104 * in kernel space, therefore we only run this on local cpu.
2105 *
2106 * XXX This experiment failed, research further... -DaveM
2107 */
2108#if 1
2109 xc1((smpfunc_t) local_ops->page_to_ram, page);
2110#endif
2111 local_ops->page_to_ram(page);
2112}
2113
2114static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
2115{
2116 cpumask_t cpu_mask;
2117 cpumask_copy(&cpu_mask, mm_cpumask(mm));
2118 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
2119 if (!cpumask_empty(&cpu_mask))
2120 xc2((smpfunc_t) local_ops->sig_insns,
2121 (unsigned long) mm, insn_addr);
2122 local_ops->sig_insns(mm, insn_addr);
2123}
2124
2125static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
2126 .cache_all = smp_flush_cache_all,
2127 .cache_mm = smp_flush_cache_mm,
2128 .cache_page = smp_flush_cache_page,
2129 .cache_range = smp_flush_cache_range,
2130 .tlb_all = smp_flush_tlb_all,
2131 .tlb_mm = smp_flush_tlb_mm,
2132 .tlb_page = smp_flush_tlb_page,
2133 .tlb_range = smp_flush_tlb_range,
2134 .page_to_ram = smp_flush_page_to_ram,
2135 .sig_insns = smp_flush_sig_insns,
2136 .page_for_dma = smp_flush_page_for_dma,
2137};
1932#endif 2138#endif
1933 2139
1934/* Load up routines and constants for sun4m and sun4d mmu */ 2140/* Load up routines and constants for sun4m and sun4d mmu */
@@ -1942,44 +2148,30 @@ void __init load_mmu(void)
1942 2148
1943#ifdef CONFIG_SMP 2149#ifdef CONFIG_SMP
1944 /* El switcheroo... */ 2150 /* El switcheroo... */
2151 local_ops = sparc32_cachetlb_ops;
1945 2152
1946 BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all); 2153 if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
1947 BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm); 2154 smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
1948 BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range); 2155 smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
1949 BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page); 2156 smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
1950 BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all); 2157 smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
1951 BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
1952 BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
1953 BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
1954 BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
1955 BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
1956 BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
1957
1958 BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
1959 BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
1960 BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
1961 BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
1962 if (sparc_cpu_model != sun4d &&
1963 sparc_cpu_model != sparc_leon) {
1964 BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
1965 BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
1966 BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
1967 BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
1968 } 2158 }
1969 BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
1970 BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
1971 BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
1972 2159
1973 if (poke_srmmu == poke_viking) { 2160 if (poke_srmmu == poke_viking) {
1974 /* Avoid unnecessary cross calls. */ 2161 /* Avoid unnecessary cross calls. */
1975 BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all); 2162 smp_cachetlb_ops.cache_all = local_ops->cache_all;
1976 BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm); 2163 smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
1977 BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range); 2164 smp_cachetlb_ops.cache_range = local_ops->cache_range;
1978 BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page); 2165 smp_cachetlb_ops.cache_page = local_ops->cache_page;
1979 BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram); 2166
1980 BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns); 2167 smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
1981 BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma); 2168 smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
2169 smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
1982 } 2170 }
2171
2172 /* It really is const after this point. */
2173 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
2174 &smp_cachetlb_ops;
1983#endif 2175#endif
1984 2176
1985 if (sparc_cpu_model == sun4d) 2177 if (sparc_cpu_model == sun4d)