path: root/arch/sh/kernel/smp.c
author		Paul Mundt <lethal@linux-sh.org>	2007-09-21 05:09:55 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2007-09-21 05:09:55 -0400
commit		9964fa8bf952c5c4df9676223fab4cd886d18200
tree		f33281d2ff012be8c0b554c3cb5aca2eeb43c922 /arch/sh/kernel/smp.c
parent		7ec9d6f8c0e6932d380da1964021fbebf2311f04
sh: Add SMP tlbflush variants.
This adds the TLB flushing routines for SMP systems, based on the MIPS
implementation, with some additional SH-specific flush routines.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
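[Editorial note, not part of the commit.] For quick reference, these are the entry points the patch adds below; each pairs a cross-CPU IPI (via smp_call_function() or on_each_cpu(), in their four-argument 2007-era form) with the corresponding local_* flush on the calling cpu. The prototypes are transcribed from the diff:

void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
void flush_tlb_one(unsigned long asid, unsigned long vaddr);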
Diffstat (limited to 'arch/sh/kernel/smp.c')
-rw-r--r--	arch/sh/kernel/smp.c	| 140
1 file changed, 140 insertions(+), 0 deletions(-)
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 283e1425ced..f93d5ffa941 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -205,3 +205,143 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
+static void flush_tlb_all_ipi(void *info)
+{
+	local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+}
+
+static void flush_tlb_mm_ipi(void *mm)
+{
+	local_flush_tlb_mm((struct mm_struct *)mm);
+}
+
+/*
+ * The following tlb flush calls are invoked when old translations are
+ * being torn down, or pte attributes are changing. For single threaded
+ * address spaces, a new context is obtained on the current cpu, and tlb
+ * contexts on other cpus are invalidated to force a new context allocation
+ * at switch_mm time, should the mm ever be used on other cpus. For
+ * multithreaded address spaces, intercpu interrupts have to be sent.
+ * Another case where intercpu interrupts are required is when the target
+ * mm might be active on another cpu (e.g. debuggers doing the flushes on
+ * behalf of debuggees, kswapd stealing pages from another process, etc.).
+ * Kanoj 07/00.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	preempt_disable();
+
+	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+	} else {
+		int i;
+		for (i = 0; i < num_online_cpus(); i++)
+			if (smp_processor_id() != i)
+				cpu_context(i, mm) = 0;
+	}
+	local_flush_tlb_mm(mm);
+
+	preempt_enable();
+}
+
+struct flush_tlb_data {
+	struct vm_area_struct *vma;
+	unsigned long addr1;
+	unsigned long addr2;
+};
+
+static void flush_tlb_range_ipi(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+		     unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	preempt_disable();
+	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+		struct flush_tlb_data fd;
+
+		fd.vma = vma;
+		fd.addr1 = start;
+		fd.addr2 = end;
+		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+	} else {
+		int i;
+		for (i = 0; i < num_online_cpus(); i++)
+			if (smp_processor_id() != i)
+				cpu_context(i, mm) = 0;
+	}
+	local_flush_tlb_range(vma, start, end);
+	preempt_enable();
+}
+
+static void flush_tlb_kernel_range_ipi(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct flush_tlb_data fd;
+
+	fd.addr1 = start;
+	fd.addr2 = end;
+	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+}
+
+static void flush_tlb_page_ipi(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	preempt_disable();
+	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+	    (current->mm != vma->vm_mm)) {
+		struct flush_tlb_data fd;
+
+		fd.vma = vma;
+		fd.addr1 = page;
+		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+	} else {
+		int i;
+		for (i = 0; i < num_online_cpus(); i++)
+			if (smp_processor_id() != i)
+				cpu_context(i, vma->vm_mm) = 0;
+	}
+	local_flush_tlb_page(vma, page);
+	preempt_enable();
+}
+
+static void flush_tlb_one_ipi(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+	local_flush_tlb_one(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_one(unsigned long asid, unsigned long vaddr)
+{
+	struct flush_tlb_data fd;
+
+	fd.addr1 = asid;
+	fd.addr2 = vaddr;
+
+	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+	local_flush_tlb_one(asid, vaddr);
+}
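
[Editorial note, not part of the commit.] The fast path described in the "Kanoj 07/00" comment above is worth spelling out: when the mm has a single user and is the current task's mm, no IPI is sent; instead the mm's context on every other cpu is zeroed, so the next switch_mm() on that cpu must allocate a fresh ASID before any stale translation can be reached. A minimal sketch of that idiom, factored out of the loop that appears three times in the diff (the helper name is hypothetical and does not exist in the patch):

/*
 * Hypothetical helper, for illustration only: invalidate this mm's
 * context on all other cpus so switch_mm() allocates a new ASID there.
 * Caller must hold preemption off, as flush_tlb_mm() and friends above do,
 * so that smp_processor_id() stays stable across the loop.
 */
static void invalidate_remote_contexts(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < num_online_cpus(); i++)
		if (smp_processor_id() != i)
			cpu_context(i, mm) = 0;	/* stale; reallocated lazily */
}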