author    Paul Mundt <lethal@linux-sh.org>    2006-12-24 19:51:47 -0500
committer Paul Mundt <lethal@linux-sh.org>    2007-02-12 20:54:45 -0500
commit    aec5e0e1c179fac4bbca4007a3f0d3107275a73c (patch)
tree      3b251e52a89445a5546f398fb16a002435b6c2b6
parent    506b85f4114b912d2e91fab8da9849289e43857f (diff)
sh: Use a per-cpu ASID cache.

Previously this was implemented using a global cache; cache this
per-CPU instead and bump up the number of context IDs to match
NR_CPUS.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
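For context, the scheme this patch makes per-CPU splits each context ID into a 24-bit version (upper bits) and the 8-bit hardware ASID (lower bits): when the 256 ASIDs of a version run out, the TLB is flushed and a new version starts, so a stale ID is detected by a version mismatch against the CPU's cache. Below is a minimal userspace C sketch of that allocation logic, mirroring the patched get_mmu_context(); the struct mm and main() harness are hypothetical illustrations, and the TLB flush on version wrap is elided.

/*
 * Minimal userspace sketch of the versioned ASID scheme, assuming the
 * masks from include/asm-sh/mmu_context.h; the mm/main harness is
 * hypothetical, and the TLB flush on version wrap is elided.
 */
#include <stdio.h>

#define NR_CPUS                         4
#define NO_CONTEXT                      0UL
#define MMU_CONTEXT_ASID_MASK           0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK        0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION       0x00000100UL

static unsigned long asid_cache[NR_CPUS];  /* per-CPU, as in this patch */

struct mm {
        unsigned long id[NR_CPUS];          /* mm_context_id_t id */
};

static void get_mmu_context(struct mm *mm, unsigned int cpu)
{
        unsigned long asid = asid_cache[cpu];

        /* Same version as this CPU's cache: the cached ASID is still valid. */
        if (((mm->id[cpu] ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
                return;

        if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
                /*
                 * All 256 ASIDs of this version are used; a real kernel
                 * flushes the TLB here, then starts the next version,
                 * skipping version #0 so NO_CONTEXT stays distinguishable.
                 */
                if (!asid)
                        asid = MMU_CONTEXT_FIRST_VERSION;
        }

        mm->id[cpu] = asid_cache[cpu] = asid;
}

int main(void)
{
        struct mm a = { { NO_CONTEXT } }, b = { { NO_CONTEXT } };

        asid_cache[0] = MMU_CONTEXT_FIRST_VERSION;
        get_mmu_context(&a, 0);
        get_mmu_context(&b, 0);
        printf("cpu0: mm a -> ASID %lu, mm b -> ASID %lu\n",
               a.id[0] & MMU_CONTEXT_ASID_MASK,
               b.id[0] & MMU_CONTEXT_ASID_MASK);
        return 0;
}

Because each CPU keeps its own asid_cache, two CPUs may hand out the same 8-bit ASID to different address spaces without conflict: an ASID only needs to be unique within a single CPU's TLB.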
-rw-r--r--    arch/sh/kernel/cpu/init.c       11
-rw-r--r--    arch/sh/kernel/process.c        66
-rw-r--r--    arch/sh/mm/init.c                5
-rw-r--r--    arch/sh/mm/tlb-flush.c          26
-rw-r--r--    include/asm-sh/mmu.h            20
-rw-r--r--    include/asm-sh/mmu_context.h    61
-rw-r--r--    include/asm-sh/processor.h       1
7 files changed, 97 insertions, 93 deletions
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 48121766e8d2..6c3c7687e81f 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -3,7 +3,7 @@
  *
  * CPU init code
  *
- * Copyright (C) 2002, 2003 Paul Mundt
+ * Copyright (C) 2002 - 2006 Paul Mundt
  * Copyright (C) 2003 Richard Curnow
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -12,6 +12,8 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -218,6 +220,12 @@ asmlinkage void __init sh_cpu_init(void)
                 clear_used_math();
         }
 
+        /*
+         * Initialize the per-CPU ASID cache very early, since the
+         * TLB flushing routines depend on this being setup.
+         */
+        current_cpu_data.asid_cache = NO_CONTEXT;
+
 #ifdef CONFIG_SH_DSP
         /* Probe for DSP */
         dsp_init();
@@ -240,4 +248,3 @@ asmlinkage void __init sh_cpu_init(void)
         ubc_wakeup();
 #endif
 }
-
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index cc8f306fd682..0298f0faa6e6 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -1,42 +1,30 @@
-/* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
+/*
+ * arch/sh/kernel/process.c
  *
- * linux/arch/sh/kernel/process.c
+ * This file handles the architecture-dependent parts of process handling..
  *
  * Copyright (C) 1995 Linus Torvalds
  *
  * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
  *                 Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
+ *                 Copyright (C) 2002 - 2006 Paul Mundt
  */
-
-/*
- * This file handles the architecture-dependent parts of process handling..
- */
-
 #include <linux/module.h>
-#include <linux/unistd.h>
 #include <linux/mm.h>
 #include <linux/elfcore.h>
-#include <linux/a.out.h>
-#include <linux/slab.h>
 #include <linux/pm.h>
-#include <linux/ptrace.h>
 #include <linux/kallsyms.h>
 #include <linux/kexec.h>
-
-#include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
-#include <asm/elf.h>
 #include <asm/ubc.h>
 
-static int hlt_counter=0;
-
+static int hlt_counter;
 int ubc_usercnt = 0;
 
 #define HARD_IDLE_TIMEOUT (HZ / 3)
 
 void (*pm_idle)(void);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -44,14 +32,12 @@ void disable_hlt(void)
 {
         hlt_counter++;
 }
-
 EXPORT_SYMBOL(disable_hlt);
 
 void enable_hlt(void)
 {
         hlt_counter--;
 }
-
 EXPORT_SYMBOL(enable_hlt);
 
 void default_idle(void)
@@ -152,19 +138,21 @@ __asm__(".align 5\n"
         ".align 2\n\t"
         "1:.long do_exit");
 
+/* Don't use this in BL=1(cli). Or else, CPU resets! */
 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-{       /* Don't use this in BL=1(cli). Or else, CPU resets! */
+{
         struct pt_regs regs;
 
         memset(&regs, 0, sizeof(regs));
-        regs.regs[4] = (unsigned long) arg;
-        regs.regs[5] = (unsigned long) fn;
+        regs.regs[4] = (unsigned long)arg;
+        regs.regs[5] = (unsigned long)fn;
 
-        regs.pc = (unsigned long) kernel_thread_helper;
+        regs.pc = (unsigned long)kernel_thread_helper;
         regs.sr = (1 << 30);
 
         /* Ok, create the new process.. */
-        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+                       &regs, 0, NULL, NULL);
 }
 
 /*
@@ -211,21 +199,20 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
         return fpvalid;
 }
 
 /*
  * Capture the user space registers if the task is not running (in user space)
  */
 int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
         struct pt_regs ptregs;
 
         ptregs = *task_pt_regs(tsk);
         elf_core_copy_regs(regs, &ptregs);
 
         return 1;
 }
 
-int
-dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
 {
         int fpvalid = 0;
 
@@ -263,12 +250,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                 childregs->regs[15] = usp;
                 ti->addr_limit = USER_DS;
         } else {
-                childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+                childregs->regs[15] = (unsigned long)task_stack_page(p) +
+                                                THREAD_SIZE;
                 ti->addr_limit = KERNEL_DS;
         }
-        if (clone_flags & CLONE_SETTLS) {
+
+        if (clone_flags & CLONE_SETTLS)
                 childregs->gbr = childregs->regs[0];
-        }
+
         childregs->regs[0] = 0; /* Set return value for child */
 
         p->thread.sp = (unsigned long) childregs;
@@ -280,8 +269,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 }
 
 /* Tracing by user break controller. */
-static void
-ubc_set_tracing(int asid, unsigned long pc)
+static void ubc_set_tracing(int asid, unsigned long pc)
 {
 #if defined(CONFIG_CPU_SH4A)
         unsigned long val;
@@ -297,7 +285,7 @@ ubc_set_tracing(int asid, unsigned long pc)
         val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
         ctrl_outl(val, UBC_CRR0);
 
-        /* Read UBC register that we writed last. For chekking UBC Register changed */
+        /* Read UBC register that we wrote last, for checking update */
         val = ctrl_inl(UBC_CRR0);
 
 #else /* CONFIG_CPU_SH4A */
@@ -325,7 +313,8 @@ ubc_set_tracing(int asid, unsigned long pc)
  * switch_to(x,y) should switch tasks from x to y.
  *
  */
-struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
+struct task_struct *__switch_to(struct task_struct *prev,
+                                struct task_struct *next)
 {
 #if defined(CONFIG_SH_FPU)
         unlazy_fpu(prev, task_pt_regs(prev));
@@ -354,7 +343,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
 #ifdef CONFIG_MMU
         /*
          * Restore the kernel mode register
-         *      k7 (r7_bank1)
+         *      k7 (r7_bank1)
          */
         asm volatile("ldc       %0, r7_bank"
                      : /* no output */
@@ -367,7 +356,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
         else if (next->thread.ubc_pc && next->mm) {
                 int asid = 0;
 #ifdef CONFIG_MMU
-                asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
+                asid |= cpu_asid(smp_processor_id(), next->mm);
 #endif
                 ubc_set_tracing(asid, next->thread.ubc_pc);
         } else {
@@ -405,7 +394,8 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
         if (!newsp)
                 newsp = regs->regs[15];
         return do_fork(clone_flags, newsp, regs, 0,
-                       (int __user *)parent_tidptr, (int __user *)child_tidptr);
+                       (int __user *)parent_tidptr,
+                       (int __user *)child_tidptr);
 }
 
 /*
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index bf0c263cb6fd..d172065182fb 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -39,11 +39,6 @@
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-/*
- * Cache of MMU context last used.
- */
-unsigned long mmu_context_cache = NO_CONTEXT;
-
 #ifdef CONFIG_MMU
 /* It'd be good if these lines were in the standard header file. */
 #define START_PFN       (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index ef3e4d477864..b829c17c1d17 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -16,12 +16,14 @@
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-        if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
+        unsigned int cpu = smp_processor_id();
+
+        if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
                 unsigned long flags;
                 unsigned long asid;
                 unsigned long saved_asid = MMU_NO_ASID;
 
-                asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
+                asid = cpu_asid(cpu, vma->vm_mm);
                 page &= PAGE_MASK;
 
                 local_irq_save(flags);
@@ -40,22 +42,23 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
 {
         struct mm_struct *mm = vma->vm_mm;
+        unsigned int cpu = smp_processor_id();
 
-        if (mm->context.id != NO_CONTEXT) {
+        if (cpu_context(cpu, mm) != NO_CONTEXT) {
                 unsigned long flags;
                 int size;
 
                 local_irq_save(flags);
                 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-                        mm->context.id = NO_CONTEXT;
+                        cpu_context(cpu, mm) = NO_CONTEXT;
                         if (mm == current->mm)
-                                activate_context(mm);
+                                activate_context(mm, cpu);
                 } else {
                         unsigned long asid;
                         unsigned long saved_asid = MMU_NO_ASID;
 
-                        asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
+                        asid = cpu_asid(cpu, mm);
                         start &= PAGE_MASK;
                         end += (PAGE_SIZE - 1);
                         end &= PAGE_MASK;
@@ -76,6 +79,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
+        unsigned int cpu = smp_processor_id();
         unsigned long flags;
         int size;
 
@@ -87,7 +91,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                 unsigned long asid;
                 unsigned long saved_asid = get_asid();
 
-                asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
+                asid = cpu_asid(cpu, &init_mm);
                 start &= PAGE_MASK;
                 end += (PAGE_SIZE - 1);
                 end &= PAGE_MASK;
@@ -103,15 +107,17 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
+        unsigned int cpu = smp_processor_id();
+
         /* Invalidate all TLB of this process. */
         /* Instead of invalidating each TLB, we get new MMU context. */
-        if (mm->context.id != NO_CONTEXT) {
+        if (cpu_context(cpu, mm) != NO_CONTEXT) {
                 unsigned long flags;
 
                 local_irq_save(flags);
-                mm->context.id = NO_CONTEXT;
+                cpu_context(cpu, mm) = NO_CONTEXT;
                 if (mm == current->mm)
-                        activate_context(mm);
+                        activate_context(mm, cpu);
                 local_irq_restore(flags);
         }
 }
diff --git a/include/asm-sh/mmu.h b/include/asm-sh/mmu.h
index cf47df79bb94..eb0358c097d0 100644
--- a/include/asm-sh/mmu.h
+++ b/include/asm-sh/mmu.h
@@ -1,25 +1,19 @@
 #ifndef __MMU_H
 #define __MMU_H
 
-#if !defined(CONFIG_MMU)
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_id_t[NR_CPUS];
 
 typedef struct {
+#ifdef CONFIG_MMU
+        mm_context_id_t id;
+        void *vdso;
+#else
         struct vm_list_struct *vmlist;
         unsigned long end_brk;
+#endif
 } mm_context_t;
 
-#else
-
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_id_t;
-
-typedef struct {
-        mm_context_id_t id;
-        void *vdso;
-} mm_context_t;
-
-#endif /* CONFIG_MMU */
-
 /*
  * Privileged Space Mapping Buffer (PMB) definitions
  */
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 46f04e23bd45..342024425b7d 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2006 Paul Mundt
  *
  * ASID handling idea taken from MIPS implementation.
  */
@@ -19,11 +19,6 @@
  * (b) ASID (Address Space IDentifier)
  */
 
-/*
- * Cache of MMU context last used.
- */
-extern unsigned long mmu_context_cache;
-
 #define MMU_CONTEXT_ASID_MASK           0x000000ff
 #define MMU_CONTEXT_VERSION_MASK        0xffffff00
 #define MMU_CONTEXT_FIRST_VERSION       0x00000100
@@ -32,6 +27,11 @@ extern unsigned long mmu_context_cache;
 /* ASID is 8-bit value, so it can't be 0x100 */
 #define MMU_NO_ASID                     0x100
 
+#define cpu_context(cpu, mm)    ((mm)->context.id[cpu])
+#define cpu_asid(cpu, mm)       (cpu_context((cpu), (mm)) & \
+                                 MMU_CONTEXT_ASID_MASK)
+#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)
+
 /*
  * Virtual Page Number mask
  */
@@ -41,18 +41,17 @@ extern unsigned long mmu_context_cache;
 /*
  * Get MMU context if needed.
  */
-static inline void get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 {
-        unsigned long mc = mmu_context_cache;
+        unsigned long asid = asid_cache(cpu);
 
         /* Check if we have old version of context. */
-        if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+        if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
                 /* It's up to date, do nothing */
                 return;
 
         /* It's old, we need to get new context with new version. */
-        mc = ++mmu_context_cache;
-        if (!(mc & MMU_CONTEXT_ASID_MASK)) {
+        if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
                 /*
                  * We exhaust ASID of this version.
                  * Flush all TLB and start new cycle.
@@ -63,10 +62,11 @@ static inline void get_mmu_context(struct mm_struct *mm)
                  * Fix version; Note that we avoid version #0
                  * to distingush NO_CONTEXT.
                  */
-                if (!mc)
-                        mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
+                if (!asid)
+                        asid = MMU_CONTEXT_FIRST_VERSION;
         }
-        mm->context.id = mc;
+
+        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
 /*
@@ -74,9 +74,13 @@ static inline void get_mmu_context(struct mm_struct *mm)
  * instance.
  */
 static inline int init_new_context(struct task_struct *tsk,
                                    struct mm_struct *mm)
 {
-        mm->context.id = NO_CONTEXT;
+        int i;
+
+        for (i = 0; i < num_online_cpus(); i++)
+                cpu_context(i, mm) = NO_CONTEXT;
+
         return 0;
 }
 
@@ -117,10 +121,10 @@ static inline unsigned long get_asid(void)
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
 {
-        get_mmu_context(mm);
-        set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
+        get_mmu_context(mm, cpu);
+        set_asid(cpu_asid(cpu, mm));
 }
 
 /* MMU_TTB is used for optimizing the fault handling. */
@@ -138,10 +142,15 @@ static inline void switch_mm(struct mm_struct *prev,
                              struct mm_struct *next,
                              struct task_struct *tsk)
 {
+        unsigned int cpu = smp_processor_id();
+
         if (likely(prev != next)) {
+                cpu_set(cpu, next->cpu_vm_mask);
                 set_TTB(next->pgd);
-                activate_context(next);
-        }
+                activate_context(next, cpu);
+        } else
+                if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+                        activate_context(next, cpu);
 }
 
 #define deactivate_mm(tsk,mm)           do { } while (0)
@@ -159,7 +168,7 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #define destroy_context(mm)             do { } while (0)
 #define set_asid(asid)                  do { } while (0)
 #define get_asid()                      (0)
-#define activate_context(mm)            do { } while (0)
+#define activate_context(mm,cpu)        do { } while (0)
 #define switch_mm(prev,next,tsk)        do { } while (0)
 #define deactivate_mm(tsk,mm)           do { } while (0)
 #define activate_mm(prev,next)          do { } while (0)
@@ -174,14 +183,16 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
  */
 static inline void enable_mmu(void)
 {
+        unsigned int cpu = smp_processor_id();
+
         /* Enable MMU */
         ctrl_outl(MMU_CONTROL_INIT, MMUCR);
         ctrl_barrier();
 
-        if (mmu_context_cache == NO_CONTEXT)
-                mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
+        if (asid_cache(cpu) == NO_CONTEXT)
+                asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;
 
-        set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
+        set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
 }
 
 static inline void disable_mmu(void)
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index e29f2abb92de..da229aae8e0f 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -66,6 +66,7 @@ enum cpu_type {
 struct sh_cpuinfo {
         unsigned int type;
         unsigned long loops_per_jiffy;
+        unsigned long asid_cache;
 
         struct cache_info icache;       /* Primary I-cache */
         struct cache_info dcache;       /* Primary D-cache */