author    | Paul Mundt <lethal@linux-sh.org> | 2006-12-24 19:51:47 -0500
committer | Paul Mundt <lethal@linux-sh.org> | 2007-02-12 20:54:45 -0500
commit    | aec5e0e1c179fac4bbca4007a3f0d3107275a73c (patch)
tree      | 3b251e52a89445a5546f398fb16a002435b6c2b6 /arch/sh/kernel/process.c
parent    | 506b85f4114b912d2e91fab8da9849289e43857f (diff)
sh: Use a per-cpu ASID cache.
Previously this was implemented using a global cache; cache
it per-CPU instead and bump up the number of context IDs to
match NR_CPUS.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel/process.c')
-rw-r--r-- | arch/sh/kernel/process.c | 66 |
1 file changed, 28 insertions, 38 deletions
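For a concrete picture of the change before reading the diff, here is a minimal sketch of what a per-CPU ASID cache can look like. Only cpu_asid(), MMU_CONTEXT_ASID_MASK, smp_processor_id() and NR_CPUS come from the patch or the kernel proper; every `sketch_`-prefixed name is hypothetical and is not the actual arch/sh implementation, which lives in the SH mmu_context header rather than in process.c.

```c
#include <linux/threads.h>	/* NR_CPUS */

#define MMU_CONTEXT_ASID_MASK	0x000000ff	/* low bits of a context ID hold the ASID */

/* Hypothetical per-mm context: one context ID slot per possible CPU,
 * instead of a single ID shared by all CPUs. */
struct sketch_mm_context {
	unsigned long id[NR_CPUS];
};

/* Hypothetical per-CPU replacement for the old single, global ASID cache:
 * each CPU tracks the last context generation it handed out. */
static unsigned long sketch_asid_cache[NR_CPUS];

/* What cpu_asid(cpu, mm), used in __switch_to() below, is assumed to boil
 * down to: the ASID bits of the context ID this mm holds on that CPU. */
static inline unsigned long sketch_cpu_asid(unsigned int cpu,
					    const struct sketch_mm_context *ctx)
{
	return ctx->id[cpu] & MMU_CONTEXT_ASID_MASK;
}
```

The diff below only consumes cpu_asid() at the one place in this file where the UBC tracing path needs an ASID.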
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index cc8f306fd682..0298f0faa6e6 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -1,42 +1,30 @@
-/* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
+/*
+ * arch/sh/kernel/process.c
  *
- * linux/arch/sh/kernel/process.c
+ * This file handles the architecture-dependent parts of process handling..
  *
  * Copyright (C) 1995  Linus Torvalds
  *
  * SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
  * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
+ * Copyright (C) 2002 - 2006  Paul Mundt
  */
-
-/*
- * This file handles the architecture-dependent parts of process handling..
- */
-
 #include <linux/module.h>
-#include <linux/unistd.h>
 #include <linux/mm.h>
 #include <linux/elfcore.h>
-#include <linux/a.out.h>
-#include <linux/slab.h>
 #include <linux/pm.h>
-#include <linux/ptrace.h>
 #include <linux/kallsyms.h>
 #include <linux/kexec.h>
-
-#include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
-#include <asm/elf.h>
 #include <asm/ubc.h>
 
-static int hlt_counter=0;
-
+static int hlt_counter;
 int ubc_usercnt = 0;
 
 #define HARD_IDLE_TIMEOUT (HZ / 3)
 
 void (*pm_idle)(void);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -152,19 +138,21 @@ __asm__(".align 5\n"
	".align 2\n\t"
	"1:.long do_exit");
 
+/* Don't use this in BL=1(cli). Or else, CPU resets! */
 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-{	/* Don't use this in BL=1(cli). Or else, CPU resets! */
+{
 	struct pt_regs regs;
 
 	memset(&regs, 0, sizeof(regs));
-	regs.regs[4] = (unsigned long) arg;
-	regs.regs[5] = (unsigned long) fn;
+	regs.regs[4] = (unsigned long)arg;
+	regs.regs[5] = (unsigned long)fn;
 
-	regs.pc = (unsigned long) kernel_thread_helper;
+	regs.pc = (unsigned long)kernel_thread_helper;
 	regs.sr = (1 << 30);
 
 	/* Ok, create the new process.. */
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+		       &regs, 0, NULL, NULL);
 }
 
 /*
@@ -211,21 +199,20 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 	return fpvalid;
 }
 
 /*
  * Capture the user space registers if the task is not running (in user space)
  */
 int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
 	struct pt_regs ptregs;
 
 	ptregs = *task_pt_regs(tsk);
 	elf_core_copy_regs(regs, &ptregs);
 
 	return 1;
 }
 
-int
-dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
 {
 	int fpvalid = 0;
 
@@ -263,12 +250,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 		childregs->regs[15] = usp;
 		ti->addr_limit = USER_DS;
 	} else {
-		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+		childregs->regs[15] = (unsigned long)task_stack_page(p) +
+						THREAD_SIZE;
 		ti->addr_limit = KERNEL_DS;
 	}
-	if (clone_flags & CLONE_SETTLS) {
+
+	if (clone_flags & CLONE_SETTLS)
 		childregs->gbr = childregs->regs[0];
-	}
+
 	childregs->regs[0] = 0;	/* Set return value for child */
 
 	p->thread.sp = (unsigned long) childregs;
@@ -280,8 +269,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 }
 
 /* Tracing by user break controller. */
-static void
-ubc_set_tracing(int asid, unsigned long pc)
+static void ubc_set_tracing(int asid, unsigned long pc)
 {
 #if defined(CONFIG_CPU_SH4A)
 	unsigned long val;
@@ -297,7 +285,7 @@ ubc_set_tracing(int asid, unsigned long pc)
 	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
 	ctrl_outl(val, UBC_CRR0);
 
-	/* Read UBC register that we writed last. For chekking UBC Register changed */
+	/* Read UBC register that we wrote last, for checking update */
 	val = ctrl_inl(UBC_CRR0);
 
 #else	/* CONFIG_CPU_SH4A */
@@ -325,7 +313,8 @@ ubc_set_tracing(int asid, unsigned long pc)
  * switch_to(x,y) should switch tasks from x to y.
  *
  */
-struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
+struct task_struct *__switch_to(struct task_struct *prev,
+				struct task_struct *next)
 {
 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
@@ -354,7 +343,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
 #ifdef CONFIG_MMU
 	/*
 	 * Restore the kernel mode register
-	 * 	k7 (r7_bank1)
+	 *	k7 (r7_bank1)
 	 */
 	asm volatile("ldc	%0, r7_bank"
 		     : /* no output */
@@ -367,7 +356,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
 	else if (next->thread.ubc_pc && next->mm) {
 		int asid = 0;
 #ifdef CONFIG_MMU
-		asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
+		asid |= cpu_asid(smp_processor_id(), next->mm);
 #endif
 		ubc_set_tracing(asid, next->thread.ubc_pc);
 	} else {
@@ -405,7 +394,8 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
 	if (!newsp)
 		newsp = regs->regs[15];
 	return do_fork(clone_flags, newsp, regs, 0,
-		       (int __user *)parent_tidptr, (int __user *)child_tidptr);
+		       (int __user *)parent_tidptr,
+		       (int __user *)child_tidptr);
 }
 
 /*
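The only behavioural change in this file is the hunk at old line 370: the UBC tracing path stops masking the mm's single, global context ID and instead asks for the ASID this mm holds on the CPU doing the switch. A condensed before/after, with cpu_asid() assumed to behave roughly as in the sketch near the top of this page:

```c
/* Before: one context.id per mm, shared by every CPU. */
asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;

/* After: index by the switching CPU, so each CPU can carry its own
 * ASID for the same mm (cpu_asid() itself is defined in the SH
 * mmu_context header, not in this file). */
asid |= cpu_asid(smp_processor_id(), next->mm);
```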