author	Jeff Dike <jdike@addtoit.com>	2007-10-16 04:27:00 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:43:05 -0400
commit	ba180fd437156f7fd8cfb2fdd021d949eeef08d6
tree	b9f38b9cdd7a5b1aacf00341d1948314663c5871 /arch/um/kernel/process.c
parent	77bf4400319db9d2a8af6b00c2be6faa0f3d07cb
uml: style fixes pass 3
Formatting changes in the files which have been changed in the course of folding foo_skas functions into their callers.  These include:
	copyright updates
	header file trimming
	style fixes
	adding severity to printks

These changes should be entirely non-functional.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
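As a rough illustration of the kinds of cleanups listed in the message above (a made-up sketch, not taken from this commit; the error path and message are hypothetical), the style pass turns code like this:

	if(err < 0){
		printk("copy_thread : failed, errno = %d\n", -err);
		return(err);
	}

into the CodingStyle-conformant form, with a severity level added to the printk:

	if (err < 0) {
		printk(KERN_ERR "copy_thread: failed, errno = %d\n", -err);
		return err;
	}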
Diffstat (limited to 'arch/um/kernel/process.c')
-rw-r--r--	arch/um/kernel/process.c	110
1 files changed, 47 insertions, 63 deletions
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index d3b9c62e73c7..7c037fa9c5b8 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -1,51 +1,29 @@
 /*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Copyright 2003 PathScale, Inc.
  * Licensed under the GPL
  */
 
-#include "linux/kernel.h"
-#include "linux/sched.h"
-#include "linux/interrupt.h"
-#include "linux/string.h"
+#include "linux/stddef.h"
+#include "linux/err.h"
+#include "linux/hardirq.h"
 #include "linux/mm.h"
-#include "linux/slab.h"
-#include "linux/utsname.h"
-#include "linux/fs.h"
-#include "linux/utime.h"
-#include "linux/smp_lock.h"
-#include "linux/module.h"
-#include "linux/init.h"
-#include "linux/capability.h"
-#include "linux/vmalloc.h"
-#include "linux/spinlock.h"
+#include "linux/personality.h"
 #include "linux/proc_fs.h"
 #include "linux/ptrace.h"
 #include "linux/random.h"
-#include "linux/personality.h"
-#include "asm/unistd.h"
-#include "asm/mman.h"
-#include "asm/segment.h"
-#include "asm/stat.h"
+#include "linux/sched.h"
+#include "linux/threads.h"
 #include "asm/pgtable.h"
-#include "asm/processor.h"
-#include "asm/tlbflush.h"
 #include "asm/uaccess.h"
-#include "asm/user.h"
-#include "kern_util.h"
 #include "as-layout.h"
-#include "kern.h"
-#include "signal_kern.h"
-#include "init.h"
-#include "irq_user.h"
-#include "mem_user.h"
-#include "tlb.h"
-#include "frame_kern.h"
-#include "sigcontext.h"
+#include "kern_util.h"
 #include "os.h"
 #include "skas.h"
+#include "tlb.h"
 
-/* This is a per-cpu array. A processor only modifies its entry and it only
+/*
+ * This is a per-cpu array. A processor only modifies its entry and it only
  * cares about its entry, so it's OK if another processor is modifying its
  * entry.
  */
@@ -54,15 +32,15 @@ struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
 static inline int external_pid(struct task_struct *task)
 {
 	/* FIXME: Need to look up userspace_pid by cpu */
-	return(userspace_pid[0]);
+	return userspace_pid[0];
 }
 
 int pid_to_processor_id(int pid)
 {
 	int i;
 
-	for(i = 0; i < ncpus; i++){
-		if(cpu_tasks[i].pid == pid)
+	for(i = 0; i < ncpus; i++) {
+		if (cpu_tasks[i].pid == pid)
 			return i;
 	}
 	return -1;
@@ -118,7 +96,7 @@ void *_switch_to(void *prev, void *next, void *last)
 	current->thread.saved_task = NULL;
 
 	/* XXX need to check runqueues[cpu].idle */
-	if(current->pid == 0)
+	if (current->pid == 0)
 		switch_timers(0);
 
 	switch_threads(&from->thread.switch_buf,
@@ -126,10 +104,10 @@ void *_switch_to(void *prev, void *next, void *last)
 
 	arch_switch_to(current->thread.prev_sched, current);
 
-	if(current->pid == 0)
+	if (current->pid == 0)
 		switch_timers(1);
 
-	if(current->thread.saved_task)
+	if (current->thread.saved_task)
 		show_regs(&(current->thread.regs));
 	next= current->thread.saved_task;
 	prev= current;
@@ -141,9 +119,9 @@ void *_switch_to(void *prev, void *next, void *last)
 
 void interrupt_end(void)
 {
-	if(need_resched())
+	if (need_resched())
 		schedule();
-	if(test_tsk_thread_flag(current, TIF_SIGPENDING))
+	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
 		do_signal();
 }
 
@@ -158,7 +136,8 @@ void *get_current(void)
 
 extern void schedule_tail(struct task_struct *prev);
 
-/* This is called magically, by its address being stuffed in a jmp_buf
+/*
+ * This is called magically, by its address being stuffed in a jmp_buf
  * and being longjmp-d to.
  */
 void new_thread_handler(void)
@@ -166,18 +145,19 @@ void new_thread_handler(void)
 	int (*fn)(void *), n;
 	void *arg;
 
-	if(current->thread.prev_sched != NULL)
+	if (current->thread.prev_sched != NULL)
 		schedule_tail(current->thread.prev_sched);
 	current->thread.prev_sched = NULL;
 
 	fn = current->thread.request.u.thread.proc;
 	arg = current->thread.request.u.thread.arg;
 
-	/* The return value is 1 if the kernel thread execs a process,
+	/*
+	 * The return value is 1 if the kernel thread execs a process,
 	 * 0 if it just exits
 	 */
 	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
-	if(n == 1){
+	if (n == 1) {
 		/* Handle any immediate reschedules or signals */
 		interrupt_end();
 		userspace(&current->thread.regs.regs);
@@ -189,14 +169,16 @@ void new_thread_handler(void)
 void fork_handler(void)
 {
 	force_flush_all();
-	if(current->thread.prev_sched == NULL)
+	if (current->thread.prev_sched == NULL)
 		panic("blech");
 
 	schedule_tail(current->thread.prev_sched);
 
-	/* XXX: if interrupt_end() calls schedule, this call to
+	/*
+	 * XXX: if interrupt_end() calls schedule, this call to
 	 * arch_switch_to isn't needed. We could want to apply this to
-	 * improve performance. -bb */
+	 * improve performance. -bb
+	 */
 	arch_switch_to(current->thread.prev_sched, current);
 
 	current->thread.prev_sched = NULL;
@@ -216,11 +198,11 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 
 	p->thread = (struct thread_struct) INIT_THREAD;
 
-	if(current->thread.forking){
+	if (current->thread.forking) {
 		memcpy(&p->thread.regs.regs, &regs->regs,
 		       sizeof(p->thread.regs.regs));
 		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.regs, 0);
-		if(sp != 0)
+		if (sp != 0)
 			REGS_SP(p->thread.regs.regs.regs) = sp;
 
 		handler = fork_handler;
@@ -259,14 +241,14 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
 
 void default_idle(void)
 {
-	while(1){
+	while(1) {
 		/* endless idle loop with no priority at all */
 
 		/*
 		 * although we are an idle CPU, we do not want to
 		 * get into the scheduler unnecessarily.
 		 */
-		if(need_resched())
+		if (need_resched())
 			schedule();
 
 		idle_sleep(10);
@@ -288,26 +270,26 @@ void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
 	pte_t *pte;
 	pte_t ptent;
 
-	if(task->mm == NULL)
+	if (task->mm == NULL)
 		return ERR_PTR(-EINVAL);
 	pgd = pgd_offset(task->mm, addr);
-	if(!pgd_present(*pgd))
+	if (!pgd_present(*pgd))
 		return ERR_PTR(-EINVAL);
 
 	pud = pud_offset(pgd, addr);
-	if(!pud_present(*pud))
+	if (!pud_present(*pud))
 		return ERR_PTR(-EINVAL);
 
 	pmd = pmd_offset(pud, addr);
-	if(!pmd_present(*pmd))
+	if (!pmd_present(*pmd))
 		return ERR_PTR(-EINVAL);
 
 	pte = pte_offset_kernel(pmd, addr);
 	ptent = *pte;
-	if(!pte_present(ptent))
+	if (!pte_present(ptent))
 		return ERR_PTR(-EINVAL);
 
-	if(pte_out != NULL)
+	if (pte_out != NULL)
 		*pte_out = ptent;
 	return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
 }
@@ -380,7 +362,7 @@ int smp_sigio_handler(void)
 #ifdef CONFIG_SMP
 	int cpu = current_thread->cpu;
 	IPI_handler(cpu);
-	if(cpu != 0)
+	if (cpu != 0)
 		return 1;
 #endif
 	return 0;
@@ -408,7 +390,8 @@ int get_using_sysemu(void)
 
 static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
 {
-	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /*No overflow*/
+	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size)
+		/* No overflow */
 		*eof = 1;
 
 	return strlen(buf);
@@ -423,7 +406,8 @@ static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned
 
 	if (tmp[0] >= '0' && tmp[0] <= '2')
 		set_using_sysemu(tmp[0] - '0');
-	return count; /*We use the first char, but pretend to write everything*/
+	/* We use the first char, but pretend to write everything */
+	return count;
 }
 
 int __init make_proc_sysemu(void)
@@ -453,10 +437,10 @@ int singlestepping(void * t)
 	struct task_struct *task = t ? t : current;
 
 	if ( ! (task->ptrace & PT_DTRACE) )
-		return(0);
+		return 0;
 
 	if (task->thread.singlestep_syscall)
-		return(1);
+		return 1;
 
 	return 2;
 }