Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/Makefile                                                  2
-rw-r--r--  arch/um/kernel/exec.c                                                    4
-rw-r--r--  arch/um/kernel/gmon_syms.c                                              13
-rw-r--r--  arch/um/kernel/irq.c                                                    34
-rw-r--r--  arch/um/kernel/ksyms.c                                                   3
-rw-r--r--  arch/um/kernel/mem.c                                                    10
-rw-r--r--  arch/um/kernel/process.c (renamed from arch/um/kernel/process_kern.c)   36
-rw-r--r--  arch/um/kernel/reboot.c                                                 13
-rw-r--r--  arch/um/kernel/skas/Makefile                                             3
-rw-r--r--  arch/um/kernel/skas/exec.c                                              30
-rw-r--r--  arch/um/kernel/skas/exec_kern.c                                         41
-rw-r--r--  arch/um/kernel/skas/mmu.c                                                2
-rw-r--r--  arch/um/kernel/skas/process.c                                          217
-rw-r--r--  arch/um/kernel/skas/process_kern.c                                     533
-rw-r--r--  arch/um/kernel/time.c                                                   14
-rw-r--r--  arch/um/kernel/tlb.c                                                   370
-rw-r--r--  arch/um/kernel/trap.c                                                   28
-rw-r--r--  arch/um/kernel/um_arch.c                                                 2
18 files changed, 898 insertions(+), 457 deletions(-)
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index a2d93065b2d0..6fa63a2a89e3 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := vmlinux.lds
 clean-files :=
 
 obj-y = config.o exec.o exitcode.o init_task.o irq.o ksyms.o mem.o \
-	physmem.o process_kern.o ptrace.o reboot.o resource.o sigio.o \
+	physmem.o process.o ptrace.o reboot.o resource.o sigio.o \
 	signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o uaccess.o \
 	um_arch.o umid.o
 
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index fc38a6d5906d..0561c43b4685 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -41,9 +41,11 @@ static long execve1(char *file, char __user * __user *argv,
 	long error;
 
 #ifdef CONFIG_TTY_LOG
-	task_lock(current);
+	mutex_lock(&tty_mutex);
+	task_lock(current);	/* FIXME: is this needed ? */
 	log_exec(argv, current->signal->tty);
 	task_unlock(current);
+	mutex_unlock(&tty_mutex);
 #endif
 	error = do_execve(file, argv, env, &current->thread.regs);
 	if (error == 0){
diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
index 2c86e7fdb014..13aa115cd1b4 100644
--- a/arch/um/kernel/gmon_syms.c
+++ b/arch/um/kernel/gmon_syms.c
@@ -5,7 +5,7 @@
 
 #include "linux/module.h"
 
-extern void __bb_init_func(void *);
+extern void __bb_init_func(void *) __attribute__((weak));
 EXPORT_SYMBOL(__bb_init_func);
 
 /* This is defined (and referred to in profiling stub code) only by some GCC
@@ -21,14 +21,3 @@ EXPORT_SYMBOL(__gcov_init);
 
 extern void __gcov_merge_add(void *) __attribute__((weak));
 EXPORT_SYMBOL(__gcov_merge_add);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 589c69a75043..ce7f233fc490 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -142,19 +142,6 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
 				     .events		= events,
 				     .current_events	= 0 } );
 
-	/* Critical section - locked by a spinlock because this stuff can
-	 * be changed from interrupt handlers. The stuff above is done
-	 * outside the lock because it allocates memory.
-	 */
-
-	/* Actually, it only looks like it can be called from interrupt
-	 * context. The culprit is reactivate_fd, which calls
-	 * maybe_sigio_broken, which calls write_sigio_workaround,
-	 * which calls activate_fd. However, write_sigio_workaround should
-	 * only be called once, at boot time. That would make it clear that
-	 * this is called only from process context, and can be locked with
-	 * a semaphore.
-	 */
 	spin_lock_irqsave(&irq_lock, flags);
 	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
 		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
@@ -165,7 +152,6 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
 		}
 	}
 
-	/*-------------*/
 	if (type == IRQ_WRITE)
 		fd = -1;
 
@@ -198,7 +184,6 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
 
 		spin_lock_irqsave(&irq_lock, flags);
 	}
-	/*-------------*/
 
 	*last_irq_ptr = new_fd;
 	last_irq_ptr = &new_fd->next;
@@ -210,14 +195,14 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
 	 */
 	maybe_sigio_broken(fd, (type == IRQ_READ));
 
-	return(0);
+	return 0;
 
  out_unlock:
 	spin_unlock_irqrestore(&irq_lock, flags);
  out_kfree:
 	kfree(new_fd);
  out:
-	return(err);
+	return err;
 }
 
 static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
@@ -302,10 +287,7 @@ void reactivate_fd(int fd, int irqnum)
 	os_set_pollfd(i, irq->fd);
 	spin_unlock_irqrestore(&irq_lock, flags);
 
-	/* This calls activate_fd, so it has to be outside the critical
-	 * section.
-	 */
-	maybe_sigio_broken(fd, (irq->type == IRQ_READ));
+	add_sigio_fd(fd);
 }
 
 void deactivate_fd(int fd, int irqnum)
@@ -316,11 +298,15 @@ void deactivate_fd(int fd, int irqnum)
 
 	spin_lock_irqsave(&irq_lock, flags);
 	irq = find_irq_by_fd(fd, irqnum, &i);
-	if (irq == NULL)
-		goto out;
+	if(irq == NULL){
+		spin_unlock_irqrestore(&irq_lock, flags);
+		return;
+	}
+
 	os_set_pollfd(i, -1);
- out:
 	spin_unlock_irqrestore(&irq_lock, flags);
+
+	ignore_sigio_fd(fd);
 }
 
 int deactivate_all_fds(void)
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index c97045d6d89f..f030e44262ba 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -21,7 +21,6 @@
 #include "mem_user.h"
 #include "os.h"
 
-EXPORT_SYMBOL(stop);
 EXPORT_SYMBOL(uml_physmem);
 EXPORT_SYMBOL(set_signals);
 EXPORT_SYMBOL(get_signals);
@@ -41,12 +40,14 @@ EXPORT_SYMBOL(handle_page_fault);
 EXPORT_SYMBOL(find_iomem);
 
 #ifdef CONFIG_MODE_TT
+EXPORT_SYMBOL(stop);
 EXPORT_SYMBOL(strncpy_from_user_tt);
 EXPORT_SYMBOL(copy_from_user_tt);
 EXPORT_SYMBOL(copy_to_user_tt);
 #endif
 
 #ifdef CONFIG_MODE_SKAS
+EXPORT_SYMBOL(strnlen_user_skas);
 EXPORT_SYMBOL(strncpy_from_user_skas);
 EXPORT_SYMBOL(copy_to_user_skas);
 EXPORT_SYMBOL(copy_from_user_skas);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 61280167c560..c95855ba6ab5 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -79,8 +79,10 @@ void mem_init(void)
 
 	/* this will put all low memory onto the freelists */
 	totalram_pages = free_all_bootmem();
+#ifdef CONFIG_HIGHMEM
 	totalhigh_pages = highmem >> PAGE_SHIFT;
 	totalram_pages += totalhigh_pages;
+#endif
 	num_physpages = totalram_pages;
 	max_pfn = totalram_pages;
 	printk(KERN_INFO "Memory: %luk available\n",
@@ -221,10 +223,14 @@ void paging_init(void)
 
 	empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
 	empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
-	for(i=0;i<sizeof(zones_size)/sizeof(zones_size[0]);i++)
+	for(i = 0; i < ARRAY_SIZE(zones_size); i++)
 		zones_size[i] = 0;
-	zones_size[ZONE_DMA] = (end_iomem >> PAGE_SHIFT) - (uml_physmem >> PAGE_SHIFT);
+
+	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
+		(uml_physmem >> PAGE_SHIFT);
+#ifdef CONFIG_HIGHMEM
 	zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
+#endif
 	free_area_init(zones_size);
 
 	/*
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process.c
index f6a5a502120b..fe6c64abda5b 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process.c
@@ -1,10 +1,9 @@
 /*
  * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
  * Copyright 2003 PathScale, Inc.
  * Licensed under the GPL
  */
 
-#include "linux/config.h"
 #include "linux/kernel.h"
 #include "linux/sched.h"
 #include "linux/interrupt.h"
@@ -23,6 +22,7 @@
 #include "linux/proc_fs.h"
 #include "linux/ptrace.h"
 #include "linux/random.h"
+#include "linux/personality.h"
 #include "asm/unistd.h"
 #include "asm/mman.h"
 #include "asm/segment.h"
@@ -112,11 +112,11 @@ void set_current(void *t)
 
 void *_switch_to(void *prev, void *next, void *last)
 {
 	struct task_struct *from = prev;
 	struct task_struct *to= next;
 
 	to->thread.prev_sched = from;
 	set_current(to);
 
 	do {
 		current->thread.saved_task = NULL ;
@@ -127,7 +127,7 @@ void *_switch_to(void *prev, void *next, void *last)
 		prev= current;
 	} while(current->thread.saved_task);
 
 	return(current->thread.prev_sched);
 
 }
 
@@ -141,19 +141,19 @@ void release_thread(struct task_struct *task)
 {
 	CHOOSE_MODE(release_thread_tt(task), release_thread_skas(task));
 }
 
 void exit_thread(void)
 {
 	unprotect_stack((unsigned long) current_thread);
 }
 
 void *get_current(void)
 {
 	return(current);
 }
 
 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		unsigned long stack_top, struct task_struct * p,
 		struct pt_regs *regs)
 {
 	int ret;
@@ -182,11 +182,11 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
 	int save_kmalloc_ok = kmalloc_ok;
 
 	kmalloc_ok = 0;
 	CHOOSE_MODE_PROC(initial_thread_cb_tt, initial_thread_cb_skas, proc,
 			 arg);
 	kmalloc_ok = save_kmalloc_ok;
 }
 
 unsigned long stack_sp(unsigned long page)
 {
 	return(page + PAGE_SIZE - sizeof(void *));
@@ -210,7 +210,7 @@ void default_idle(void)
 		 */
 		if(need_resched())
 			schedule();
 
 		idle_sleep(10);
 	}
 }
@@ -225,7 +225,7 @@ int page_size(void)
 	return(PAGE_SIZE);
 }
 
 void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
 		      pte_t *pte_out)
 {
 	pgd_t *pgd;
@@ -234,7 +234,7 @@ void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
 	pte_t *pte;
 	pte_t ptent;
 
 	if(task->mm == NULL)
 		return(ERR_PTR(-EINVAL));
 	pgd = pgd_offset(task->mm, addr);
 	if(!pgd_present(*pgd))
@@ -245,7 +245,7 @@ void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
 		return(ERR_PTR(-EINVAL));
 
 	pmd = pmd_offset(pud, addr);
 	if(!pmd_present(*pmd))
 		return(ERR_PTR(-EINVAL));
 
 	pte = pte_offset_kernel(pmd, addr);
@@ -270,7 +270,7 @@ char *current_cmd(void)
 
 void force_sigbus(void)
 {
 	printk(KERN_ERR "Killing pid %d because of a lack of memory\n",
 	       current->pid);
 	lock_kernel();
 	sigaddset(&current->pending.signal, SIGBUS);
@@ -476,7 +476,7 @@ int singlestepping(void * t)
 #ifndef arch_align_stack
 unsigned long arch_align_stack(unsigned long sp)
 {
-	if (randomize_va_space)
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
 		sp -= get_random_int() % 8192;
 	return sp & ~0xf;
 }
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 3ef73bf2e781..f602623644aa 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -22,7 +22,7 @@ static void kill_idlers(int me)
 	struct task_struct *p;
 	int i;
 
-	for(i = 0; i < sizeof(idle_threads)/sizeof(idle_threads[0]); i++){
+	for(i = 0; i < ARRAY_SIZE(idle_threads); i++){
 		p = idle_threads[i];
 		if((p != NULL) && (p->thread.mode.tt.extern_pid != me))
 			os_kill_process(p->thread.mode.tt.extern_pid, 0);
@@ -62,14 +62,3 @@ void machine_halt(void)
 {
 	machine_power_off();
 }
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile
index ea3a8e409a6e..3e3fa7e7e3cf 100644
--- a/arch/um/kernel/skas/Makefile
+++ b/arch/um/kernel/skas/Makefile
@@ -3,8 +3,7 @@
 # Licensed under the GPL
 #
 
-obj-y := clone.o exec_kern.o mem.o mmu.o process_kern.o \
-	syscall.o tlb.o uaccess.o
+obj-y := clone.o exec.o mem.o mmu.o process.o syscall.o tlb.o uaccess.o
 
 # clone.o is in the stub, so it can't be built with profiling
 # GCC hardened also auto-enables -fpic, but we need %ebx so it can't work ->
diff --git a/arch/um/kernel/skas/exec.c b/arch/um/kernel/skas/exec.c
new file mode 100644
index 000000000000..54b795951372
--- /dev/null
+++ b/arch/um/kernel/skas/exec.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/kernel.h"
+#include "asm/current.h"
+#include "asm/page.h"
+#include "asm/signal.h"
+#include "asm/ptrace.h"
+#include "asm/uaccess.h"
+#include "asm/mmu_context.h"
+#include "tlb.h"
+#include "skas.h"
+#include "um_mmu.h"
+#include "os.h"
+
+void flush_thread_skas(void)
+{
+	force_flush_all();
+	switch_mm_skas(&current->mm->context.skas.id);
+}
+
+void start_thread_skas(struct pt_regs *regs, unsigned long eip,
+		       unsigned long esp)
+{
+	set_fs(USER_DS);
+	PT_REGS_IP(regs) = eip;
+	PT_REGS_SP(regs) = esp;
+}
diff --git a/arch/um/kernel/skas/exec_kern.c b/arch/um/kernel/skas/exec_kern.c
deleted file mode 100644
index 77ed7bbab219..000000000000
--- a/arch/um/kernel/skas/exec_kern.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#include "linux/kernel.h"
-#include "asm/current.h"
-#include "asm/page.h"
-#include "asm/signal.h"
-#include "asm/ptrace.h"
-#include "asm/uaccess.h"
-#include "asm/mmu_context.h"
-#include "tlb.h"
-#include "skas.h"
-#include "um_mmu.h"
-#include "os.h"
-
-void flush_thread_skas(void)
-{
-	force_flush_all();
-	switch_mm_skas(&current->mm->context.skas.id);
-}
-
-void start_thread_skas(struct pt_regs *regs, unsigned long eip,
-		       unsigned long esp)
-{
-	set_fs(USER_DS);
-	PT_REGS_IP(regs) = eip;
-	PT_REGS_SP(regs) = esp;
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 624ca238d1fd..79c22707a637 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -55,7 +55,7 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
55 * destroy_context_skas. 55 * destroy_context_skas.
56 */ 56 */
57 57
58 mm->context.skas.last_page_table = pmd_page_kernel(*pmd); 58 mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
59#ifdef CONFIG_3_LEVEL_PGTABLES 59#ifdef CONFIG_3_LEVEL_PGTABLES
60 mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud)); 60 mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
61#endif 61#endif
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
new file mode 100644
index 000000000000..ae4fa71d3b8b
--- /dev/null
+++ b/arch/um/kernel/skas/process.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/sched.h"
+#include "linux/slab.h"
+#include "linux/ptrace.h"
+#include "linux/proc_fs.h"
+#include "linux/file.h"
+#include "linux/errno.h"
+#include "linux/init.h"
+#include "asm/uaccess.h"
+#include "asm/atomic.h"
+#include "kern_util.h"
+#include "skas.h"
+#include "os.h"
+#include "user_util.h"
+#include "tlb.h"
+#include "kern.h"
+#include "mode.h"
+#include "registers.h"
+
+void switch_to_skas(void *prev, void *next)
+{
+	struct task_struct *from, *to;
+
+	from = prev;
+	to = next;
+
+	/* XXX need to check runqueues[cpu].idle */
+	if(current->pid == 0)
+		switch_timers(0);
+
+	switch_threads(&from->thread.mode.skas.switch_buf,
+		       &to->thread.mode.skas.switch_buf);
+
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
+	if(current->pid == 0)
+		switch_timers(1);
+}
+
+extern void schedule_tail(struct task_struct *prev);
+
+/* This is called magically, by its address being stuffed in a jmp_buf
+ * and being longjmp-d to.
+ */
+void new_thread_handler(void)
+{
+	int (*fn)(void *), n;
+	void *arg;
+
+	if(current->thread.prev_sched != NULL)
+		schedule_tail(current->thread.prev_sched);
+	current->thread.prev_sched = NULL;
+
+	fn = current->thread.request.u.thread.proc;
+	arg = current->thread.request.u.thread.arg;
+
+	/* The return value is 1 if the kernel thread execs a process,
+	 * 0 if it just exits
+	 */
+	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
+	if(n == 1){
+		/* Handle any immediate reschedules or signals */
+		interrupt_end();
+		userspace(&current->thread.regs.regs);
+	}
+	else do_exit(0);
+}
+
+void release_thread_skas(struct task_struct *task)
+{
+}
+
+/* Called magically, see new_thread_handler above */
+void fork_handler(void)
+{
+	force_flush_all();
+	if(current->thread.prev_sched == NULL)
+		panic("blech");
+
+	schedule_tail(current->thread.prev_sched);
+
+	/* XXX: if interrupt_end() calls schedule, this call to
+	 * arch_switch_to_skas isn't needed. We could want to apply this to
+	 * improve performance. -bb */
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
+	current->thread.prev_sched = NULL;
+
+/* Handle any immediate reschedules or signals */
+	interrupt_end();
+
+	userspace(&current->thread.regs.regs);
+}
+
+int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
+		     unsigned long stack_top, struct task_struct * p,
+		     struct pt_regs *regs)
+{
+	void (*handler)(void);
+
+	if(current->thread.forking){
+		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
+		       sizeof(p->thread.regs.regs.skas));
+		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
+		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
+
+		handler = fork_handler;
+
+		arch_copy_thread(&current->thread.arch, &p->thread.arch);
+	}
+	else {
+		init_thread_registers(&p->thread.regs.regs);
+		p->thread.request.u.thread = current->thread.request.u.thread;
+		handler = new_thread_handler;
+	}
+
+	new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
+		   handler);
+	return(0);
+}
+
+int new_mm(unsigned long stack)
+{
+	int fd;
+
+	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
+	if(fd < 0)
+		return(fd);
+
+	if(skas_needs_stub)
+		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
+
+	return(fd);
+}
+
+void init_idle_skas(void)
+{
+	cpu_tasks[current_thread->cpu].pid = os_getpid();
+	default_idle();
+}
+
+extern void start_kernel(void);
+
+static int start_kernel_proc(void *unused)
+{
+	int pid;
+
+	block_signals();
+	pid = os_getpid();
+
+	cpu_tasks[0].pid = pid;
+	cpu_tasks[0].task = current;
+#ifdef CONFIG_SMP
+	cpu_online_map = cpumask_of_cpu(0);
+#endif
+	start_kernel();
+	return(0);
+}
+
+extern int userspace_pid[];
+
+int start_uml_skas(void)
+{
+	if(proc_mm)
+		userspace_pid[0] = start_userspace(0);
+
+	init_new_thread_signals();
+
+	init_task.thread.request.u.thread.proc = start_kernel_proc;
+	init_task.thread.request.u.thread.arg = NULL;
+	return(start_idle_thread(task_stack_page(&init_task),
+				 &init_task.thread.mode.skas.switch_buf));
+}
+
+int external_pid_skas(struct task_struct *task)
+{
+#warning Need to look up userspace_pid by cpu
+	return(userspace_pid[0]);
+}
+
+int thread_pid_skas(struct task_struct *task)
+{
+#warning Need to look up userspace_pid by cpu
+	return(userspace_pid[0]);
+}
+
+void kill_off_processes_skas(void)
+{
+	if(proc_mm)
+#warning need to loop over userspace_pids in kill_off_processes_skas
+		os_kill_ptraced_process(userspace_pid[0], 1);
+	else {
+		struct task_struct *p;
+		int pid, me;
+
+		me = os_getpid();
+		for_each_process(p){
+			if(p->mm == NULL)
+				continue;
+
+			pid = p->mm->context.skas.id.u.pid;
+			os_kill_ptraced_process(pid, 1);
+		}
+	}
+}
+
+unsigned long current_stub_stack(void)
+{
+	if(current->mm == NULL)
+		return(0);
+
+	return(current->mm->context.skas.id.stack);
+}
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 55caeec8b257..0f3d5d084dc7 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -1,227 +1,484 @@
 /*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright 2003 PathScale, Inc.
  * Licensed under the GPL
  */
 
+#include "linux/config.h"
+#include "linux/kernel.h"
 #include "linux/sched.h"
+#include "linux/interrupt.h"
+#include "linux/string.h"
+#include "linux/mm.h"
 #include "linux/slab.h"
-#include "linux/ptrace.h"
+#include "linux/utsname.h"
-#include "linux/proc_fs.h"
+#include "linux/fs.h"
-#include "linux/file.h"
+#include "linux/utime.h"
-#include "linux/errno.h"
+#include "linux/smp_lock.h"
+#include "linux/module.h"
 #include "linux/init.h"
+#include "linux/capability.h"
+#include "linux/vmalloc.h"
+#include "linux/spinlock.h"
+#include "linux/proc_fs.h"
+#include "linux/ptrace.h"
+#include "linux/random.h"
+#include "linux/personality.h"
+#include "asm/unistd.h"
+#include "asm/mman.h"
+#include "asm/segment.h"
+#include "asm/stat.h"
+#include "asm/pgtable.h"
+#include "asm/processor.h"
+#include "asm/tlbflush.h"
 #include "asm/uaccess.h"
-#include "asm/atomic.h"
+#include "asm/user.h"
-#include "kern_util.h"
-#include "skas.h"
-#include "os.h"
 #include "user_util.h"
-#include "tlb.h"
+#include "kern_util.h"
 #include "kern.h"
+#include "signal_kern.h"
+#include "init.h"
+#include "irq_user.h"
+#include "mem_user.h"
+#include "tlb.h"
+#include "frame_kern.h"
+#include "sigcontext.h"
+#include "os.h"
 #include "mode.h"
-#include "registers.h"
+#include "mode_kern.h"
+#include "choose-mode.h"
+
+/* This is a per-cpu array. A processor only modifies its entry and it only
+ * cares about its entry, so it's OK if another processor is modifying its
+ * entry.
+ */
+struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
+
+int external_pid(void *t)
+{
+	struct task_struct *task = t ? t : current;
+
+	return(CHOOSE_MODE_PROC(external_pid_tt, external_pid_skas, task));
+}
+
+int pid_to_processor_id(int pid)
+{
+	int i;
+
+	for(i = 0; i < ncpus; i++){
+		if(cpu_tasks[i].pid == pid) return(i);
+	}
+	return(-1);
+}
+
+void free_stack(unsigned long stack, int order)
+{
+	free_pages(stack, order);
+}
+
+unsigned long alloc_stack(int order, int atomic)
+{
+	unsigned long page;
+	gfp_t flags = GFP_KERNEL;
+
+	if (atomic)
+		flags = GFP_ATOMIC;
+	page = __get_free_pages(flags, order);
+	if(page == 0)
+		return(0);
+	stack_protections(page);
+	return(page);
+}
+
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+	int pid;
+
+	current->thread.request.u.thread.proc = fn;
+	current->thread.request.u.thread.arg = arg;
+	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
+		      &current->thread.regs, 0, NULL, NULL);
+	if(pid < 0)
+		panic("do_fork failed in kernel_thread, errno = %d", pid);
+	return(pid);
+}
 
-void switch_to_skas(void *prev, void *next)
+void set_current(void *t)
 {
-	struct task_struct *from, *to;
+	struct task_struct *task = t;
 
-	from = prev;
+	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
-	to = next;
+		{ external_pid(task), task });
+}
 
-	/* XXX need to check runqueues[cpu].idle */
+void *_switch_to(void *prev, void *next, void *last)
-	if(current->pid == 0)
+{
-		switch_timers(0);
+	struct task_struct *from = prev;
+	struct task_struct *to= next;
 
-	switch_threads(&from->thread.mode.skas.switch_buf,
+	to->thread.prev_sched = from;
-		       to->thread.mode.skas.switch_buf);
+	set_current(to);
 
-	arch_switch_to_skas(current->thread.prev_sched, current);
+	do {
+		current->thread.saved_task = NULL ;
+		CHOOSE_MODE_PROC(switch_to_tt, switch_to_skas, prev, next);
+		if(current->thread.saved_task)
+			show_regs(&(current->thread.regs));
+		next= current->thread.saved_task;
+		prev= current;
+	} while(current->thread.saved_task);
+
+	return(current->thread.prev_sched);
 
-	if(current->pid == 0)
-		switch_timers(1);
 }
 
-extern void schedule_tail(struct task_struct *prev);
+void interrupt_end(void)
+{
+	if(need_resched()) schedule();
+	if(test_tsk_thread_flag(current, TIF_SIGPENDING)) do_signal();
+}
 
-void new_thread_handler(int sig)
+void release_thread(struct task_struct *task)
 {
-	int (*fn)(void *), n;
+	CHOOSE_MODE(release_thread_tt(task), release_thread_skas(task));
-	void *arg;
+}
 
-	fn = current->thread.request.u.thread.proc;
+void exit_thread(void)
-	arg = current->thread.request.u.thread.arg;
+{
-	os_usr1_signal(1);
+	unprotect_stack((unsigned long) current_thread);
-	thread_wait(&current->thread.mode.skas.switch_buf,
+}
-		    current->thread.mode.skas.fork_buf);
 
-	if(current->thread.prev_sched != NULL)
+void *get_current(void)
-		schedule_tail(current->thread.prev_sched);
+{
-	current->thread.prev_sched = NULL;
+	return(current);
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+		unsigned long stack_top, struct task_struct * p,
+		struct pt_regs *regs)
+{
+	int ret;
 
-	/* The return value is 1 if the kernel thread execs a process,
+	p->thread = (struct thread_struct) INIT_THREAD;
-	 * 0 if it just exits
+	ret = CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
+			       clone_flags, sp, stack_top, p, regs);
+
+	if (ret || !current->thread.forking)
+		goto out;
+
+	clear_flushed_tls(p);
+
+	/*
+	 * Set a new TLS for the child thread?
 	 */
-	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
+	if (clone_flags & CLONE_SETTLS)
-	if(n == 1){
+		ret = arch_copy_tls(p);
-		/* Handle any immediate reschedules or signals */
+
-		interrupt_end();
+out:
-		userspace(&current->thread.regs.regs);
+	return ret;
+}
+
+void initial_thread_cb(void (*proc)(void *), void *arg)
+{
+	int save_kmalloc_ok = kmalloc_ok;
+
+	kmalloc_ok = 0;
+	CHOOSE_MODE_PROC(initial_thread_cb_tt, initial_thread_cb_skas, proc,
+			 arg);
+	kmalloc_ok = save_kmalloc_ok;
+}
+
+unsigned long stack_sp(unsigned long page)
+{
+	return(page + PAGE_SIZE - sizeof(void *));
+}
+
+int current_pid(void)
+{
+	return(current->pid);
+}
+
+void default_idle(void)
+{
+	CHOOSE_MODE(uml_idle_timer(), (void) 0);
+
+	while(1){
+		/* endless idle loop with no priority at all */
+
+		/*
+		 * although we are an idle CPU, we do not want to
+		 * get into the scheduler unnecessarily.
+		 */
+		if(need_resched())
+			schedule();
+
+		idle_sleep(10);
 	}
-	else do_exit(0);
 }
 
-void new_thread_proc(void *stack, void (*handler)(int sig))
+void cpu_idle(void)
 {
-	init_new_thread_stack(stack, handler);
+	CHOOSE_MODE(init_idle_tt(), init_idle_skas());
-	os_usr1_process(os_getpid());
 }
 
-void release_thread_skas(struct task_struct *task)
+int page_size(void)
 {
+	return(PAGE_SIZE);
 }
 
-void fork_handler(int sig)
+void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
+		      pte_t *pte_out)
 {
-	os_usr1_signal(1);
+	pgd_t *pgd;
-	thread_wait(&current->thread.mode.skas.switch_buf,
+	pud_t *pud;
-		    current->thread.mode.skas.fork_buf);
+	pmd_t *pmd;
-
+	pte_t *pte;
-	force_flush_all();
+	pte_t ptent;
-	if(current->thread.prev_sched == NULL)
+
-		panic("blech");
+	if(task->mm == NULL)
+		return(ERR_PTR(-EINVAL));
+	pgd = pgd_offset(task->mm, addr);
+	if(!pgd_present(*pgd))
+		return(ERR_PTR(-EINVAL));
+
+	pud = pud_offset(pgd, addr);
+	if(!pud_present(*pud))
+		return(ERR_PTR(-EINVAL));
+
+	pmd = pmd_offset(pud, addr);
+	if(!pmd_present(*pmd))
+		return(ERR_PTR(-EINVAL));
+
+	pte = pte_offset_kernel(pmd, addr);
+	ptent = *pte;
+	if(!pte_present(ptent))
+		return(ERR_PTR(-EINVAL));
+
+	if(pte_out != NULL)
+		*pte_out = ptent;
+	return((void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK));
+}
 
-	schedule_tail(current->thread.prev_sched);
+char *current_cmd(void)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
+	return("(Unknown)");
+#else
+	void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
+	return IS_ERR(addr) ? "(Unknown)": __va((unsigned long) addr);
+#endif
+}
 
-	/* XXX: if interrupt_end() calls schedule, this call to
+void force_sigbus(void)
-	 * arch_switch_to_skas isn't needed. We could want to apply this to
+{
-	 * improve performance. -bb */
+	printk(KERN_ERR "Killing pid %d because of a lack of memory\n",
-	arch_switch_to_skas(current->thread.prev_sched, current);
+	       current->pid);
+	lock_kernel();
+	sigaddset(&current->pending.signal, SIGBUS);
+	recalc_sigpending();
+	current->flags |= PF_SIGNALED;
+	do_exit(SIGBUS | 0x80);
+}
 
-	current->thread.prev_sched = NULL;
+void dump_thread(struct pt_regs *regs, struct user *u)
+{
+}
 
-/* Handle any immediate reschedules or signals */
+void enable_hlt(void)
-	interrupt_end();
+{
+	panic("enable_hlt");
+}
+
+EXPORT_SYMBOL(enable_hlt);
 
-	userspace(&current->thread.regs.regs);
+void disable_hlt(void)
+{
+	panic("disable_hlt");
 }
 
-int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
+EXPORT_SYMBOL(disable_hlt);
-		     unsigned long stack_top, struct task_struct * p,
+
-		     struct pt_regs *regs)
+void *um_kmalloc(int size)
 {
-	void (*handler)(int);
+	return kmalloc(size, GFP_KERNEL);
+}
 
-	if(current->thread.forking){
+void *um_kmalloc_atomic(int size)
-		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
+{
-		       sizeof(p->thread.regs.regs.skas));
+	return kmalloc(size, GFP_ATOMIC);
-		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
+}
-		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
 
-		handler = fork_handler;
+void *um_vmalloc(int size)
+{
+	return vmalloc(size);
+}
 
-		arch_copy_thread(&current->thread.arch, &p->thread.arch);
+void *um_vmalloc_atomic(int size)
-	}
+{
-	else {
+	return __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
-		init_thread_registers(&p->thread.regs.regs);
+}
-		p->thread.request.u.thread = current->thread.request.u.thread;
-		handler = new_thread_handler;
-	}
 
-	new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
+int __cant_sleep(void) {
-		   &p->thread.mode.skas.fork_buf, handler);
+	return in_atomic() || irqs_disabled() || in_interrupt();
-	return(0);
+	/* Is in_interrupt() really needed? */
+}
+
+unsigned long get_fault_addr(void)
+{
+	return((unsigned long) current->thread.fault_addr);
 }
 
-int new_mm(unsigned long stack)
+EXPORT_SYMBOL(get_fault_addr);
+
+void not_implemented(void)
 {
-	int fd;
+	printk(KERN_DEBUG "Something isn't implemented in here\n");
+}
 
-	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
+EXPORT_SYMBOL(not_implemented);
-	if(fd < 0)
-		return(fd);
 
-	if(skas_needs_stub)
+int user_context(unsigned long sp)
-		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
+{
+	unsigned long stack;
 
-	return(fd);
+	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
+	return(stack != (unsigned long) current_thread);
 }
 
-void init_idle_skas(void)
+extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
+
+void do_uml_exitcalls(void)
 {
-	cpu_tasks[current_thread->cpu].pid = os_getpid();
+	exitcall_t *call;
-	default_idle();
+
+	call = &__uml_exitcall_end;
+	while (--call >= &__uml_exitcall_begin)
+		(*call)();
 }
 
-extern void start_kernel(void);
+char *uml_strdup(char *string)
+{
+	return kstrdup(string, GFP_KERNEL);
+}
 
-static int start_kernel_proc(void *unused)
+int copy_to_user_proc(void __user *to, void *from, int size)
 {
-	int pid;
+	return(copy_to_user(to, from, size));
+}
+
+int copy_from_user_proc(void *to, void __user *from, int size)
+{
+	return(copy_from_user(to, from, size));
+}
+
+int clear_user_proc(void __user *buf, int size)
+{
+	return(clear_user(buf, size));
+}
 
-	block_signals();
+int strlen_user_proc(char __user *str)
-	pid = os_getpid();
+{
+	return(strlen_user(str));
+}
 
-	cpu_tasks[0].pid = pid;
+int smp_sigio_handler(void)
-	cpu_tasks[0].task = current;
+{
 #ifdef CONFIG_SMP
-	cpu_online_map = cpumask_of_cpu(0);
+	int cpu = current_thread->cpu;
+	IPI_handler(cpu);
+	if(cpu != 0)
+		return(1);
 #endif
-	start_kernel();
 	return(0);
 }
 
-extern int userspace_pid[];
+int cpu(void)
-
-int start_uml_skas(void)
 {
-	if(proc_mm)
+	return(current_thread->cpu);
-		userspace_pid[0] = start_userspace(0);
+}
+
+static atomic_t using_sysemu = ATOMIC_INIT(0);
+int sysemu_supported;
 
-	init_new_thread_signals();
+void set_using_sysemu(int value)
+{
+	if (value > sysemu_supported)
+		return;
+	atomic_set(&using_sysemu, value);
+}
 
-	init_task.thread.request.u.thread.proc = start_kernel_proc;
+int get_using_sysemu(void)
-	init_task.thread.request.u.thread.arg = NULL;
+{
-	return(start_idle_thread(task_stack_page(&init_task),
+	return atomic_read(&using_sysemu);
-				 &init_task.thread.mode.skas.switch_buf,
-				 &init_task.thread.mode.skas.fork_buf));
 }
 
-int external_pid_skas(struct task_struct *task)
+static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
 {
-#warning Need to look up userspace_pid by cpu
+	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /*No overflow*/
-	return(userspace_pid[0]);
+		*eof = 1;
+
+	return strlen(buf);
 }
 
-int thread_pid_skas(struct task_struct *task)
+static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
 {
-#warning Need to look up userspace_pid by cpu
+	char tmp[2];
-	return(userspace_pid[0]);
+
+	if (copy_from_user(tmp, buf, 1))
+		return -EFAULT;
+
+	if (tmp[0] >= '0' && tmp[0] <= '2')
+		set_using_sysemu(tmp[0] - '0');
+	return count; /*We use the first char, but pretend to write everything*/
 }
 
-void kill_off_processes_skas(void)
+int __init make_proc_sysemu(void)
 {
-	if(proc_mm)
+	struct proc_dir_entry *ent;
-#warning need to loop over userspace_pids in kill_off_processes_skas
+	if (!sysemu_supported)
-		os_kill_ptraced_process(userspace_pid[0], 1);
+		return 0;
-	else {
-		struct task_struct *p;
-		int pid, me;
 
-		me = os_getpid();
+	ent = create_proc_entry("sysemu", 0600, &proc_root);
-		for_each_process(p){
-			if(p->mm == NULL)
-				continue;
 
-			pid = p->mm->context.skas.id.u.pid;
+	if (ent == NULL)
-			os_kill_ptraced_process(pid, 1);
+	{
-		}
+		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
+		return(0);
 	}
+
+	ent->read_proc = proc_read_sysemu;
+	ent->write_proc = proc_write_sysemu;
+
+	return 0;
 }
 
-unsigned long current_stub_stack(void)
+late_initcall(make_proc_sysemu);
+
+int singlestepping(void * t)
 {
-	if(current->mm == NULL)
+	struct task_struct *task = t ? t : current;
+
+	if ( ! (task->ptrace & PT_DTRACE) )
 		return(0);
 
-	return(current->mm->context.skas.id.stack);
+	if (task->thread.singlestep_syscall)
+		return(1);
+
+	return 2;
+}
+
+/*
+ * Only x86 and x86_64 have an arch_align_stack().
+ * All other arches have "#define arch_align_stack(x) (x)"
+ * in their asm/system.h
+ * As this is included in UML from asm-um/system-generic.h,
+ * we can use it to behave as the subarch does.
+ */
+#ifndef arch_align_stack
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+		sp -= get_random_int() % 8192;
+	return sp & ~0xf;
 }
+#endif
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 552ca1cb9847..820affbf3e16 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -35,9 +35,6 @@ unsigned long long sched_clock(void)
 	return (unsigned long long)jiffies_64 * (1000000000 / HZ);
 }
 
-/* Changed at early boot */
-int timer_irq_inited = 0;
-
 static unsigned long long prev_nsecs;
 #ifdef CONFIG_UML_REAL_TIME_CLOCK
 static long long delta;		/* Deviation per interval */
@@ -98,7 +95,7 @@ irqreturn_t um_timer(int irq, void *dev, struct pt_regs *regs)
 
 	do_timer(regs);
 
-	nsecs = get_time() + local_offset;
+	nsecs = get_time();
 	xtime.tv_sec = nsecs / NSEC_PER_SEC;
 	xtime.tv_nsec = nsecs - xtime.tv_sec * NSEC_PER_SEC;
 
@@ -113,12 +110,13 @@ static void register_timer(void)
 
 	err = request_irq(TIMER_IRQ, um_timer, IRQF_DISABLED, "timer", NULL);
 	if(err != 0)
-		printk(KERN_ERR "timer_init : request_irq failed - "
+		printk(KERN_ERR "register_timer : request_irq failed - "
 		       "errno = %d\n", -err);
 
-	timer_irq_inited = 1;
-
-	user_time_init();
+	err = set_interval(1);
+	if(err != 0)
+		printk(KERN_ERR "register_timer : set_interval failed - "
+		       "errno = %d\n", -err);
 }
 
 extern void (*late_time_init)(void);
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index f5b0636f9ad7..54a5ff25645a 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -1,4 +1,4 @@
 /*
  * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
  * Licensed under the GPL
  */
@@ -16,12 +16,12 @@
 #include "os.h"
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 		    int r, int w, int x, struct host_vm_op *ops, int *index,
 		    int last_filled, union mm_context *mmu, void **flush,
 		    int (*do_ops)(union mm_context *, struct host_vm_op *,
 				  int, int, void **))
 {
 	__u64 offset;
 	struct host_vm_op *last;
 	int fd, ret = 0;
 
@@ -89,7 +89,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
 static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
 			int x, struct host_vm_op *ops, int *index,
 			int last_filled, union mm_context *mmu, void **flush,
 			int (*do_ops)(union mm_context *, struct host_vm_op *,
 				      int, int, void **))
 {
 	struct host_vm_op *last;
@@ -124,105 +124,105 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
 #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
 
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 		      unsigned long end_addr, int force,
 		      int (*do_ops)(union mm_context *, struct host_vm_op *,
 				    int, int, void **))
 {
 	pgd_t *npgd;
 	pud_t *npud;
 	pmd_t *npmd;
 	pte_t *npte;
 	union mm_context *mmu = &mm->context;
 	unsigned long addr, end;
 	int r, w, x;
 	struct host_vm_op ops[1];
 	void *flush = NULL;
-	int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;
+	int op_index = -1, last_op = ARRAY_SIZE(ops) - 1;
 	int ret = 0;
 
-	if(mm == NULL) return;
+	if(mm == NULL)
+		return;
 
 	ops[0].type = NONE;
 	for(addr = start_addr; addr < end_addr && !ret;){
 		npgd = pgd_offset(mm, addr);
 		if(!pgd_present(*npgd)){
 			end = ADD_ROUND(addr, PGDIR_SIZE);
 			if(end > end_addr)
 				end = end_addr;
 			if(force || pgd_newpage(*npgd)){
 				ret = add_munmap(addr, end - addr, ops,
 						 &op_index, last_op, mmu,
 						 &flush, do_ops);
 				pgd_mkuptodate(*npgd);
 			}
 			addr = end;
 			continue;
 		}
 
 		npud = pud_offset(npgd, addr);
 		if(!pud_present(*npud)){
 			end = ADD_ROUND(addr, PUD_SIZE);
 			if(end > end_addr)
 				end = end_addr;
 			if(force || pud_newpage(*npud)){
 				ret = add_munmap(addr, end - addr, ops,
 						 &op_index, last_op, mmu,
 						 &flush, do_ops);
 				pud_mkuptodate(*npud);
 			}
 			addr = end;
 			continue;
 		}
 
 		npmd = pmd_offset(npud, addr);
 		if(!pmd_present(*npmd)){
 			end = ADD_ROUND(addr, PMD_SIZE);
 			if(end > end_addr)
 				end = end_addr;
 			if(force || pmd_newpage(*npmd)){
 				ret = add_munmap(addr, end - addr, ops,
 						 &op_index, last_op, mmu,
 						 &flush, do_ops);
 				pmd_mkuptodate(*npmd);
 			}
 			addr = end;
 			continue;
 		}
 
 		npte = pte_offset_kernel(npmd, addr);
 		r = pte_read(*npte);
 		w = pte_write(*npte);
 		x = pte_exec(*npte);
 		if (!pte_young(*npte)) {
 			r = 0;
 			w = 0;
 		} else if (!pte_dirty(*npte)) {
 			w = 0;
 		}
 		if(force || pte_newpage(*npte)){
 			if(pte_present(*npte))
 				ret = add_mmap(addr,
 					       pte_val(*npte) & PAGE_MASK,
 					       PAGE_SIZE, r, w, x, ops,
 					       &op_index, last_op, mmu,
 					       &flush, do_ops);
 			else ret = add_munmap(addr, PAGE_SIZE, ops,
 					      &op_index, last_op, mmu,
 					      &flush, do_ops);
 		}
 		else if(pte_newprot(*npte))
 			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
 					   &op_index, last_op, mmu,
 					   &flush, do_ops);
 
 		*npte = pte_mkuptodate(*npte);
 		addr += PAGE_SIZE;
 	}
-
 	if(!ret)
 		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
 
-	/* This is not an else because ret is modified above */
+/* This is not an else because ret is modified above */
 	if(ret) {
 		printk("fix_range_common: failed, killing current process\n");
 		force_sig(SIGKILL, current);
@@ -231,160 +231,160 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 
 int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long addr, last;
 	int updated = 0, err;
 
 	mm = &init_mm;
 	for(addr = start; addr < end;){
 		pgd = pgd_offset(mm, addr);
 		if(!pgd_present(*pgd)){
 			last = ADD_ROUND(addr, PGDIR_SIZE);
 			if(last > end)
 				last = end;
 			if(pgd_newpage(*pgd)){
 				updated = 1;
 				err = os_unmap_memory((void *) addr,
 						      last - addr);
 				if(err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
 			}
 			addr = last;
 			continue;
 		}
 
 		pud = pud_offset(pgd, addr);
 		if(!pud_present(*pud)){
 			last = ADD_ROUND(addr, PUD_SIZE);
 			if(last > end)
 				last = end;
 			if(pud_newpage(*pud)){
 				updated = 1;
 				err = os_unmap_memory((void *) addr,
 						      last - addr);
 				if(err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
 			}
 			addr = last;
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if(!pmd_present(*pmd)){
 			last = ADD_ROUND(addr, PMD_SIZE);
 			if(last > end)
 				last = end;
 			if(pmd_newpage(*pmd)){
 				updated = 1;
 				err = os_unmap_memory((void *) addr,
 						      last - addr);
 				if(err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
 			}
 			addr = last;
 			continue;
 		}
 
 		pte = pte_offset_kernel(pmd, addr);
 		if(!pte_present(*pte) || pte_newpage(*pte)){
 			updated = 1;
 			err = os_unmap_memory((void *) addr,
 					      PAGE_SIZE);
 			if(err < 0)
 				panic("munmap failed, errno = %d\n",
 				      -err);
 			if(pte_present(*pte))
 				map_memory(addr,
 					   pte_val(*pte) & PAGE_MASK,
 					   PAGE_SIZE, 1, 1, 1);
 		}
 		else if(pte_newprot(*pte)){
 			updated = 1;
 			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
 		}
 		addr += PAGE_SIZE;
 	}
 	return(updated);
 }
 
 pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
 {
 	return(pgd_offset(mm, address));
 }
 
 pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
 {
 	return(pud_offset(pgd, address));
 }
 
 pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
 {
 	return(pmd_offset(pud, address));
 }
 
 pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
 {
 	return(pte_offset_kernel(pmd, address));
 }
 
 pte_t *addr_pte(struct task_struct *task, unsigned long addr)
 {
 	pgd_t *pgd = pgd_offset(task->mm, addr);
 	pud_t *pud = pud_offset(pgd, addr);
 	pmd_t *pmd = pmd_offset(pud, addr);
 
 	return(pte_offset_map(pmd, addr));
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
 {
 	address &= PAGE_MASK;
 	flush_tlb_range(vma, address, address + PAGE_SIZE);
 }
 
 void flush_tlb_all(void)
 {
 	flush_tlb_mm(current->mm);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
 			 flush_tlb_kernel_range_common, start, end);
 }
 
 void flush_tlb_kernel_vm(void)
 {
 	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
 		    flush_tlb_kernel_range_common(start_vm, end_vm));
 }
 
 void __flush_tlb_one(unsigned long addr)
 {
 	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
 			 end);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
 }
 
 void force_flush_all(void)
 {
 	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
 }
 
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index ac70fa5a2e2a..61a23fff4395 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -140,14 +140,6 @@ void segv_handler(int sig, union uml_pt_regs *regs)
 	segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
 }
 
-struct kern_handlers handlinfo_kern = {
-	.relay_signal = relay_signal,
-	.winch = winch,
-	.bus_handler = relay_signal,
-	.page_fault = segv_handler,
-	.sigio_handler = sigio_handler,
-	.timer_handler = timer_handler
-};
 /*
  * We give a *copy* of the faultinfo in the regs to segv.
  * This must be done, since nesting SEGVs could overwrite
@@ -227,9 +219,16 @@ void bad_segv(struct faultinfo fi, unsigned long ip)
 
 void relay_signal(int sig, union uml_pt_regs *regs)
 {
-	if(arch_handle_signal(sig, regs)) return;
-	if(!UPT_IS_USER(regs))
+	if(arch_handle_signal(sig, regs))
+		return;
+
+	if(!UPT_IS_USER(regs)){
+		if(sig == SIGBUS)
+			printk("Bus error - the /dev/shm or /tmp mount likely "
+			       "just ran out of space\n");
 		panic("Kernel mode signal %d", sig);
+	}
+
 	current->thread.arch.faultinfo = *UPT_FAULTINFO(regs);
 	force_sig(sig, current);
 }
@@ -246,6 +245,15 @@ void winch(int sig, union uml_pt_regs *regs)
 	do_IRQ(WINCH_IRQ, regs);
 }
 
+const struct kern_handlers handlinfo_kern = {
+	.relay_signal = relay_signal,
+	.winch = winch,
+	.bus_handler = bus_handler,
+	.page_fault = segv_handler,
+	.sigio_handler = sigio_handler,
+	.timer_handler = timer_handler
+};
+
 void trap_init(void)
 {
 }
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 7896cf98232d..55005710dcbb 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -106,7 +106,7 @@ static void c_stop(struct seq_file *m, void *v)
 {
 }
 
-struct seq_operations cpuinfo_op = {
+const struct seq_operations cpuinfo_op = {
 	.start	= c_start,
 	.next	= c_next,
 	.stop	= c_stop,