Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/dyn.lds.S       |   4
-rw-r--r--  arch/um/kernel/exec.c          |   5
-rw-r--r--  arch/um/kernel/exitcode.c      |  31
-rw-r--r--  arch/um/kernel/gmon_syms.c     |  11
-rw-r--r--  arch/um/kernel/gprof_syms.c    |  13
-rw-r--r--  arch/um/kernel/initrd.c        |  30
-rw-r--r--  arch/um/kernel/irq.c           |   6
-rw-r--r--  arch/um/kernel/ksyms.c         |   9
-rw-r--r--  arch/um/kernel/mem.c           | 147
-rw-r--r--  arch/um/kernel/physmem.c       |  16
-rw-r--r--  arch/um/kernel/process.c       | 154
-rw-r--r--  arch/um/kernel/reboot.c        |   7
-rw-r--r--  arch/um/kernel/sigio.c         |  18
-rw-r--r--  arch/um/kernel/signal.c        |  16
-rw-r--r--  arch/um/kernel/skas/clone.c    |  32
-rw-r--r--  arch/um/kernel/skas/mmu.c      | 127
-rw-r--r--  arch/um/kernel/skas/process.c  |  20
-rw-r--r--  arch/um/kernel/skas/syscall.c  |   6
-rw-r--r--  arch/um/kernel/skas/uaccess.c  | 140
-rw-r--r--  arch/um/kernel/smp.c           |  14
-rw-r--r--  arch/um/kernel/syscall.c       |   3
-rw-r--r--  arch/um/kernel/sysrq.c         |  44
-rw-r--r--  arch/um/kernel/time.c          |  14
-rw-r--r--  arch/um/kernel/tlb.c           |  51
-rw-r--r--  arch/um/kernel/trap.c          |  33
-rw-r--r--  arch/um/kernel/uaccess.c       |  11
-rw-r--r--  arch/um/kernel/um_arch.c       | 100
-rw-r--r--  arch/um/kernel/umid.c          |  15
-rw-r--r--  arch/um/kernel/uml.lds.S       |   4
29 files changed, 548 insertions(+), 533 deletions(-)
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 3866f4960f04..26090b7f323e 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -17,7 +17,7 @@ SECTIONS
17 __init_begin = .; 17 __init_begin = .;
18 .init.text : { 18 .init.text : {
19 _sinittext = .; 19 _sinittext = .;
20 *(.init.text) 20 INIT_TEXT
21 _einittext = .; 21 _einittext = .;
22 } 22 }
23 23
@@ -84,7 +84,7 @@ SECTIONS
84 84
85 #include "asm/common.lds.S" 85 #include "asm/common.lds.S"
86 86
87 init.data : { *(.init.data) } 87 init.data : { INIT_DATA }
88 88
89 /* Ensure the __preinit_array_start label is properly aligned. We 89 /* Ensure the __preinit_array_start label is properly aligned. We
90 could instead move the label definition inside the section, but 90 could instead move the label definition inside the section, but
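For reference, INIT_TEXT and INIT_DATA are the generic linker-script macros from include/asm-generic/vmlinux.lds.h, which the script now uses instead of listing the init input sections by hand. Very roughly (a simplified sketch, not the verbatim upstream definition), they expand to:

/* Simplified sketch of include/asm-generic/vmlinux.lds.h, not a quote */
#define INIT_TEXT \
	*(.init.text)	/* plus the config-dependent __devinit/__cpuinit/__meminit text */

#define INIT_DATA \
	*(.init.data)	/* plus the config-dependent init data sections */

Using the shared macros keeps UML's linker scripts in step when new init sections are added to the generic header.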
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 8196450451cd..76a62c0cb2bc 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -19,12 +19,13 @@
19void flush_thread(void) 19void flush_thread(void)
20{ 20{
21 void *data = NULL; 21 void *data = NULL;
22 unsigned long end = proc_mm ? task_size : STUB_START;
23 int ret; 22 int ret;
24 23
25 arch_flush_thread(&current->thread.arch); 24 arch_flush_thread(&current->thread.arch);
26 25
27 ret = unmap(&current->mm->context.id, 0, end, 1, &data); 26 ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data);
27 ret = ret || unmap(&current->mm->context.id, STUB_END,
28 TASK_SIZE - STUB_END, 1, &data);
28 if (ret) { 29 if (ret) {
29 printk(KERN_ERR "flush_thread - clearing address space failed, " 30 printk(KERN_ERR "flush_thread - clearing address space failed, "
30 "err = %d\n", ret); 31 "err = %d\n", ret);
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index c716b5a6db13..984f80e668ca 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -1,15 +1,17 @@
1/* 1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/kernel.h" 6#include <linux/ctype.h>
7#include "linux/init.h" 7#include <linux/init.h>
8#include "linux/ctype.h" 8#include <linux/kernel.h>
9#include "linux/proc_fs.h" 9#include <linux/proc_fs.h>
10#include "asm/uaccess.h" 10#include <linux/types.h>
11#include <asm/uaccess.h>
11 12
12/* If read and write race, the read will still atomically read a valid 13/*
14 * If read and write race, the read will still atomically read a valid
13 * value. 15 * value.
14 */ 16 */
15int uml_exitcode = 0; 17int uml_exitcode = 0;
@@ -19,18 +21,19 @@ static int read_proc_exitcode(char *page, char **start, off_t off,
19{ 21{
20 int len, val; 22 int len, val;
21 23
22 /* Save uml_exitcode in a local so that we don't need to guarantee 24 /*
25 * Save uml_exitcode in a local so that we don't need to guarantee
23 * that sprintf accesses it atomically. 26 * that sprintf accesses it atomically.
24 */ 27 */
25 val = uml_exitcode; 28 val = uml_exitcode;
26 len = sprintf(page, "%d\n", val); 29 len = sprintf(page, "%d\n", val);
27 len -= off; 30 len -= off;
28 if(len <= off+count) 31 if (len <= off+count)
29 *eof = 1; 32 *eof = 1;
30 *start = page + off; 33 *start = page + off;
31 if(len > count) 34 if (len > count)
32 len = count; 35 len = count;
33 if(len < 0) 36 if (len < 0)
34 len = 0; 37 len = 0;
35 return len; 38 return len;
36} 39}
@@ -41,11 +44,11 @@ static int write_proc_exitcode(struct file *file, const char __user *buffer,
41 char *end, buf[sizeof("nnnnn\0")]; 44 char *end, buf[sizeof("nnnnn\0")];
42 int tmp; 45 int tmp;
43 46
44 if(copy_from_user(buf, buffer, count)) 47 if (copy_from_user(buf, buffer, count))
45 return -EFAULT; 48 return -EFAULT;
46 49
47 tmp = simple_strtol(buf, &end, 0); 50 tmp = simple_strtol(buf, &end, 0);
48 if((*end != '\0') && !isspace(*end)) 51 if ((*end != '\0') && !isspace(*end))
49 return -EINVAL; 52 return -EINVAL;
50 53
51 uml_exitcode = tmp; 54 uml_exitcode = tmp;
@@ -57,7 +60,7 @@ static int make_proc_exitcode(void)
57 struct proc_dir_entry *ent; 60 struct proc_dir_entry *ent;
58 61
59 ent = create_proc_entry("exitcode", 0600, &proc_root); 62 ent = create_proc_entry("exitcode", 0600, &proc_root);
60 if(ent == NULL){ 63 if (ent == NULL) {
61 printk(KERN_WARNING "make_proc_exitcode : Failed to register " 64 printk(KERN_WARNING "make_proc_exitcode : Failed to register "
62 "/proc/exitcode\n"); 65 "/proc/exitcode\n");
63 return 0; 66 return 0;
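For context, /proc/exitcode lets a UML instance pick the status the host-side uml binary exits with. A minimal userspace sketch of how the file is exercised (illustrative only, run inside the guest):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/exitcode", "w");

	if (f == NULL) {
		perror("fopen /proc/exitcode");
		return 1;
	}
	/* Parsed by the write handler above with simple_strtol(). */
	fprintf(f, "3\n");
	fclose(f);

	/* When this UML instance halts, the host uml process exits with status 3. */
	return 0;
}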
diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
index 734f873cab12..72eccd2a4113 100644
--- a/arch/um/kernel/gmon_syms.c
+++ b/arch/um/kernel/gmon_syms.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
@@ -8,12 +8,13 @@
8extern void __bb_init_func(void *) __attribute__((weak)); 8extern void __bb_init_func(void *) __attribute__((weak));
9EXPORT_SYMBOL(__bb_init_func); 9EXPORT_SYMBOL(__bb_init_func);
10 10
11/* This is defined (and referred to in profiling stub code) only by some GCC 11/*
12 * This is defined (and referred to in profiling stub code) only by some GCC
12 * versions in libgcov. 13 * versions in libgcov.
13 * 14 *
14 * Since SuSE backported the fix, we cannot handle it depending on GCC version. 15 * Since SuSE backported the fix, we cannot handle it depending on GCC version.
15 * So, unconditionally export it. But also give it a weak declaration, which will 16 * So, unconditionally export it. But also give it a weak declaration, which
16 * be overridden by any other one. 17 * will be overridden by any other one.
17 */ 18 */
18 19
19extern void __gcov_init(void *) __attribute__((weak)); 20extern void __gcov_init(void *) __attribute__((weak));
diff --git a/arch/um/kernel/gprof_syms.c b/arch/um/kernel/gprof_syms.c
index 9244f018d44c..e2f043d0de6c 100644
--- a/arch/um/kernel/gprof_syms.c
+++ b/arch/um/kernel/gprof_syms.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
@@ -7,14 +7,3 @@
7 7
8extern void mcount(void); 8extern void mcount(void);
9EXPORT_SYMBOL(mcount); 9EXPORT_SYMBOL(mcount);
10
11/*
12 * Overrides for Emacs so that we follow Linus's tabbing style.
13 * Emacs will notice this stuff at the end of the file and automatically
14 * adjust the settings for this buffer only. This must remain at the end
15 * of the file.
16 * ---------------------------------------------------------------------------
17 * Local variables:
18 * c-file-style: "linux"
19 * End:
20 */
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 16dc43e9d940..fa015565001b 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
@@ -7,7 +7,6 @@
7#include "linux/bootmem.h" 7#include "linux/bootmem.h"
8#include "linux/initrd.h" 8#include "linux/initrd.h"
9#include "asm/types.h" 9#include "asm/types.h"
10#include "kern_util.h"
11#include "initrd.h" 10#include "initrd.h"
12#include "init.h" 11#include "init.h"
13#include "os.h" 12#include "os.h"
@@ -21,18 +20,27 @@ static int __init read_initrd(void)
21 long long size; 20 long long size;
22 int err; 21 int err;
23 22
24 if(initrd == NULL) 23 if (initrd == NULL)
25 return 0; 24 return 0;
26 25
27 err = os_file_size(initrd, &size); 26 err = os_file_size(initrd, &size);
28 if(err) 27 if (err)
29 return 0; 28 return 0;
30 29
30 /*
31 * This is necessary because alloc_bootmem craps out if you
32 * ask for no memory.
33 */
34 if (size == 0) {
 35 printk(KERN_ERR "\"%s\" is a zero-size initrd\n", initrd);
36 return 0;
37 }
38
31 area = alloc_bootmem(size); 39 area = alloc_bootmem(size);
32 if(area == NULL) 40 if (area == NULL)
33 return 0; 41 return 0;
34 42
35 if(load_initrd(initrd, area, size) == -1) 43 if (load_initrd(initrd, area, size) == -1)
36 return 0; 44 return 0;
37 45
38 initrd_start = (unsigned long) area; 46 initrd_start = (unsigned long) area;
@@ -59,13 +67,15 @@ int load_initrd(char *filename, void *buf, int size)
59 int fd, n; 67 int fd, n;
60 68
61 fd = os_open_file(filename, of_read(OPENFLAGS()), 0); 69 fd = os_open_file(filename, of_read(OPENFLAGS()), 0);
62 if(fd < 0){ 70 if (fd < 0) {
63 printk("Opening '%s' failed - err = %d\n", filename, -fd); 71 printk(KERN_ERR "Opening '%s' failed - err = %d\n", filename,
72 -fd);
64 return -1; 73 return -1;
65 } 74 }
66 n = os_read_file(fd, buf, size); 75 n = os_read_file(fd, buf, size);
67 if(n != size){ 76 if (n != size) {
68 printk("Read of %d bytes from '%s' failed, err = %d\n", size, 77 printk(KERN_ERR "Read of %d bytes from '%s' failed, "
78 "err = %d\n", size,
69 filename, -n); 79 filename, -n);
70 return -1; 80 return -1;
71 } 81 }
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index ba11ccd6a8a3..91587f8db340 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -107,10 +107,9 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
107 struct pollfd *tmp_pfd; 107 struct pollfd *tmp_pfd;
108 struct irq_fd *new_fd, *irq_fd; 108 struct irq_fd *new_fd, *irq_fd;
109 unsigned long flags; 109 unsigned long flags;
110 int pid, events, err, n; 110 int events, err, n;
111 111
112 pid = os_getpid(); 112 err = os_set_fd_async(fd);
113 err = os_set_fd_async(fd, pid);
114 if (err < 0) 113 if (err < 0)
115 goto out; 114 goto out;
116 115
@@ -127,7 +126,6 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
127 .fd = fd, 126 .fd = fd,
128 .type = type, 127 .type = type,
129 .irq = irq, 128 .irq = irq,
130 .pid = pid,
131 .events = events, 129 .events = events,
132 .current_events = 0 } ); 130 .current_events = 0 } );
133 131
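The pid argument to os_set_fd_async() goes away because the helper can determine the SIGIO owner itself. A hedged sketch of the libc-level setup such a helper typically performs (the real implementation lives in arch/um/os-Linux and may differ):

/* Sketch only - not the actual arch/um/os-Linux code. */
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

static int set_fd_async_sketch(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -errno;
	/* Deliver SIGIO to this process when fd becomes readable/writable. */
	if ((fcntl(fd, F_SETFL, flags | O_ASYNC | O_NONBLOCK) < 0) ||
	    (fcntl(fd, F_SETOWN, getpid()) < 0))
		return -errno;
	return 0;
}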
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index 1b388b41d95d..5311ee93ede3 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -18,15 +18,11 @@ EXPORT_SYMBOL(set_signals);
18EXPORT_SYMBOL(get_signals); 18EXPORT_SYMBOL(get_signals);
19EXPORT_SYMBOL(kernel_thread); 19EXPORT_SYMBOL(kernel_thread);
20EXPORT_SYMBOL(sys_waitpid); 20EXPORT_SYMBOL(sys_waitpid);
21EXPORT_SYMBOL(task_size);
22EXPORT_SYMBOL(flush_tlb_range); 21EXPORT_SYMBOL(flush_tlb_range);
23EXPORT_SYMBOL(host_task_size);
24EXPORT_SYMBOL(arch_validate); 22EXPORT_SYMBOL(arch_validate);
25EXPORT_SYMBOL(get_kmem_end);
26 23
27EXPORT_SYMBOL(high_physmem); 24EXPORT_SYMBOL(high_physmem);
28EXPORT_SYMBOL(empty_zero_page); 25EXPORT_SYMBOL(empty_zero_page);
29EXPORT_SYMBOL(um_virt_to_phys);
30EXPORT_SYMBOL(handle_page_fault); 26EXPORT_SYMBOL(handle_page_fault);
31EXPORT_SYMBOL(find_iomem); 27EXPORT_SYMBOL(find_iomem);
32 28
@@ -40,7 +36,6 @@ EXPORT_SYMBOL(uml_strdup);
40EXPORT_SYMBOL(os_stat_fd); 36EXPORT_SYMBOL(os_stat_fd);
41EXPORT_SYMBOL(os_stat_file); 37EXPORT_SYMBOL(os_stat_file);
42EXPORT_SYMBOL(os_access); 38EXPORT_SYMBOL(os_access);
43EXPORT_SYMBOL(os_get_exec_close);
44EXPORT_SYMBOL(os_set_exec_close); 39EXPORT_SYMBOL(os_set_exec_close);
45EXPORT_SYMBOL(os_getpid); 40EXPORT_SYMBOL(os_getpid);
46EXPORT_SYMBOL(os_open_file); 41EXPORT_SYMBOL(os_open_file);
@@ -71,10 +66,10 @@ EXPORT_SYMBOL(dump_thread);
71 66
72/* required for SMP */ 67/* required for SMP */
73 68
74extern void FASTCALL( __write_lock_failed(rwlock_t *rw)); 69extern void __write_lock_failed(rwlock_t *rw);
75EXPORT_SYMBOL(__write_lock_failed); 70EXPORT_SYMBOL(__write_lock_failed);
76 71
77extern void FASTCALL( __read_lock_failed(rwlock_t *rw)); 72extern void __read_lock_failed(rwlock_t *rw);
78EXPORT_SYMBOL(__read_lock_failed); 73EXPORT_SYMBOL(__read_lock_failed);
79 74
80#endif 75#endif
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 59822dee438a..d872fdce1d7e 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -1,49 +1,41 @@
1/* 1/*
2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) 2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/stddef.h" 6#include <linux/stddef.h>
7#include "linux/kernel.h" 7#include <linux/bootmem.h>
8#include "linux/mm.h" 8#include <linux/gfp.h>
9#include "linux/bootmem.h" 9#include <linux/highmem.h>
10#include "linux/swap.h" 10#include <linux/mm.h>
11#include "linux/highmem.h" 11#include <linux/swap.h>
12#include "linux/gfp.h" 12#include <asm/fixmap.h>
13#include "asm/page.h" 13#include <asm/page.h>
14#include "asm/fixmap.h"
15#include "asm/pgalloc.h"
16#include "kern_util.h"
17#include "as-layout.h" 14#include "as-layout.h"
15#include "init.h"
18#include "kern.h" 16#include "kern.h"
17#include "kern_util.h"
19#include "mem_user.h" 18#include "mem_user.h"
20#include "um_uaccess.h"
21#include "os.h" 19#include "os.h"
22#include "linux/types.h"
23#include "linux/string.h"
24#include "init.h"
25#include "kern_constants.h"
26 20
27/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */ 21/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
28unsigned long *empty_zero_page = NULL; 22unsigned long *empty_zero_page = NULL;
29/* allocated in paging_init and unchanged thereafter */ 23/* allocated in paging_init and unchanged thereafter */
30unsigned long *empty_bad_page = NULL; 24unsigned long *empty_bad_page = NULL;
25
26/*
27 * Initialized during boot, and readonly for initializing page tables
28 * afterwards
29 */
31pgd_t swapper_pg_dir[PTRS_PER_PGD]; 30pgd_t swapper_pg_dir[PTRS_PER_PGD];
31
32/* Initialized at boot time, and readonly after that */
32unsigned long long highmem; 33unsigned long long highmem;
33int kmalloc_ok = 0; 34int kmalloc_ok = 0;
34 35
36/* Used during early boot */
35static unsigned long brk_end; 37static unsigned long brk_end;
36 38
37void unmap_physmem(void)
38{
39 os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
40}
41
42static void map_cb(void *unused)
43{
44 map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
45}
46
47#ifdef CONFIG_HIGHMEM 39#ifdef CONFIG_HIGHMEM
48static void setup_highmem(unsigned long highmem_start, 40static void setup_highmem(unsigned long highmem_start,
49 unsigned long highmem_len) 41 unsigned long highmem_len)
@@ -53,7 +45,7 @@ static void setup_highmem(unsigned long highmem_start,
53 int i; 45 int i;
54 46
55 highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT; 47 highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
56 for(i = 0; i < highmem_len >> PAGE_SHIFT; i++){ 48 for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) {
57 page = &mem_map[highmem_pfn + i]; 49 page = &mem_map[highmem_pfn + i];
58 ClearPageReserved(page); 50 ClearPageReserved(page);
59 init_page_count(page); 51 init_page_count(page);
@@ -65,14 +57,13 @@ static void setup_highmem(unsigned long highmem_start,
65void __init mem_init(void) 57void __init mem_init(void)
66{ 58{
67 /* clear the zero-page */ 59 /* clear the zero-page */
68 memset((void *) empty_zero_page, 0, PAGE_SIZE); 60 memset(empty_zero_page, 0, PAGE_SIZE);
69 61
70 /* Map in the area just after the brk now that kmalloc is about 62 /* Map in the area just after the brk now that kmalloc is about
71 * to be turned on. 63 * to be turned on.
72 */ 64 */
73 brk_end = (unsigned long) UML_ROUND_UP(sbrk(0)); 65 brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
74 map_cb(NULL); 66 map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
75 initial_thread_cb(map_cb, NULL);
76 free_bootmem(__pa(brk_end), uml_reserved - brk_end); 67 free_bootmem(__pa(brk_end), uml_reserved - brk_end);
77 uml_reserved = brk_end; 68 uml_reserved = brk_end;
78 69
@@ -85,7 +76,7 @@ void __init mem_init(void)
85#endif 76#endif
86 num_physpages = totalram_pages; 77 num_physpages = totalram_pages;
87 max_pfn = totalram_pages; 78 max_pfn = totalram_pages;
88 printk(KERN_INFO "Memory: %luk available\n", 79 printk(KERN_INFO "Memory: %luk available\n",
89 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10)); 80 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
90 kmalloc_ok = 1; 81 kmalloc_ok = 1;
91 82
@@ -119,7 +110,7 @@ static void __init one_md_table_init(pud_t *pud)
119#endif 110#endif
120} 111}
121 112
122static void __init fixrange_init(unsigned long start, unsigned long end, 113static void __init fixrange_init(unsigned long start, unsigned long end,
123 pgd_t *pgd_base) 114 pgd_t *pgd_base)
124{ 115{
125 pgd_t *pgd; 116 pgd_t *pgd;
@@ -138,7 +129,7 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
138 if (pud_none(*pud)) 129 if (pud_none(*pud))
139 one_md_table_init(pud); 130 one_md_table_init(pud);
140 pmd = pmd_offset(pud, vaddr); 131 pmd = pmd_offset(pud, vaddr);
141 for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) { 132 for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
142 one_page_table_init(pmd); 133 one_page_table_init(pmd);
143 vaddr += PMD_SIZE; 134 vaddr += PMD_SIZE;
144 } 135 }
@@ -152,7 +143,7 @@ pgprot_t kmap_prot;
152 143
153#define kmap_get_fixmap_pte(vaddr) \ 144#define kmap_get_fixmap_pte(vaddr) \
154 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\ 145 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
155 (vaddr)), (vaddr)) 146 (vaddr)), (vaddr))
156 147
157static void __init kmap_init(void) 148static void __init kmap_init(void)
158{ 149{
@@ -197,21 +188,23 @@ static void __init fixaddr_user_init( void)
197 pud_t *pud; 188 pud_t *pud;
198 pmd_t *pmd; 189 pmd_t *pmd;
199 pte_t *pte; 190 pte_t *pte;
200 unsigned long paddr, vaddr = FIXADDR_USER_START; 191 phys_t p;
192 unsigned long v, vaddr = FIXADDR_USER_START;
201 193
202 if ( ! size ) 194 if (!size)
203 return; 195 return;
204 196
205 fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir); 197 fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
206 paddr = (unsigned long)alloc_bootmem_low_pages( size); 198 v = (unsigned long) alloc_bootmem_low_pages(size);
207 memcpy( (void *)paddr, (void *)FIXADDR_USER_START, size); 199 memcpy((void *) v , (void *) FIXADDR_USER_START, size);
208 paddr = __pa(paddr); 200 p = __pa(v);
209 for ( ; size > 0; size-=PAGE_SIZE, vaddr+=PAGE_SIZE, paddr+=PAGE_SIZE){ 201 for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
202 p += PAGE_SIZE) {
210 pgd = swapper_pg_dir + pgd_index(vaddr); 203 pgd = swapper_pg_dir + pgd_index(vaddr);
211 pud = pud_offset(pgd, vaddr); 204 pud = pud_offset(pgd, vaddr);
212 pmd = pmd_offset(pud, vaddr); 205 pmd = pmd_offset(pud, vaddr);
213 pte = pte_offset_kernel(pmd, vaddr); 206 pte = pte_offset_kernel(pmd, vaddr);
214 pte_set_val( (*pte), paddr, PAGE_READONLY); 207 pte_set_val(*pte, p, PAGE_READONLY);
215 } 208 }
216#endif 209#endif
217} 210}
@@ -223,7 +216,7 @@ void __init paging_init(void)
223 216
224 empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE); 217 empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
225 empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE); 218 empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
226 for(i = 0; i < ARRAY_SIZE(zones_size); i++) 219 for (i = 0; i < ARRAY_SIZE(zones_size); i++)
227 zones_size[i] = 0; 220 zones_size[i] = 0;
228 221
229 zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) - 222 zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
@@ -253,32 +246,33 @@ struct page *arch_validate(struct page *page, gfp_t mask, int order)
253 int i; 246 int i;
254 247
255 again: 248 again:
256 if(page == NULL) 249 if (page == NULL)
257 return page; 250 return page;
258 if(PageHighMem(page)) 251 if (PageHighMem(page))
259 return page; 252 return page;
260 253
261 addr = (unsigned long) page_address(page); 254 addr = (unsigned long) page_address(page);
262 for(i = 0; i < (1 << order); i++){ 255 for (i = 0; i < (1 << order); i++) {
263 current->thread.fault_addr = (void *) addr; 256 current->thread.fault_addr = (void *) addr;
264 if(__do_copy_to_user((void __user *) addr, &zero, 257 if (__do_copy_to_user((void __user *) addr, &zero,
265 sizeof(zero), 258 sizeof(zero),
266 &current->thread.fault_addr, 259 &current->thread.fault_addr,
267 &current->thread.fault_catcher)){ 260 &current->thread.fault_catcher)) {
268 if(!(mask & __GFP_WAIT)) 261 if (!(mask & __GFP_WAIT))
269 return NULL; 262 return NULL;
270 else break; 263 else break;
271 } 264 }
272 addr += PAGE_SIZE; 265 addr += PAGE_SIZE;
273 } 266 }
274 267
275 if(i == (1 << order)) 268 if (i == (1 << order))
276 return page; 269 return page;
277 page = alloc_pages(mask, order); 270 page = alloc_pages(mask, order);
278 goto again; 271 goto again;
279} 272}
280 273
281/* This can't do anything because nothing in the kernel image can be freed 274/*
275 * This can't do anything because nothing in the kernel image can be freed
282 * since it's not in kernel physical memory. 276 * since it's not in kernel physical memory.
283 */ 277 */
284 278
@@ -290,8 +284,8 @@ void free_initmem(void)
290void free_initrd_mem(unsigned long start, unsigned long end) 284void free_initrd_mem(unsigned long start, unsigned long end)
291{ 285{
292 if (start < end) 286 if (start < end)
293 printk ("Freeing initrd memory: %ldk freed\n", 287 printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
294 (end - start) >> 10); 288 (end - start) >> 10);
295 for (; start < end; start += PAGE_SIZE) { 289 for (; start < end; start += PAGE_SIZE) {
296 ClearPageReserved(virt_to_page(start)); 290 ClearPageReserved(virt_to_page(start));
297 init_page_count(virt_to_page(start)); 291 init_page_count(virt_to_page(start));
@@ -308,32 +302,31 @@ void show_mem(void)
308 int highmem = 0; 302 int highmem = 0;
309 struct page *page; 303 struct page *page;
310 304
311 printk("Mem-info:\n"); 305 printk(KERN_INFO "Mem-info:\n");
312 show_free_areas(); 306 show_free_areas();
313 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 307 printk(KERN_INFO "Free swap: %6ldkB\n",
308 nr_swap_pages<<(PAGE_SHIFT-10));
314 pfn = max_mapnr; 309 pfn = max_mapnr;
315 while(pfn-- > 0) { 310 while (pfn-- > 0) {
316 page = pfn_to_page(pfn); 311 page = pfn_to_page(pfn);
317 total++; 312 total++;
318 if(PageHighMem(page)) 313 if (PageHighMem(page))
319 highmem++; 314 highmem++;
320 if(PageReserved(page)) 315 if (PageReserved(page))
321 reserved++; 316 reserved++;
322 else if(PageSwapCache(page)) 317 else if (PageSwapCache(page))
323 cached++; 318 cached++;
324 else if(page_count(page)) 319 else if (page_count(page))
325 shared += page_count(page) - 1; 320 shared += page_count(page) - 1;
326 } 321 }
327 printk("%d pages of RAM\n", total); 322 printk(KERN_INFO "%d pages of RAM\n", total);
328 printk("%d pages of HIGHMEM\n", highmem); 323 printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
329 printk("%d reserved pages\n", reserved); 324 printk(KERN_INFO "%d reserved pages\n", reserved);
330 printk("%d pages shared\n", shared); 325 printk(KERN_INFO "%d pages shared\n", shared);
331 printk("%d pages swap cached\n", cached); 326 printk(KERN_INFO "%d pages swap cached\n", cached);
332} 327}
333 328
334/* 329/* Allocate and free page tables. */
335 * Allocate and free page tables.
336 */
337 330
338pgd_t *pgd_alloc(struct mm_struct *mm) 331pgd_t *pgd_alloc(struct mm_struct *mm)
339{ 332{
@@ -341,14 +334,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
341 334
342 if (pgd) { 335 if (pgd) {
343 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); 336 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
344 memcpy(pgd + USER_PTRS_PER_PGD, 337 memcpy(pgd + USER_PTRS_PER_PGD,
345 swapper_pg_dir + USER_PTRS_PER_PGD, 338 swapper_pg_dir + USER_PTRS_PER_PGD,
346 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); 339 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
347 } 340 }
348 return pgd; 341 return pgd;
349} 342}
350 343
351void pgd_free(pgd_t *pgd) 344void pgd_free(struct mm_struct *mm, pgd_t *pgd)
352{ 345{
353 free_page((unsigned long) pgd); 346 free_page((unsigned long) pgd);
354} 347}
@@ -368,3 +361,15 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
368 pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 361 pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
369 return pte; 362 return pte;
370} 363}
364
365#ifdef CONFIG_3_LEVEL_PGTABLES
366pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
367{
368 pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
369
370 if (pmd)
371 memset(pmd, 0, PAGE_SIZE);
372
373 return pmd;
374}
375#endif
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index e66432f42485..9757085a0220 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -55,16 +55,6 @@ int __init init_maps(unsigned long physmem, unsigned long iomem,
55 return 0; 55 return 0;
56} 56}
57 57
58/* Changed during early boot */
59static unsigned long kmem_top = 0;
60
61unsigned long get_kmem_end(void)
62{
63 if (kmem_top == 0)
64 kmem_top = host_task_size - 1024 * 1024;
65 return kmem_top;
66}
67
68void map_memory(unsigned long virt, unsigned long phys, unsigned long len, 58void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
69 int r, int w, int x) 59 int r, int w, int x)
70{ 60{
@@ -174,10 +164,10 @@ __uml_setup("iomem=", parse_iomem,
174 * setup_iomem, both of which run during early boot. Afterwards, it's 164 * setup_iomem, both of which run during early boot. Afterwards, it's
175 * unchanged. 165 * unchanged.
176 */ 166 */
177struct iomem_region *iomem_regions = NULL; 167struct iomem_region *iomem_regions;
178 168
179/* Initialized in parse_iomem */ 169/* Initialized in parse_iomem and unchanged thereafter */
180int iomem_size = 0; 170int iomem_size;
181 171
182unsigned long find_iomem(char *driver, unsigned long *len_out) 172unsigned long find_iomem(char *driver, unsigned long *len_out)
183{ 173{
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 0eae00b3e588..c07961bedb75 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -4,19 +4,21 @@
4 * Licensed under the GPL 4 * Licensed under the GPL
5 */ 5 */
6 6
7#include "linux/stddef.h" 7#include <linux/stddef.h>
8#include "linux/err.h" 8#include <linux/err.h>
9#include "linux/hardirq.h" 9#include <linux/hardirq.h>
10#include "linux/mm.h" 10#include <linux/gfp.h>
11#include "linux/personality.h" 11#include <linux/mm.h>
12#include "linux/proc_fs.h" 12#include <linux/personality.h>
13#include "linux/ptrace.h" 13#include <linux/proc_fs.h>
14#include "linux/random.h" 14#include <linux/ptrace.h>
15#include "linux/sched.h" 15#include <linux/random.h>
16#include "linux/tick.h" 16#include <linux/sched.h>
17#include "linux/threads.h" 17#include <linux/tick.h>
18#include "asm/pgtable.h" 18#include <linux/threads.h>
19#include "asm/uaccess.h" 19#include <asm/current.h>
20#include <asm/pgtable.h>
21#include <asm/uaccess.h>
20#include "as-layout.h" 22#include "as-layout.h"
21#include "kern_util.h" 23#include "kern_util.h"
22#include "os.h" 24#include "os.h"
@@ -30,7 +32,7 @@
30 */ 32 */
31struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } }; 33struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
32 34
33static inline int external_pid(struct task_struct *task) 35static inline int external_pid(void)
34{ 36{
35 /* FIXME: Need to look up userspace_pid by cpu */ 37 /* FIXME: Need to look up userspace_pid by cpu */
36 return userspace_pid[0]; 38 return userspace_pid[0];
@@ -40,7 +42,7 @@ int pid_to_processor_id(int pid)
40{ 42{
41 int i; 43 int i;
42 44
43 for(i = 0; i < ncpus; i++) { 45 for (i = 0; i < ncpus; i++) {
44 if (cpu_tasks[i].pid == pid) 46 if (cpu_tasks[i].pid == pid)
45 return i; 47 return i;
46 } 48 }
@@ -60,8 +62,6 @@ unsigned long alloc_stack(int order, int atomic)
60 if (atomic) 62 if (atomic)
61 flags = GFP_ATOMIC; 63 flags = GFP_ATOMIC;
62 page = __get_free_pages(flags, order); 64 page = __get_free_pages(flags, order);
63 if (page == 0)
64 return 0;
65 65
66 return page; 66 return page;
67} 67}
@@ -80,15 +80,15 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
80static inline void set_current(struct task_struct *task) 80static inline void set_current(struct task_struct *task)
81{ 81{
82 cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task) 82 cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
83 { external_pid(task), task }); 83 { external_pid(), task });
84} 84}
85 85
86extern void arch_switch_to(struct task_struct *from, struct task_struct *to); 86extern void arch_switch_to(struct task_struct *to);
87 87
88void *_switch_to(void *prev, void *next, void *last) 88void *_switch_to(void *prev, void *next, void *last)
89{ 89{
90 struct task_struct *from = prev; 90 struct task_struct *from = prev;
91 struct task_struct *to= next; 91 struct task_struct *to = next;
92 92
93 to->thread.prev_sched = from; 93 to->thread.prev_sched = from;
94 set_current(to); 94 set_current(to);
@@ -99,13 +99,13 @@ void *_switch_to(void *prev, void *next, void *last)
99 switch_threads(&from->thread.switch_buf, 99 switch_threads(&from->thread.switch_buf,
100 &to->thread.switch_buf); 100 &to->thread.switch_buf);
101 101
102 arch_switch_to(current->thread.prev_sched, current); 102 arch_switch_to(current);
103 103
104 if (current->thread.saved_task) 104 if (current->thread.saved_task)
105 show_regs(&(current->thread.regs)); 105 show_regs(&(current->thread.regs));
106 next= current->thread.saved_task; 106 to = current->thread.saved_task;
107 prev= current; 107 from = current;
108 } while(current->thread.saved_task); 108 } while (current->thread.saved_task);
109 109
110 return current->thread.prev_sched; 110 return current->thread.prev_sched;
111 111
@@ -163,8 +163,6 @@ void new_thread_handler(void)
163void fork_handler(void) 163void fork_handler(void)
164{ 164{
165 force_flush_all(); 165 force_flush_all();
166 if (current->thread.prev_sched == NULL)
167 panic("blech");
168 166
169 schedule_tail(current->thread.prev_sched); 167 schedule_tail(current->thread.prev_sched);
170 168
@@ -173,7 +171,7 @@ void fork_handler(void)
173 * arch_switch_to isn't needed. We could want to apply this to 171 * arch_switch_to isn't needed. We could want to apply this to
174 * improve performance. -bb 172 * improve performance. -bb
175 */ 173 */
176 arch_switch_to(current->thread.prev_sched, current); 174 arch_switch_to(current);
177 175
178 current->thread.prev_sched = NULL; 176 current->thread.prev_sched = NULL;
179 177
@@ -204,7 +202,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
204 arch_copy_thread(&current->thread.arch, &p->thread.arch); 202 arch_copy_thread(&current->thread.arch, &p->thread.arch);
205 } 203 }
206 else { 204 else {
207 init_thread_registers(&p->thread.regs.regs); 205 get_safe_registers(p->thread.regs.regs.gp);
208 p->thread.request.u.thread = current->thread.request.u.thread; 206 p->thread.request.u.thread = current->thread.request.u.thread;
209 handler = new_thread_handler; 207 handler = new_thread_handler;
210 } 208 }
@@ -237,7 +235,7 @@ void default_idle(void)
237{ 235{
238 unsigned long long nsecs; 236 unsigned long long nsecs;
239 237
240 while(1) { 238 while (1) {
241 /* endless idle loop with no priority at all */ 239 /* endless idle loop with no priority at all */
242 240
243 /* 241 /*
@@ -256,53 +254,10 @@ void default_idle(void)
256 254
257void cpu_idle(void) 255void cpu_idle(void)
258{ 256{
259 cpu_tasks[current_thread->cpu].pid = os_getpid(); 257 cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
260 default_idle(); 258 default_idle();
261} 259}
262 260
263void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
264 pte_t *pte_out)
265{
266 pgd_t *pgd;
267 pud_t *pud;
268 pmd_t *pmd;
269 pte_t *pte;
270 pte_t ptent;
271
272 if (task->mm == NULL)
273 return ERR_PTR(-EINVAL);
274 pgd = pgd_offset(task->mm, addr);
275 if (!pgd_present(*pgd))
276 return ERR_PTR(-EINVAL);
277
278 pud = pud_offset(pgd, addr);
279 if (!pud_present(*pud))
280 return ERR_PTR(-EINVAL);
281
282 pmd = pmd_offset(pud, addr);
283 if (!pmd_present(*pmd))
284 return ERR_PTR(-EINVAL);
285
286 pte = pte_offset_kernel(pmd, addr);
287 ptent = *pte;
288 if (!pte_present(ptent))
289 return ERR_PTR(-EINVAL);
290
291 if (pte_out != NULL)
292 *pte_out = ptent;
293 return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
294}
295
296char *current_cmd(void)
297{
298#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
299 return "(Unknown)";
300#else
301 void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
302 return IS_ERR(addr) ? "(Unknown)": __va((unsigned long) addr);
303#endif
304}
305
306void dump_thread(struct pt_regs *regs, struct user *u) 261void dump_thread(struct pt_regs *regs, struct user *u)
307{ 262{
308} 263}
@@ -317,7 +272,7 @@ int user_context(unsigned long sp)
317 unsigned long stack; 272 unsigned long stack;
318 273
319 stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER); 274 stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
320 return stack != (unsigned long) current_thread; 275 return stack != (unsigned long) current_thread_info();
321} 276}
322 277
323extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end; 278extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
@@ -331,7 +286,7 @@ void do_uml_exitcalls(void)
331 (*call)(); 286 (*call)();
332} 287}
333 288
334char *uml_strdup(char *string) 289char *uml_strdup(const char *string)
335{ 290{
336 return kstrdup(string, GFP_KERNEL); 291 return kstrdup(string, GFP_KERNEL);
337} 292}
@@ -359,7 +314,7 @@ int strlen_user_proc(char __user *str)
359int smp_sigio_handler(void) 314int smp_sigio_handler(void)
360{ 315{
361#ifdef CONFIG_SMP 316#ifdef CONFIG_SMP
362 int cpu = current_thread->cpu; 317 int cpu = current_thread_info()->cpu;
363 IPI_handler(cpu); 318 IPI_handler(cpu);
364 if (cpu != 0) 319 if (cpu != 0)
365 return 1; 320 return 1;
@@ -369,7 +324,7 @@ int smp_sigio_handler(void)
369 324
370int cpu(void) 325int cpu(void)
371{ 326{
372 return current_thread->cpu; 327 return current_thread_info()->cpu;
373} 328}
374 329
375static atomic_t using_sysemu = ATOMIC_INIT(0); 330static atomic_t using_sysemu = ATOMIC_INIT(0);
@@ -435,7 +390,7 @@ int singlestepping(void * t)
435{ 390{
436 struct task_struct *task = t ? t : current; 391 struct task_struct *task = t ? t : current;
437 392
438 if ( ! (task->ptrace & PT_DTRACE) ) 393 if (!(task->ptrace & PT_DTRACE))
439 return 0; 394 return 0;
440 395
441 if (task->thread.singlestep_syscall) 396 if (task->thread.singlestep_syscall)
@@ -459,3 +414,46 @@ unsigned long arch_align_stack(unsigned long sp)
459 return sp & ~0xf; 414 return sp & ~0xf;
460} 415}
461#endif 416#endif
417
418unsigned long get_wchan(struct task_struct *p)
419{
420 unsigned long stack_page, sp, ip;
421 bool seen_sched = 0;
422
423 if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
424 return 0;
425
426 stack_page = (unsigned long) task_stack_page(p);
427 /* Bail if the process has no kernel stack for some reason */
428 if (stack_page == 0)
429 return 0;
430
431 sp = p->thread.switch_buf->JB_SP;
432 /*
433 * Bail if the stack pointer is below the bottom of the kernel
434 * stack for some reason
435 */
436 if (sp < stack_page)
437 return 0;
438
439 while (sp < stack_page + THREAD_SIZE) {
440 ip = *((unsigned long *) sp);
441 if (in_sched_functions(ip))
442 /* Ignore everything until we're above the scheduler */
443 seen_sched = 1;
444 else if (kernel_text_address(ip) && seen_sched)
445 return ip;
446
447 sp += sizeof(unsigned long);
448 }
449
450 return 0;
451}
452
453int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
454{
455 int cpu = current_thread_info()->cpu;
456
457 return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
458}
459
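get_wchan() is what backs /proc/<pid>/wchan, so the new stack walk is easy to sanity-check from userspace; a tiny illustrative reader (nothing UML-specific about it):

#include <stdio.h>

int main(void)
{
	char wchan[128] = "";
	FILE *f = fopen("/proc/1/wchan", "r");

	if (f == NULL)
		return 1;
	/* Prints the kernel symbol the process is blocked in, e.g. "do_wait". */
	if (fgets(wchan, sizeof(wchan), f) != NULL)
		printf("%s\n", wchan);
	fclose(f);
	return 0;
}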
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 04cebcf0679f..00197d3d21ec 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#include "linux/sched.h" 6#include "linux/sched.h"
7#include "kern_util.h"
7#include "os.h" 8#include "os.h"
8#include "skas.h" 9#include "skas.h"
9 10
@@ -11,7 +12,7 @@ void (*pm_power_off)(void);
11 12
12static void kill_off_processes(void) 13static void kill_off_processes(void)
13{ 14{
14 if(proc_mm) 15 if (proc_mm)
15 /* 16 /*
16 * FIXME: need to loop over userspace_pids 17 * FIXME: need to loop over userspace_pids
17 */ 18 */
@@ -21,8 +22,8 @@ static void kill_off_processes(void)
21 int pid, me; 22 int pid, me;
22 23
23 me = os_getpid(); 24 me = os_getpid();
24 for_each_process(p){ 25 for_each_process(p) {
25 if(p->mm == NULL) 26 if (p->mm == NULL)
26 continue; 27 continue;
27 28
28 pid = p->mm->context.id.u.pid; 29 pid = p->mm->context.id.u.pid;
diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c
index 89f9866a1354..2b272b63b514 100644
--- a/arch/um/kernel/sigio.c
+++ b/arch/um/kernel/sigio.c
@@ -1,18 +1,12 @@
1/* 1/*
2 * Copyright (C) 2002 - 2003 Jeff Dike (jdike@addtoit.com) 2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/kernel.h" 6#include <linux/interrupt.h>
7#include "linux/list.h"
8#include "linux/slab.h"
9#include "linux/signal.h"
10#include "linux/interrupt.h"
11#include "init.h"
12#include "sigio.h"
13#include "irq_user.h"
14#include "irq_kern.h" 7#include "irq_kern.h"
15#include "os.h" 8#include "os.h"
9#include "sigio.h"
16 10
17/* Protected by sigio_lock() called from write_sigio_workaround */ 11/* Protected by sigio_lock() called from write_sigio_workaround */
18static int sigio_irq_fd = -1; 12static int sigio_irq_fd = -1;
@@ -33,9 +27,9 @@ int write_sigio_irq(int fd)
33 err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt, 27 err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
34 IRQF_DISABLED|IRQF_SAMPLE_RANDOM, "write sigio", 28 IRQF_DISABLED|IRQF_SAMPLE_RANDOM, "write sigio",
35 NULL); 29 NULL);
36 if(err){ 30 if (err) {
37 printk("write_sigio_irq : um_request_irq failed, err = %d\n", 31 printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
38 err); 32 "err = %d\n", err);
39 return -1; 33 return -1;
40 } 34 }
41 sigio_irq_fd = fd; 35 sigio_irq_fd = fd;
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 19cb97733937..b0fce720c4d0 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -3,12 +3,12 @@
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/module.h" 6#include <linux/module.h>
7#include "linux/ptrace.h" 7#include <linux/ptrace.h>
8#include "linux/sched.h" 8#include <linux/sched.h>
9#include "asm/siginfo.h" 9#include <asm/siginfo.h>
10#include "asm/signal.h" 10#include <asm/signal.h>
11#include "asm/unistd.h" 11#include <asm/unistd.h>
12#include "frame_kern.h" 12#include "frame_kern.h"
13#include "kern_util.h" 13#include "kern_util.h"
14#include "sigcontext.h" 14#include "sigcontext.h"
@@ -36,7 +36,7 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
36 /* Did we come from a system call? */ 36 /* Did we come from a system call? */
37 if (PT_REGS_SYSCALL_NR(regs) >= 0) { 37 if (PT_REGS_SYSCALL_NR(regs) >= 0) {
38 /* If so, check system call restarting.. */ 38 /* If so, check system call restarting.. */
39 switch(PT_REGS_SYSCALL_RET(regs)) { 39 switch (PT_REGS_SYSCALL_RET(regs)) {
40 case -ERESTART_RESTARTBLOCK: 40 case -ERESTART_RESTARTBLOCK:
41 case -ERESTARTNOHAND: 41 case -ERESTARTNOHAND:
42 PT_REGS_SYSCALL_RET(regs) = -EINTR; 42 PT_REGS_SYSCALL_RET(regs) = -EINTR;
@@ -116,7 +116,7 @@ static int kern_do_signal(struct pt_regs *regs)
116 /* Did we come from a system call? */ 116 /* Did we come from a system call? */
117 if (!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)) { 117 if (!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)) {
118 /* Restart the system call - no handlers present */ 118 /* Restart the system call - no handlers present */
119 switch(PT_REGS_SYSCALL_RET(regs)) { 119 switch (PT_REGS_SYSCALL_RET(regs)) {
120 case -ERESTARTNOHAND: 120 case -ERESTARTNOHAND:
121 case -ERESTARTSYS: 121 case -ERESTARTSYS:
122 case -ERESTARTNOINTR: 122 case -ERESTARTNOINTR:
diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
index 8d07a7acb909..2c8583c1a344 100644
--- a/arch/um/kernel/skas/clone.c
+++ b/arch/um/kernel/skas/clone.c
@@ -1,17 +1,20 @@
1#include <sched.h> 1/*
2 * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
2#include <signal.h> 6#include <signal.h>
3#include <sys/mman.h> 7#include <sched.h>
4#include <sys/time.h>
5#include <asm/unistd.h> 8#include <asm/unistd.h>
9#include <sys/time.h>
6#include "as-layout.h" 10#include "as-layout.h"
11#include "kern_constants.h"
7#include "ptrace_user.h" 12#include "ptrace_user.h"
8#include "skas.h"
9#include "stub-data.h" 13#include "stub-data.h"
10#include "uml-config.h"
11#include "sysdep/stub.h" 14#include "sysdep/stub.h"
12#include "kern_constants.h"
13 15
14/* This is in a separate file because it needs to be compiled with any 16/*
17 * This is in a separate file because it needs to be compiled with any
15 * extraneous gcc flags (-pg, -fprofile-arcs, -ftest-coverage) disabled 18 * extraneous gcc flags (-pg, -fprofile-arcs, -ftest-coverage) disabled
16 * 19 *
17 * Use UM_KERN_PAGE_SIZE instead of PAGE_SIZE because that calls getpagesize 20 * Use UM_KERN_PAGE_SIZE instead of PAGE_SIZE because that calls getpagesize
@@ -26,25 +29,26 @@ stub_clone_handler(void)
26 29
27 err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD, 30 err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
28 STUB_DATA + UM_KERN_PAGE_SIZE / 2 - sizeof(void *)); 31 STUB_DATA + UM_KERN_PAGE_SIZE / 2 - sizeof(void *));
29 if(err != 0) 32 if (err != 0)
30 goto out; 33 goto out;
31 34
32 err = stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0); 35 err = stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0);
33 if(err) 36 if (err)
34 goto out; 37 goto out;
35 38
36 err = stub_syscall3(__NR_setitimer, ITIMER_VIRTUAL, 39 err = stub_syscall3(__NR_setitimer, ITIMER_VIRTUAL,
37 (long) &data->timer, 0); 40 (long) &data->timer, 0);
38 if(err) 41 if (err)
39 goto out; 42 goto out;
40 43
41 remap_stack(data->fd, data->offset); 44 remap_stack(data->fd, data->offset);
42 goto done; 45 goto done;
43 46
44 out: 47 out:
45 /* save current result. 48 /*
46 * Parent: pid; 49 * save current result.
47 * child: retcode of mmap already saved and it jumps around this 50 * Parent: pid;
51 * child: retcode of mmap already saved and it jumps around this
48 * assignment 52 * assignment
49 */ 53 */
50 data->err = err; 54 data->err = err;
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index f859ec306cd5..78b3e9f69d57 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -34,33 +34,14 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
34 if (!pte) 34 if (!pte)
35 goto out_pte; 35 goto out_pte;
36 36
37 /*
38 * There's an interaction between the skas0 stub pages, stack
39 * randomization, and the BUG at the end of exit_mmap. exit_mmap
40 * checks that the number of page tables freed is the same as had
41 * been allocated. If the stack is on the last page table page,
42 * then the stack pte page will be freed, and if not, it won't. To
43 * avoid having to know where the stack is, or if the process mapped
44 * something at the top of its address space for some other reason,
45 * we set TASK_SIZE to end at the start of the last page table.
46 * This keeps exit_mmap off the last page, but introduces a leak
47 * of that page. So, we hang onto it here and free it in
48 * destroy_context_skas.
49 */
50
51 mm->context.last_page_table = pmd_page_vaddr(*pmd);
52#ifdef CONFIG_3_LEVEL_PGTABLES
53 mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
54#endif
55
56 *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); 37 *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
57 *pte = pte_mkread(*pte); 38 *pte = pte_mkread(*pte);
58 return 0; 39 return 0;
59 40
60 out_pmd: 41 out_pmd:
61 pud_free(pud); 42 pud_free(mm, pud);
62 out_pte: 43 out_pte:
63 pmd_free(pmd); 44 pmd_free(mm, pmd);
64 out: 45 out:
65 return -ENOMEM; 46 return -ENOMEM;
66} 47}
@@ -76,24 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
76 stack = get_zeroed_page(GFP_KERNEL); 57 stack = get_zeroed_page(GFP_KERNEL);
77 if (stack == 0) 58 if (stack == 0)
78 goto out; 59 goto out;
79
80 /*
81 * This zeros the entry that pgd_alloc didn't, needed since
82 * we are about to reinitialize it, and want mm.nr_ptes to
83 * be accurate.
84 */
85 mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);
86
87 ret = init_stub_pte(mm, STUB_CODE,
88 (unsigned long) &__syscall_stub_start);
89 if (ret)
90 goto out_free;
91
92 ret = init_stub_pte(mm, STUB_DATA, stack);
93 if (ret)
94 goto out_free;
95
96 mm->nr_ptes--;
97 } 60 }
98 61
99 to_mm->id.stack = stack; 62 to_mm->id.stack = stack;
@@ -114,6 +77,11 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
114 to_mm->id.u.pid = copy_context_skas0(stack, 77 to_mm->id.u.pid = copy_context_skas0(stack,
115 from_mm->id.u.pid); 78 from_mm->id.u.pid);
116 else to_mm->id.u.pid = start_userspace(stack); 79 else to_mm->id.u.pid = start_userspace(stack);
80
81 if (to_mm->id.u.pid < 0) {
82 ret = to_mm->id.u.pid;
83 goto out_free;
84 }
117 } 85 }
118 86
119 ret = init_new_ldt(to_mm, from_mm); 87 ret = init_new_ldt(to_mm, from_mm);
@@ -132,24 +100,87 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
132 return ret; 100 return ret;
133} 101}
134 102
103void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
104{
105 struct page **pages;
106 int err, ret;
107
108 if (!skas_needs_stub)
109 return;
110
111 ret = init_stub_pte(mm, STUB_CODE,
112 (unsigned long) &__syscall_stub_start);
113 if (ret)
114 goto out;
115
116 ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
117 if (ret)
118 goto out;
119
120 pages = kmalloc(2 * sizeof(struct page *), GFP_KERNEL);
121 if (pages == NULL) {
122 printk(KERN_ERR "arch_dup_mmap failed to allocate 2 page "
123 "pointers\n");
124 goto out;
125 }
126
127 pages[0] = virt_to_page(&__syscall_stub_start);
128 pages[1] = virt_to_page(mm->context.id.stack);
129
130 /* dup_mmap already holds mmap_sem */
131 err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
132 VM_READ | VM_MAYREAD | VM_EXEC |
133 VM_MAYEXEC | VM_DONTCOPY, pages);
134 if (err) {
135 printk(KERN_ERR "install_special_mapping returned %d\n", err);
136 goto out_free;
137 }
138 return;
139
140out_free:
141 kfree(pages);
142out:
143 force_sigsegv(SIGSEGV, current);
144}
145
146void arch_exit_mmap(struct mm_struct *mm)
147{
148 pte_t *pte;
149
150 pte = virt_to_pte(mm, STUB_CODE);
151 if (pte != NULL)
152 pte_clear(mm, STUB_CODE, pte);
153
154 pte = virt_to_pte(mm, STUB_DATA);
155 if (pte == NULL)
156 return;
157
158 pte_clear(mm, STUB_DATA, pte);
159}
160
135void destroy_context(struct mm_struct *mm) 161void destroy_context(struct mm_struct *mm)
136{ 162{
137 struct mm_context *mmu = &mm->context; 163 struct mm_context *mmu = &mm->context;
138 164
139 if (proc_mm) 165 if (proc_mm)
140 os_close_file(mmu->id.u.mm_fd); 166 os_close_file(mmu->id.u.mm_fd);
141 else 167 else {
168 /*
169 * If init_new_context wasn't called, this will be
170 * zero, resulting in a kill(0), which will result in the
171 * whole UML suddenly dying. Also, cover negative and
172 * 1 cases, since they shouldn't happen either.
173 */
174 if (mmu->id.u.pid < 2) {
175 printk(KERN_ERR "corrupt mm_context - pid = %d\n",
176 mmu->id.u.pid);
177 return;
178 }
142 os_kill_ptraced_process(mmu->id.u.pid, 1); 179 os_kill_ptraced_process(mmu->id.u.pid, 1);
180 }
143 181
144 if (!proc_mm || !ptrace_faultinfo) { 182 if (skas_needs_stub)
145 free_page(mmu->id.stack); 183 free_page(mmu->id.stack);
146 pte_lock_deinit(virt_to_page(mmu->last_page_table));
147 pte_free_kernel((pte_t *) mmu->last_page_table);
148 dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
149#ifdef CONFIG_3_LEVEL_PGTABLES
150 pmd_free((pmd_t *) mmu->last_pmd);
151#endif
152 }
153 184
154 free_ldt(mmu); 185 free_ldt(mmu);
155} 186}
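For orientation, arch_dup_mmap() and arch_exit_mmap() are the generic mm hooks that UML now overrides (the empty defaults for other architectures live in include/asm-generic/mm_hooks.h); a comment-only sketch of where they fire:

/*
 *   dup_mmap()  -> arch_dup_mmap(oldmm, mm)  - install the stub code/data
 *                                              pages as a special mapping in
 *                                              every new address space
 *   exit_mmap() -> arch_exit_mmap(mm)        - clear the stub PTEs before the
 *                                              address space is torn down
 */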
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index fce389c2342f..2e9852c0d487 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -6,19 +6,25 @@
6#include "linux/init.h" 6#include "linux/init.h"
7#include "linux/sched.h" 7#include "linux/sched.h"
8#include "as-layout.h" 8#include "as-layout.h"
9#include "kern.h"
9#include "os.h" 10#include "os.h"
10#include "skas.h" 11#include "skas.h"
11 12
12int new_mm(unsigned long stack) 13int new_mm(unsigned long stack)
13{ 14{
14 int fd; 15 int fd, err;
15 16
16 fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0); 17 fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
17 if (fd < 0) 18 if (fd < 0)
18 return fd; 19 return fd;
19 20
20 if (skas_needs_stub) 21 if (skas_needs_stub) {
21 map_stub_pages(fd, STUB_CODE, STUB_DATA, stack); 22 err = map_stub_pages(fd, STUB_CODE, STUB_DATA, stack);
23 if (err) {
24 os_close_file(fd);
25 return err;
26 }
27 }
22 28
23 return fd; 29 return fd;
24} 30}
@@ -49,8 +55,14 @@ int __init start_uml(void)
49{ 55{
50 stack_protections((unsigned long) &cpu0_irqstack); 56 stack_protections((unsigned long) &cpu0_irqstack);
51 set_sigstack(cpu0_irqstack, THREAD_SIZE); 57 set_sigstack(cpu0_irqstack, THREAD_SIZE);
52 if (proc_mm) 58 if (proc_mm) {
53 userspace_pid[0] = start_userspace(0); 59 userspace_pid[0] = start_userspace(0);
60 if (userspace_pid[0] < 0) {
61 printf("start_uml - start_userspace returned %d\n",
62 userspace_pid[0]);
63 exit(1);
64 }
65 }
54 66
55 init_new_thread_signals(); 67 init_new_thread_signals();
56 68
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 50b476f2b38d..4e3b820bd2be 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -9,6 +9,9 @@
9#include "sysdep/ptrace.h" 9#include "sysdep/ptrace.h"
10#include "sysdep/syscalls.h" 10#include "sysdep/syscalls.h"
11 11
12extern int syscall_table_size;
13#define NR_syscalls (syscall_table_size / sizeof(void *))
14
12void handle_syscall(struct uml_pt_regs *r) 15void handle_syscall(struct uml_pt_regs *r)
13{ 16{
14 struct pt_regs *regs = container_of(r, struct pt_regs, regs); 17 struct pt_regs *regs = container_of(r, struct pt_regs, regs);
@@ -17,9 +20,6 @@ void handle_syscall(struct uml_pt_regs *r)
17 20
18 syscall_trace(r, 0); 21 syscall_trace(r, 0);
19 22
20 current->thread.nsyscalls++;
21 nsyscalls++;
22
23 /* 23 /*
24 * This should go in the declaration of syscall, but when I do that, 24 * This should go in the declaration of syscall, but when I do that,
25 * strace -f -c bash -c 'ls ; ls' breaks, sometimes not tracing 25 * strace -f -c bash -c 'ls ; ls' breaks, sometimes not tracing
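NR_syscalls is derived here from syscall_table_size, which the subarch exports alongside its table; something along these lines is assumed (a sketch of the usual definition, not a quote of the actual sys_call_table file):

/* Sketch: how a subarch typically provides syscall_table_size. */
typedef long (*syscall_handler_t)(void);
extern long sys_ni_syscall(void);

syscall_handler_t sys_call_table[] = {
	(syscall_handler_t) sys_ni_syscall,	/* ... one entry per syscall ... */
};

int syscall_table_size = sizeof(sys_call_table);

/* So NR_syscalls == syscall_table_size / sizeof(void *) == number of entries. */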
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 1d8b119f2d0e..e22c96993db3 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -3,128 +3,130 @@
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/err.h" 6#include <linux/err.h>
7#include "linux/highmem.h" 7#include <linux/highmem.h>
8#include "linux/mm.h" 8#include <linux/mm.h>
9#include "asm/current.h" 9#include <linux/sched.h>
10#include "asm/page.h" 10#include <asm/current.h>
11#include "asm/pgtable.h" 11#include <asm/page.h>
12#include <asm/pgtable.h>
12#include "kern_util.h" 13#include "kern_util.h"
13#include "os.h" 14#include "os.h"
14 15
15extern void *um_virt_to_phys(struct task_struct *task, unsigned long addr, 16pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
16 pte_t *pte_out);
17
18static unsigned long maybe_map(unsigned long virt, int is_write)
19{ 17{
20 pte_t pte; 18 pgd_t *pgd;
21 int err; 19 pud_t *pud;
20 pmd_t *pmd;
21
22 if (mm == NULL)
23 return NULL;
24
25 pgd = pgd_offset(mm, addr);
26 if (!pgd_present(*pgd))
27 return NULL;
28
29 pud = pud_offset(pgd, addr);
30 if (!pud_present(*pud))
31 return NULL;
22 32
23 void *phys = um_virt_to_phys(current, virt, &pte); 33 pmd = pmd_offset(pud, addr);
24 int dummy_code; 34 if (!pmd_present(*pmd))
35 return NULL;
36
37 return pte_offset_kernel(pmd, addr);
38}
39
40static pte_t *maybe_map(unsigned long virt, int is_write)
41{
42 pte_t *pte = virt_to_pte(current->mm, virt);
43 int err, dummy_code;
25 44
26 if (IS_ERR(phys) || (is_write && !pte_write(pte))) { 45 if ((pte == NULL) || !pte_present(*pte) ||
46 (is_write && !pte_write(*pte))) {
27 err = handle_page_fault(virt, 0, is_write, 1, &dummy_code); 47 err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
28 if (err) 48 if (err)
29 return -1UL; 49 return NULL;
30 phys = um_virt_to_phys(current, virt, NULL); 50 pte = virt_to_pte(current->mm, virt);
31 } 51 }
32 if (IS_ERR(phys)) 52 if (!pte_present(*pte))
33 phys = (void *) -1; 53 pte = NULL;
34 54
35 return (unsigned long) phys; 55 return pte;
36} 56}
37 57
38static int do_op_one_page(unsigned long addr, int len, int is_write, 58static int do_op_one_page(unsigned long addr, int len, int is_write,
39 int (*op)(unsigned long addr, int len, void *arg), void *arg) 59 int (*op)(unsigned long addr, int len, void *arg), void *arg)
40{ 60{
61 jmp_buf buf;
41 struct page *page; 62 struct page *page;
42 int n; 63 pte_t *pte;
64 int n, faulted;
43 65
44 addr = maybe_map(addr, is_write); 66 pte = maybe_map(addr, is_write);
45 if (addr == -1UL) 67 if (pte == NULL)
46 return -1; 68 return -1;
47 69
48 page = phys_to_page(addr); 70 page = pte_page(*pte);
49 addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) + 71 addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
50 (addr & ~PAGE_MASK); 72 (addr & ~PAGE_MASK);
51 73
52 n = (*op)(addr, len, arg); 74 current->thread.fault_catcher = &buf;
75
76 faulted = UML_SETJMP(&buf);
77 if (faulted == 0)
78 n = (*op)(addr, len, arg);
79 else
80 n = -1;
81
82 current->thread.fault_catcher = NULL;
53 83
54 kunmap_atomic(page, KM_UML_USERCOPY); 84 kunmap_atomic(page, KM_UML_USERCOPY);
55 85
56 return n; 86 return n;
57} 87}
58 88
59static void do_buffer_op(void *jmpbuf, void *arg_ptr) 89static int buffer_op(unsigned long addr, int len, int is_write,
90 int (*op)(unsigned long, int, void *), void *arg)
60{ 91{
61 va_list args; 92 int size, remain, n;
62 unsigned long addr; 93
63 int len, is_write, size, remain, n;
64 int (*op)(unsigned long, int, void *);
65 void *arg;
66 int *res;
67
68 va_copy(args, *(va_list *)arg_ptr);
69 addr = va_arg(args, unsigned long);
70 len = va_arg(args, int);
71 is_write = va_arg(args, int);
72 op = va_arg(args, void *);
73 arg = va_arg(args, void *);
74 res = va_arg(args, int *);
75 va_end(args);
76 size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len); 94 size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
77 remain = len; 95 remain = len;
78 96
79 current->thread.fault_catcher = jmpbuf;
80 n = do_op_one_page(addr, size, is_write, op, arg); 97 n = do_op_one_page(addr, size, is_write, op, arg);
81 if (n != 0) { 98 if (n != 0) {
82 *res = (n < 0 ? remain : 0); 99 remain = (n < 0 ? remain : 0);
83 goto out; 100 goto out;
84 } 101 }
85 102
86 addr += size; 103 addr += size;
87 remain -= size; 104 remain -= size;
88 if (remain == 0) { 105 if (remain == 0)
89 *res = 0;
90 goto out; 106 goto out;
91 }
92 107
93 while(addr < ((addr + remain) & PAGE_MASK)) { 108 while (addr < ((addr + remain) & PAGE_MASK)) {
94 n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg); 109 n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
95 if (n != 0) { 110 if (n != 0) {
96 *res = (n < 0 ? remain : 0); 111 remain = (n < 0 ? remain : 0);
97 goto out; 112 goto out;
98 } 113 }
99 114
100 addr += PAGE_SIZE; 115 addr += PAGE_SIZE;
101 remain -= PAGE_SIZE; 116 remain -= PAGE_SIZE;
102 } 117 }
103 if (remain == 0) { 118 if (remain == 0)
104 *res = 0;
105 goto out; 119 goto out;
106 }
107 120
108 n = do_op_one_page(addr, remain, is_write, op, arg); 121 n = do_op_one_page(addr, remain, is_write, op, arg);
109 if (n != 0) 122 if (n != 0) {
110 *res = (n < 0 ? remain : 0); 123 remain = (n < 0 ? remain : 0);
111 else *res = 0; 124 goto out;
112 out: 125 }
113 current->thread.fault_catcher = NULL;
114}
115
116static int buffer_op(unsigned long addr, int len, int is_write,
117 int (*op)(unsigned long addr, int len, void *arg),
118 void *arg)
119{
120 int faulted, res;
121
122 faulted = setjmp_wrapper(do_buffer_op, addr, len, is_write, op, arg,
123 &res);
124 if (!faulted)
125 return res;
126 126
127 return addr + len - (unsigned long) current->thread.fault_addr; 127 return 0;
128 out:
129 return remain;
128} 130}
129 131
130static int copy_chunk_from_user(unsigned long from, int len, void *arg) 132static int copy_chunk_from_user(unsigned long from, int len, void *arg)
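The reworked do_op_one_page() above installs current->thread.fault_catcher around the per-page operation so that bus_handler() (see trap.c below) can UML_LONGJMP back out of a faulting access instead of the old setjmp_wrapper()/va_list scheme. A minimal standalone sketch of that setjmp-based fault recovery, using plain sigsetjmp/siglongjmp and a SIGSEGV handler rather than the UML helpers (illustrative assumption, not code from this patch):

/*
 * Sketch only: attempt the copy under a setjmp buffer and have the
 * signal handler jump back on a fault, as do_op_one_page() does with
 * thread.fault_catcher.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static sigjmp_buf fault_catcher;

static void segv_handler(int sig)
{
        /* Return to the sigsetjmp() call site with a nonzero value. */
        siglongjmp(fault_catcher, 1);
}

static int copy_one_chunk(void *dst, const void *src, size_t len)
{
        if (sigsetjmp(fault_catcher, 1) != 0)
                return -1;      /* faulted mid-copy */
        memcpy(dst, src, len);
        return 0;
}

int main(void)
{
        char buf[16];
        struct sigaction sa = { .sa_handler = segv_handler };

        sigaction(SIGSEGV, &sa, NULL);

        printf("good copy: %d\n", copy_one_chunk(buf, "hello", 6));
        printf("bad copy:  %d\n", copy_one_chunk(buf, (void *) 0x1, 6));
        return 0;
}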
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 36d89cf8d20b..e1062ec36d40 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -21,7 +21,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
21#include "asm/smp.h" 21#include "asm/smp.h"
22#include "asm/processor.h" 22#include "asm/processor.h"
23#include "asm/spinlock.h" 23#include "asm/spinlock.h"
24#include "kern_util.h"
25#include "kern.h" 24#include "kern.h"
26#include "irq_user.h" 25#include "irq_user.h"
27#include "os.h" 26#include "os.h"
@@ -61,7 +60,7 @@ void smp_send_stop(void)
61 continue; 60 continue;
62 os_write_file(cpu_data[i].ipi_pipe[1], "S", 1); 61 os_write_file(cpu_data[i].ipi_pipe[1], "S", 1);
63 } 62 }
64 printk(KERN_INFO "done\n"); 63 printk(KERN_CONT "done\n");
65} 64}
66 65
67static cpumask_t smp_commenced_mask = CPU_MASK_NONE; 66static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
@@ -75,8 +74,7 @@ static int idle_proc(void *cpup)
75 if (err < 0) 74 if (err < 0)
76 panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err); 75 panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);
77 76
78 os_set_fd_async(cpu_data[cpu].ipi_pipe[0], 77 os_set_fd_async(cpu_data[cpu].ipi_pipe[0]);
79 current->thread.mode.tt.extern_pid);
80 78
81 wmb(); 79 wmb();
82 if (cpu_test_and_set(cpu, cpu_callin_map)) { 80 if (cpu_test_and_set(cpu, cpu_callin_map)) {
@@ -129,8 +127,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
129 if (err < 0) 127 if (err < 0)
130 panic("CPU#0 failed to create IPI pipe, errno = %d", -err); 128 panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
131 129
132 os_set_fd_async(cpu_data[me].ipi_pipe[0], 130 os_set_fd_async(cpu_data[me].ipi_pipe[0]);
133 current->thread.mode.tt.extern_pid);
134 131
135 for (cpu = 1; cpu < ncpus; cpu++) { 132 for (cpu = 1; cpu < ncpus; cpu++) {
136 printk(KERN_INFO "Booting processor %d...\n", cpu); 133 printk(KERN_INFO "Booting processor %d...\n", cpu);
@@ -143,9 +140,8 @@ void smp_prepare_cpus(unsigned int maxcpus)
143 while (waittime-- && !cpu_isset(cpu, cpu_callin_map)) 140 while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
144 cpu_relax(); 141 cpu_relax();
145 142
146 if (cpu_isset(cpu, cpu_callin_map)) 143 printk(KERN_INFO "%s\n",
147 printk(KERN_INFO "done\n"); 144 cpu_isset(cpu, cpu_calling_map) ? "done" : "failed");
148 else printk(KERN_INFO "failed\n");
149 } 145 }
150} 146}
151 147
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index b9d92b2089ae..9cffc628a37e 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -13,9 +13,6 @@
13#include "asm/uaccess.h" 13#include "asm/uaccess.h"
14#include "asm/unistd.h" 14#include "asm/unistd.h"
15 15
16/* Unlocked, I don't care if this is a bit off */
17int nsyscalls = 0;
18
19long sys_fork(void) 16long sys_fork(void)
20{ 17{
21 long ret; 18 long ret;
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 93263571d813..56d43d0a3960 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -1,38 +1,37 @@
1/* 1/*
2 * Copyright (C) 2001 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/sched.h" 6#include <linux/kallsyms.h>
7#include "linux/kernel.h" 7#include <linux/kernel.h>
8#include "linux/module.h" 8#include <linux/module.h>
9#include "linux/kallsyms.h" 9#include <linux/sched.h>
10#include "asm/page.h"
11#include "asm/processor.h"
12#include "sysrq.h" 10#include "sysrq.h"
13 11
14/* Catch non-i386 SUBARCH's. */ 12/* Catch non-i386 SUBARCH's. */
15#if !defined(CONFIG_UML_X86) || defined(CONFIG_64BIT) 13#if !defined(CONFIG_UML_X86) || defined(CONFIG_64BIT)
16void show_trace(struct task_struct *task, unsigned long * stack) 14void show_trace(struct task_struct *task, unsigned long * stack)
17{ 15{
18 unsigned long addr; 16 unsigned long addr;
19 17
20 if (!stack) { 18 if (!stack) {
21 stack = (unsigned long*) &stack; 19 stack = (unsigned long*) &stack;
22 WARN_ON(1); 20 WARN_ON(1);
23 } 21 }
24 22
25 printk("Call Trace: \n"); 23 printk(KERN_INFO "Call Trace: \n");
26 while (((long) stack & (THREAD_SIZE-1)) != 0) { 24 while (((long) stack & (THREAD_SIZE-1)) != 0) {
27 addr = *stack; 25 addr = *stack;
28 if (__kernel_text_address(addr)) { 26 if (__kernel_text_address(addr)) {
29 printk("%08lx: [<%08lx>]", (unsigned long) stack, addr); 27 printk(KERN_INFO "%08lx: [<%08lx>]",
30 print_symbol(" %s", addr); 28 (unsigned long) stack, addr);
31 printk("\n"); 29 print_symbol(KERN_CONT " %s", addr);
32 } 30 printk(KERN_CONT "\n");
33 stack++; 31 }
34 } 32 stack++;
35 printk("\n"); 33 }
34 printk(KERN_INFO "\n");
36} 35}
37#endif 36#endif
38 37
@@ -67,14 +66,13 @@ void show_stack(struct task_struct *task, unsigned long *esp)
67 } 66 }
68 67
69 stack = esp; 68 stack = esp;
70 for(i = 0; i < kstack_depth_to_print; i++) { 69 for (i = 0; i < kstack_depth_to_print; i++) {
71 if (kstack_end(stack)) 70 if (kstack_end(stack))
72 break; 71 break;
73 if (i && ((i % 8) == 0)) 72 if (i && ((i % 8) == 0))
74 printk("\n "); 73 printk("\n" KERN_INFO " ");
75 printk("%08lx ", *stack++); 74 printk("%08lx ", *stack++);
76 } 75 }
77 76
78 printk("Call Trace: \n");
79 show_trace(task, esp); 77 show_trace(task, esp);
80} 78}
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 1ac746a9eae1..e066e84493b1 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -3,12 +3,12 @@
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/clockchips.h" 6#include <linux/clockchips.h>
7#include "linux/interrupt.h" 7#include <linux/interrupt.h>
8#include "linux/jiffies.h" 8#include <linux/jiffies.h>
9#include "linux/threads.h" 9#include <linux/threads.h>
10#include "asm/irq.h" 10#include <asm/irq.h>
11#include "asm/param.h" 11#include <asm/param.h>
12#include "kern_util.h" 12#include "kern_util.h"
13#include "os.h" 13#include "os.h"
14 14
@@ -32,7 +32,7 @@ void timer_handler(int sig, struct uml_pt_regs *regs)
32static void itimer_set_mode(enum clock_event_mode mode, 32static void itimer_set_mode(enum clock_event_mode mode,
33 struct clock_event_device *evt) 33 struct clock_event_device *evt)
34{ 34{
35 switch(mode) { 35 switch (mode) {
36 case CLOCK_EVT_MODE_PERIODIC: 36 case CLOCK_EVT_MODE_PERIODIC:
37 set_interval(); 37 set_interval();
38 break; 38 break;
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index f4a0e407eee4..d175d0566af0 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -3,9 +3,10 @@
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/mm.h" 6#include <linux/mm.h>
7#include "asm/pgtable.h" 7#include <linux/sched.h>
8#include "asm/tlbflush.h" 8#include <asm/pgtable.h>
9#include <asm/tlbflush.h>
9#include "as-layout.h" 10#include "as-layout.h"
10#include "mem_user.h" 11#include "mem_user.h"
11#include "os.h" 12#include "os.h"
@@ -56,7 +57,7 @@ static int do_ops(struct host_vm_change *hvc, int end,
56 57
57 for (i = 0; i < end && !ret; i++) { 58 for (i = 0; i < end && !ret; i++) {
58 op = &hvc->ops[i]; 59 op = &hvc->ops[i];
59 switch(op->type) { 60 switch (op->type) {
60 case MMAP: 61 case MMAP:
61 ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len, 62 ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
62 op->u.mmap.prot, op->u.mmap.fd, 63 op->u.mmap.prot, op->u.mmap.fd,
@@ -183,27 +184,30 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
183 184
184 pte = pte_offset_kernel(pmd, addr); 185 pte = pte_offset_kernel(pmd, addr);
185 do { 186 do {
187 if ((addr >= STUB_START) && (addr < STUB_END))
188 continue;
189
186 r = pte_read(*pte); 190 r = pte_read(*pte);
187 w = pte_write(*pte); 191 w = pte_write(*pte);
188 x = pte_exec(*pte); 192 x = pte_exec(*pte);
189 if (!pte_young(*pte)) { 193 if (!pte_young(*pte)) {
190 r = 0; 194 r = 0;
191 w = 0; 195 w = 0;
192 } else if (!pte_dirty(*pte)) { 196 } else if (!pte_dirty(*pte))
193 w = 0; 197 w = 0;
194 } 198
195 prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) | 199 prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
196 (x ? UM_PROT_EXEC : 0)); 200 (x ? UM_PROT_EXEC : 0));
197 if (hvc->force || pte_newpage(*pte)) { 201 if (hvc->force || pte_newpage(*pte)) {
198 if (pte_present(*pte)) 202 if (pte_present(*pte))
199 ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK, 203 ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
200 PAGE_SIZE, prot, hvc); 204 PAGE_SIZE, prot, hvc);
201 else ret = add_munmap(addr, PAGE_SIZE, hvc); 205 else
202 } 206 ret = add_munmap(addr, PAGE_SIZE, hvc);
203 else if (pte_newprot(*pte)) 207 } else if (pte_newprot(*pte))
204 ret = add_mprotect(addr, PAGE_SIZE, prot, hvc); 208 ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
205 *pte = pte_mkuptodate(*pte); 209 *pte = pte_mkuptodate(*pte);
206 } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret)); 210 } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
207 return ret; 211 return ret;
208} 212}
209 213
@@ -225,7 +229,7 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
225 } 229 }
226 } 230 }
227 else ret = update_pte_range(pmd, addr, next, hvc); 231 else ret = update_pte_range(pmd, addr, next, hvc);
228 } while (pmd++, addr = next, ((addr != end) && !ret)); 232 } while (pmd++, addr = next, ((addr < end) && !ret));
229 return ret; 233 return ret;
230} 234}
231 235
@@ -247,7 +251,7 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
247 } 251 }
248 } 252 }
249 else ret = update_pmd_range(pud, addr, next, hvc); 253 else ret = update_pmd_range(pud, addr, next, hvc);
250 } while (pud++, addr = next, ((addr != end) && !ret)); 254 } while (pud++, addr = next, ((addr < end) && !ret));
251 return ret; 255 return ret;
252} 256}
253 257
@@ -270,7 +274,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
270 } 274 }
271 } 275 }
272 else ret = update_pud_range(pgd, addr, next, &hvc); 276 else ret = update_pud_range(pgd, addr, next, &hvc);
273 } while (pgd++, addr = next, ((addr != end_addr) && !ret)); 277 } while (pgd++, addr = next, ((addr < end_addr) && !ret));
274 278
275 if (!ret) 279 if (!ret)
276 ret = do_ops(&hvc, hvc.index, 1); 280 ret = do_ops(&hvc, hvc.index, 1);
@@ -485,9 +489,6 @@ void __flush_tlb_one(unsigned long addr)
485static void fix_range(struct mm_struct *mm, unsigned long start_addr, 489static void fix_range(struct mm_struct *mm, unsigned long start_addr,
486 unsigned long end_addr, int force) 490 unsigned long end_addr, int force)
487{ 491{
488 if (!proc_mm && (end_addr > STUB_START))
489 end_addr = STUB_START;
490
491 fix_range_common(mm, start_addr, end_addr, force); 492 fix_range_common(mm, start_addr, end_addr, force);
492} 493}
493 494
@@ -499,10 +500,9 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
499 else fix_range(vma->vm_mm, start, end, 0); 500 else fix_range(vma->vm_mm, start, end, 0);
500} 501}
501 502
502void flush_tlb_mm(struct mm_struct *mm) 503void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
504 unsigned long end)
503{ 505{
504 unsigned long end;
505
506 /* 506 /*
507 * Don't bother flushing if this address space is about to be 507 * Don't bother flushing if this address space is about to be
508 * destroyed. 508 * destroyed.
@@ -510,8 +510,17 @@ void flush_tlb_mm(struct mm_struct *mm)
510 if (atomic_read(&mm->mm_users) == 0) 510 if (atomic_read(&mm->mm_users) == 0)
511 return; 511 return;
512 512
513 end = proc_mm ? task_size : STUB_START; 513 fix_range(mm, start, end, 0);
514 fix_range(mm, 0, end, 0); 514}
515
516void flush_tlb_mm(struct mm_struct *mm)
517{
518 struct vm_area_struct *vma = mm->mmap;
519
520 while (vma != NULL) {
521 fix_range(mm, vma->vm_start, vma->vm_end, 0);
522 vma = vma->vm_next;
523 }
515} 524}
516 525
517void force_flush_all(void) 526void force_flush_all(void)
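update_pte_range() above derives the host mapping protection from each pte's accessed/dirty state: a page that is not young gets no access at all, and a young-but-clean page loses write permission, so the next access or write faults and lets the kernel update the bits in software. A small self-contained sketch of that computation (hypothetical helper, not part of this patch):

/* Sketch of the young/dirty -> host protection logic in update_pte_range(). */
#include <stdio.h>

#define UM_PROT_READ  1
#define UM_PROT_WRITE 2
#define UM_PROT_EXEC  4

static int host_prot(int r, int w, int x, int young, int dirty)
{
        if (!young)
                r = w = 0;      /* force a fault on the next access */
        else if (!dirty)
                w = 0;          /* force a fault on the next write */

        return (r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
               (x ? UM_PROT_EXEC : 0);
}

int main(void)
{
        printf("old page:   %d\n", host_prot(1, 1, 0, 0, 0));   /* prints 0 */
        printf("clean page: %d\n", host_prot(1, 1, 0, 1, 0));   /* prints 1 */
        printf("dirty page: %d\n", host_prot(1, 1, 0, 1, 1));   /* prints 3 */
        return 0;
}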
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index cb3321f8e0a9..44e490419495 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -13,6 +13,7 @@
13#include "as-layout.h" 13#include "as-layout.h"
14#include "kern_util.h" 14#include "kern_util.h"
15#include "os.h" 15#include "os.h"
16#include "skas.h"
16#include "sysdep/sigcontext.h" 17#include "sysdep/sigcontext.h"
17 18
18/* 19/*
@@ -128,7 +129,19 @@ static void bad_segv(struct faultinfo fi, unsigned long ip)
128 force_sig_info(SIGSEGV, &si, current); 129 force_sig_info(SIGSEGV, &si, current);
129} 130}
130 131
131static void segv_handler(int sig, struct uml_pt_regs *regs) 132void fatal_sigsegv(void)
133{
134 force_sigsegv(SIGSEGV, current);
135 do_signal();
136 /*
137 * This is to tell gcc that we're not returning - do_signal
138 * can, in general, return, but in this case, it's not, since
139 * we just got a fatal SIGSEGV queued.
140 */
141 os_dump_core();
142}
143
144void segv_handler(int sig, struct uml_pt_regs *regs)
132{ 145{
133 struct faultinfo * fi = UPT_FAULTINFO(regs); 146 struct faultinfo * fi = UPT_FAULTINFO(regs);
134 147
@@ -216,9 +229,6 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
216 229
217void relay_signal(int sig, struct uml_pt_regs *regs) 230void relay_signal(int sig, struct uml_pt_regs *regs)
218{ 231{
219 if (arch_handle_signal(sig, regs))
220 return;
221
222 if (!UPT_IS_USER(regs)) { 232 if (!UPT_IS_USER(regs)) {
223 if (sig == SIGBUS) 233 if (sig == SIGBUS)
224 printk(KERN_ERR "Bus error - the host /dev/shm or /tmp " 234 printk(KERN_ERR "Bus error - the host /dev/shm or /tmp "
@@ -226,31 +236,24 @@ void relay_signal(int sig, struct uml_pt_regs *regs)
226 panic("Kernel mode signal %d", sig); 236 panic("Kernel mode signal %d", sig);
227 } 237 }
228 238
239 arch_examine_signal(sig, regs);
240
229 current->thread.arch.faultinfo = *UPT_FAULTINFO(regs); 241 current->thread.arch.faultinfo = *UPT_FAULTINFO(regs);
230 force_sig(sig, current); 242 force_sig(sig, current);
231} 243}
232 244
233static void bus_handler(int sig, struct uml_pt_regs *regs) 245void bus_handler(int sig, struct uml_pt_regs *regs)
234{ 246{
235 if (current->thread.fault_catcher != NULL) 247 if (current->thread.fault_catcher != NULL)
236 UML_LONGJMP(current->thread.fault_catcher, 1); 248 UML_LONGJMP(current->thread.fault_catcher, 1);
237 else relay_signal(sig, regs); 249 else relay_signal(sig, regs);
238} 250}
239 251
240static void winch(int sig, struct uml_pt_regs *regs) 252void winch(int sig, struct uml_pt_regs *regs)
241{ 253{
242 do_IRQ(WINCH_IRQ, regs); 254 do_IRQ(WINCH_IRQ, regs);
243} 255}
244 256
245const struct kern_handlers handlinfo_kern = {
246 .relay_signal = relay_signal,
247 .winch = winch,
248 .bus_handler = bus_handler,
249 .page_fault = segv_handler,
250 .sigio_handler = sigio_handler,
251 .timer_handler = timer_handler
252};
253
254void trap_init(void) 257void trap_init(void)
255{ 258{
256} 259}
diff --git a/arch/um/kernel/uaccess.c b/arch/um/kernel/uaccess.c
index d7436aacd26f..f0f4b040d7c5 100644
--- a/arch/um/kernel/uaccess.c
+++ b/arch/um/kernel/uaccess.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk) 2 * Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk)
3 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) 3 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Licensed under the GPL 4 * Licensed under the GPL
5 */ 5 */
6 6
7/* These are here rather than tt/uaccess.c because skas mode needs them in 7/*
8 * These are here rather than tt/uaccess.c because skas mode needs them in
8 * order to do SIGBUS recovery when a tmpfs mount runs out of room. 9 * order to do SIGBUS recovery when a tmpfs mount runs out of room.
9 */ 10 */
10 11
@@ -25,6 +26,8 @@ int __do_copy_to_user(void *to, const void *from, int n,
25 26
26 fault = __do_user_copy(to, from, n, fault_addr, fault_catcher, 27 fault = __do_user_copy(to, from, n, fault_addr, fault_catcher,
27 __do_copy, &faulted); 28 __do_copy, &faulted);
28 if(!faulted) return(0); 29 if (!faulted)
29 else return(n - (fault - (unsigned long) to)); 30 return 0;
31 else
32 return n - (fault - (unsigned long) to);
30} 33}
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index f1c71393f578..468aba990dbd 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -3,22 +3,23 @@
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/delay.h" 6#include <linux/delay.h>
7#include "linux/mm.h" 7#include <linux/init.h>
8#include "linux/module.h" 8#include <linux/mm.h>
9#include "linux/seq_file.h" 9#include <linux/module.h>
10#include "linux/string.h" 10#include <linux/seq_file.h>
11#include "linux/utsname.h" 11#include <linux/string.h>
12#include "asm/pgtable.h" 12#include <linux/utsname.h>
13#include "asm/processor.h" 13#include <asm/pgtable.h>
14#include "asm/setup.h" 14#include <asm/processor.h>
15#include "arch.h" 15#include <asm/setup.h>
16#include "as-layout.h" 16#include "as-layout.h"
17#include "arch.h"
17#include "init.h" 18#include "init.h"
18#include "kern.h" 19#include "kern.h"
20#include "kern_util.h"
19#include "mem_user.h" 21#include "mem_user.h"
20#include "os.h" 22#include "os.h"
21#include "skas.h"
22 23
23#define DEFAULT_COMMAND_LINE "root=98:0" 24#define DEFAULT_COMMAND_LINE "root=98:0"
24 25
@@ -100,8 +101,6 @@ const struct seq_operations cpuinfo_op = {
100}; 101};
101 102
102/* Set in linux_main */ 103/* Set in linux_main */
103unsigned long host_task_size;
104unsigned long task_size;
105unsigned long uml_physmem; 104unsigned long uml_physmem;
106unsigned long uml_reserved; /* Also modified in mem_init */ 105unsigned long uml_reserved; /* Also modified in mem_init */
107unsigned long start_vm; 106unsigned long start_vm;
@@ -197,20 +196,19 @@ __uml_setup("--help", Usage,
197" Prints this message.\n\n" 196" Prints this message.\n\n"
198); 197);
199 198
200static int __init uml_checksetup(char *line, int *add) 199static void __init uml_checksetup(char *line, int *add)
201{ 200{
202 struct uml_param *p; 201 struct uml_param *p;
203 202
204 p = &__uml_setup_start; 203 p = &__uml_setup_start;
205 while(p < &__uml_setup_end) { 204 while (p < &__uml_setup_end) {
206 int n; 205 int n;
207 206
208 n = strlen(p->str); 207 n = strlen(p->str);
209 if (!strncmp(line, p->str, n) && p->setup_func(line + n, add)) 208 if (!strncmp(line, p->str, n) && p->setup_func(line + n, add))
210 return 1; 209 return;
211 p++; 210 p++;
212 } 211 }
213 return 0;
214} 212}
215 213
216static void __init uml_postsetup(void) 214static void __init uml_postsetup(void)
@@ -218,13 +216,30 @@ static void __init uml_postsetup(void)
218 initcall_t *p; 216 initcall_t *p;
219 217
220 p = &__uml_postsetup_start; 218 p = &__uml_postsetup_start;
221 while(p < &__uml_postsetup_end) { 219 while (p < &__uml_postsetup_end) {
222 (*p)(); 220 (*p)();
223 p++; 221 p++;
224 } 222 }
225 return; 223 return;
226} 224}
227 225
226static int panic_exit(struct notifier_block *self, unsigned long unused1,
227 void *unused2)
228{
229 bust_spinlocks(1);
230 show_regs(&(current->thread.regs));
231 bust_spinlocks(0);
232 uml_exitcode = 1;
233 os_dump_core();
234 return 0;
235}
236
237static struct notifier_block panic_exit_notifier = {
238 .notifier_call = panic_exit,
239 .next = NULL,
240 .priority = 0
241};
242
228/* Set during early boot */ 243/* Set during early boot */
229unsigned long brk_start; 244unsigned long brk_start;
230unsigned long end_iomem; 245unsigned long end_iomem;
@@ -234,20 +249,6 @@ EXPORT_SYMBOL(end_iomem);
234 249
235extern char __binary_start; 250extern char __binary_start;
236 251
237static unsigned long set_task_sizes_skas(unsigned long *task_size_out)
238{
239 /* Round up to the nearest 4M */
240 unsigned long host_task_size = ROUND_4M((unsigned long)
241 &host_task_size);
242
243 if (!skas_needs_stub)
244 *task_size_out = host_task_size;
245 else
246 *task_size_out = STUB_START & PGDIR_MASK;
247
248 return host_task_size;
249}
250
251int __init linux_main(int argc, char **argv) 252int __init linux_main(int argc, char **argv)
252{ 253{
253 unsigned long avail, diff; 254 unsigned long avail, diff;
@@ -278,13 +279,6 @@ int __init linux_main(int argc, char **argv)
278 279
279 printf("UML running in %s mode\n", mode); 280 printf("UML running in %s mode\n", mode);
280 281
281 host_task_size = set_task_sizes_skas(&task_size);
282
283 /*
284 * Setting up handlers to 'sig_info' struct
285 */
286 os_fill_handlinfo(handlinfo_kern);
287
288 brk_start = (unsigned long) sbrk(0); 282 brk_start = (unsigned long) sbrk(0);
289 283
290 /* 284 /*
@@ -309,7 +303,7 @@ int __init linux_main(int argc, char **argv)
309 303
310 highmem = 0; 304 highmem = 0;
311 iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK; 305 iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
312 max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC; 306 max_physmem = CONFIG_TOP_ADDR - uml_physmem - iomem_size - MIN_VMALLOC;
313 307
314 /* 308 /*
315 * Zones have to begin on a 1 << MAX_ORDER page boundary, 309 * Zones have to begin on a 1 << MAX_ORDER page boundary,
@@ -341,7 +335,7 @@ int __init linux_main(int argc, char **argv)
341 } 335 }
342 336
343 virtmem_size = physmem_size; 337 virtmem_size = physmem_size;
344 avail = get_kmem_end() - start_vm; 338 avail = CONFIG_TOP_ADDR - start_vm;
345 if (physmem_size > avail) 339 if (physmem_size > avail)
346 virtmem_size = avail; 340 virtmem_size = avail;
347 end_vm = start_vm + virtmem_size; 341 end_vm = start_vm + virtmem_size;
@@ -350,6 +344,9 @@ int __init linux_main(int argc, char **argv)
350 printf("Kernel virtual memory size shrunk to %lu bytes\n", 344 printf("Kernel virtual memory size shrunk to %lu bytes\n",
351 virtmem_size); 345 virtmem_size);
352 346
347 atomic_notifier_chain_register(&panic_notifier_list,
348 &panic_exit_notifier);
349
353 uml_postsetup(); 350 uml_postsetup();
354 351
355 stack_protections((unsigned long) &init_thread_info); 352 stack_protections((unsigned long) &init_thread_info);
@@ -358,29 +355,8 @@ int __init linux_main(int argc, char **argv)
358 return start_uml(); 355 return start_uml();
359} 356}
360 357
361extern int uml_exitcode;
362
363static int panic_exit(struct notifier_block *self, unsigned long unused1,
364 void *unused2)
365{
366 bust_spinlocks(1);
367 show_regs(&(current->thread.regs));
368 bust_spinlocks(0);
369 uml_exitcode = 1;
370 os_dump_core();
371 return 0;
372}
373
374static struct notifier_block panic_exit_notifier = {
375 .notifier_call = panic_exit,
376 .next = NULL,
377 .priority = 0
378};
379
380void __init setup_arch(char **cmdline_p) 358void __init setup_arch(char **cmdline_p)
381{ 359{
382 atomic_notifier_chain_register(&panic_notifier_list,
383 &panic_exit_notifier);
384 paging_init(); 360 paging_init();
385 strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); 361 strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
386 *cmdline_p = command_line; 362 *cmdline_p = command_line;
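linux_main() now registers panic_exit_notifier on panic_notifier_list before uml_postsetup() runs, rather than waiting for setup_arch(). The same notifier-chain API is available to any code that wants a callback at panic time; a hedged sketch as a hypothetical out-of-tree module (the my_panic_* names are illustrative, not from this patch):

/* Sketch: hook panic_notifier_list the same way linux_main() does. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int my_panic_cb(struct notifier_block *self, unsigned long event,
                       void *data)
{
        printk(KERN_EMERG "my_panic_cb: panic notifier fired\n");
        return NOTIFY_DONE;
}

static struct notifier_block my_panic_nb = {
        .notifier_call = my_panic_cb,
};

static int __init my_panic_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
        return 0;
}

static void __exit my_panic_exit(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list, &my_panic_nb);
}

module_init(my_panic_init);
module_exit(my_panic_exit);
MODULE_LICENSE("GPL");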
diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c
index 039e16efcd55..81e07e2be3ae 100644
--- a/arch/um/kernel/umid.c
+++ b/arch/um/kernel/umid.c
@@ -1,13 +1,12 @@
1/* 1/*
2 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "asm/errno.h" 6#include <asm/errno.h>
7#include "init.h" 7#include "init.h"
8#include "os.h"
9#include "kern.h" 8#include "kern.h"
10#include "linux/kernel.h" 9#include "os.h"
11 10
12/* Changed by set_umid_arg */ 11/* Changed by set_umid_arg */
13static int umid_inited = 0; 12static int umid_inited = 0;
@@ -16,16 +15,16 @@ static int __init set_umid_arg(char *name, int *add)
16{ 15{
17 int err; 16 int err;
18 17
19 if(umid_inited){ 18 if (umid_inited) {
20 printf("umid already set\n"); 19 printf("umid already set\n");
21 return 0; 20 return 0;
22 } 21 }
23 22
24 *add = 0; 23 *add = 0;
25 err = set_umid(name); 24 err = set_umid(name);
26 if(err == -EEXIST) 25 if (err == -EEXIST)
27 printf("umid '%s' already in use\n", name); 26 printf("umid '%s' already in use\n", name);
28 else if(!err) 27 else if (!err)
29 umid_inited = 1; 28 umid_inited = 1;
30 29
31 return 0; 30 return 0;
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 13df191e2b41..5828c1d54505 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -23,7 +23,7 @@ SECTIONS
23 __init_begin = .; 23 __init_begin = .;
24 .init.text : { 24 .init.text : {
25 _sinittext = .; 25 _sinittext = .;
26 *(.init.text) 26 INIT_TEXT
27 _einittext = .; 27 _einittext = .;
28 } 28 }
29 . = ALIGN(4096); 29 . = ALIGN(4096);
@@ -48,7 +48,7 @@ SECTIONS
48 48
49 #include "asm/common.lds.S" 49 #include "asm/common.lds.S"
50 50
51 init.data : { *(init.data) } 51 init.data : { INIT_DATA }
52 .data : 52 .data :
53 { 53 {
54 . = ALIGN(KERNEL_STACK_SIZE); /* init_task */ 54 . = ALIGN(KERNEL_STACK_SIZE); /* init_task */