46 files changed, 1340 insertions(+), 1429 deletions(-)
diff --git a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h
index e16ebce1290d..884a9c17eea0 100644
--- a/arch/um/include/irq_user.h
+++ b/arch/um/include/irq_user.h
@@ -1,12 +1,12 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef __IRQ_USER_H__ | 6 | #ifndef __IRQ_USER_H__ |
7 | #define __IRQ_USER_H__ | 7 | #define __IRQ_USER_H__ |
8 | 8 | ||
9 | #include "uml-config.h" | 9 | #include "sysdep/ptrace.h" |
10 | 10 | ||
11 | struct irq_fd { | 11 | struct irq_fd { |
12 | struct irq_fd *next; | 12 | struct irq_fd *next; |
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index 9d3110f41ddb..7e7e84e4bc53 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -1,5 +1,5 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
@@ -8,7 +8,6 @@
8 | 8 | ||
9 | #include "sysdep/ptrace.h" | 9 | #include "sysdep/ptrace.h" |
10 | #include "sysdep/faultinfo.h" | 10 | #include "sysdep/faultinfo.h" |
11 | #include "uml-config.h" | ||
12 | 11 | ||
13 | typedef void (*kern_hndl)(int, struct uml_pt_regs *); | 12 | typedef void (*kern_hndl)(int, struct uml_pt_regs *); |
14 | 13 | ||
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index daf188843a94..96f333cd560d 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -1,20 +1,18 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef __OS_H__ | 6 | #ifndef __OS_H__ |
7 | #define __OS_H__ | 7 | #define __OS_H__ |
8 | 8 | ||
9 | #include "uml-config.h" | 9 | #include <stdarg.h> |
10 | #include "asm/types.h" | ||
11 | #include "../os/include/file.h" | ||
12 | #include "sysdep/ptrace.h" | ||
13 | #include "kern_util.h" | ||
14 | #include "skas/mm_id.h" | ||
15 | #include "irq_user.h" | 10 | #include "irq_user.h" |
11 | #include "kern_util.h" | ||
12 | #include "longjmp.h" | ||
13 | #include "mm_id.h" | ||
16 | #include "sysdep/tls.h" | 14 | #include "sysdep/tls.h" |
17 | #include "sysdep/archsetjmp.h" | 15 | #include "../os/include/file.h" |
18 | 16 | ||
19 | #define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR)) | 17 | #define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR)) |
20 | 18 | ||
@@ -140,7 +138,7 @@ extern int os_set_slip(int fd);
140 | extern int os_set_owner(int fd, int pid); | 138 | extern int os_set_owner(int fd, int pid); |
141 | extern int os_mode_fd(int fd, int mode); | 139 | extern int os_mode_fd(int fd, int mode); |
142 | 140 | ||
143 | extern int os_seek_file(int fd, __u64 offset); | 141 | extern int os_seek_file(int fd, unsigned long long offset); |
144 | extern int os_open_file(char *file, struct openflags flags, int mode); | 142 | extern int os_open_file(char *file, struct openflags flags, int mode); |
145 | extern int os_read_file(int fd, void *buf, int len); | 143 | extern int os_read_file(int fd, void *buf, int len); |
146 | extern int os_write_file(int fd, const void *buf, int count); | 144 | extern int os_write_file(int fd, const void *buf, int count); |
diff --git a/arch/um/include/skas/skas.h b/arch/um/include/skas/skas.h
index b4a95e485929..b073f8a86bd3 100644
--- a/arch/um/include/skas/skas.h
+++ b/arch/um/include/skas/skas.h
@@ -1,12 +1,11 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef __SKAS_H | 6 | #ifndef __SKAS_H |
7 | #define __SKAS_H | 7 | #define __SKAS_H |
8 | 8 | ||
9 | #include "mm_id.h" | ||
10 | #include "sysdep/ptrace.h" | 9 | #include "sysdep/ptrace.h" |
11 | 10 | ||
12 | extern int userspace_pid[]; | 11 | extern int userspace_pid[]; |
diff --git a/arch/um/include/sysdep-i386/ptrace.h b/arch/um/include/sysdep-i386/ptrace.h
index d765175d42be..b843941acd1d 100644
--- a/arch/um/include/sysdep-i386/ptrace.h
+++ b/arch/um/include/sysdep-i386/ptrace.h
@@ -97,12 +97,12 @@ struct syscall_args {
97 | }; | 97 | }; |
98 | 98 | ||
99 | #define SYSCALL_ARGS(r) ((struct syscall_args) \ | 99 | #define SYSCALL_ARGS(r) ((struct syscall_args) \ |
100 | { .args = { UPT_SYSCALL_ARG1(r), \ | 100 | { .args = { UPT_SYSCALL_ARG1(r), \ |
101 | UPT_SYSCALL_ARG2(r), \ | 101 | UPT_SYSCALL_ARG2(r), \ |
102 | UPT_SYSCALL_ARG3(r), \ | 102 | UPT_SYSCALL_ARG3(r), \ |
103 | UPT_SYSCALL_ARG4(r), \ | 103 | UPT_SYSCALL_ARG4(r), \ |
104 | UPT_SYSCALL_ARG5(r), \ | 104 | UPT_SYSCALL_ARG5(r), \ |
105 | UPT_SYSCALL_ARG6(r) } } ) | 105 | UPT_SYSCALL_ARG6(r) } } ) |
106 | 106 | ||
107 | #define UPT_REG(regs, reg) \ | 107 | #define UPT_REG(regs, reg) \ |
108 | ({ unsigned long val; \ | 108 | ({ unsigned long val; \ |
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 5064fb691eb5..0d260567fd15 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -1,24 +1,19 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/slab.h" | 6 | #include "linux/stddef.h" |
7 | #include "linux/fs.h" | ||
7 | #include "linux/smp_lock.h" | 8 | #include "linux/smp_lock.h" |
8 | #include "linux/ptrace.h" | 9 | #include "linux/ptrace.h" |
9 | #include "linux/fs.h" | 10 | #include "linux/sched.h" |
10 | #include "asm/ptrace.h" | 11 | #include "asm/current.h" |
11 | #include "asm/pgtable.h" | 12 | #include "asm/processor.h" |
12 | #include "asm/tlbflush.h" | ||
13 | #include "asm/uaccess.h" | 13 | #include "asm/uaccess.h" |
14 | #include "kern_util.h" | ||
15 | #include "as-layout.h" | ||
16 | #include "mem_user.h" | 14 | #include "mem_user.h" |
17 | #include "kern.h" | 15 | #include "skas.h" |
18 | #include "irq_user.h" | ||
19 | #include "tlb.h" | ||
20 | #include "os.h" | 16 | #include "os.h" |
21 | #include "skas/skas.h" | ||
22 | 17 | ||
23 | void flush_thread(void) | 18 | void flush_thread(void) |
24 | { | 19 | { |
@@ -29,8 +24,8 @@ void flush_thread(void)
29 | arch_flush_thread(¤t->thread.arch); | 24 | arch_flush_thread(¤t->thread.arch); |
30 | 25 | ||
31 | ret = unmap(¤t->mm->context.skas.id, 0, end, 1, &data); | 26 | ret = unmap(¤t->mm->context.skas.id, 0, end, 1, &data); |
32 | if(ret){ | 27 | if (ret) { |
33 | printk("flush_thread - clearing address space failed, " | 28 | printk(KERN_ERR "flush_thread - clearing address space failed, " |
34 | "err = %d\n", ret); | 29 | "err = %d\n", ret); |
35 | force_sig(SIGKILL, current); | 30 | force_sig(SIGKILL, current); |
36 | } | 31 | } |
@@ -52,7 +47,7 @@ extern void log_exec(char **argv, void *tty);
52 | static long execve1(char *file, char __user * __user *argv, | 47 | static long execve1(char *file, char __user * __user *argv, |
53 | char __user *__user *env) | 48 | char __user *__user *env) |
54 | { | 49 | { |
55 | long error; | 50 | long error; |
56 | #ifdef CONFIG_TTY_LOG | 51 | #ifdef CONFIG_TTY_LOG |
57 | struct tty_struct *tty; | 52 | struct tty_struct *tty; |
58 | 53 | ||
@@ -62,16 +57,16 @@ static long execve1(char *file, char __user * __user *argv,
62 | log_exec(argv, tty); | 57 | log_exec(argv, tty); |
63 | mutex_unlock(&tty_mutex); | 58 | mutex_unlock(&tty_mutex); |
64 | #endif | 59 | #endif |
65 | error = do_execve(file, argv, env, ¤t->thread.regs); | 60 | error = do_execve(file, argv, env, ¤t->thread.regs); |
66 | if (error == 0){ | 61 | if (error == 0) { |
67 | task_lock(current); | 62 | task_lock(current); |
68 | current->ptrace &= ~PT_DTRACE; | 63 | current->ptrace &= ~PT_DTRACE; |
69 | #ifdef SUBARCH_EXECVE1 | 64 | #ifdef SUBARCH_EXECVE1 |
70 | SUBARCH_EXECVE1(¤t->thread.regs.regs); | 65 | SUBARCH_EXECVE1(¤t->thread.regs.regs); |
71 | #endif | 66 | #endif |
72 | task_unlock(current); | 67 | task_unlock(current); |
73 | } | 68 | } |
74 | return(error); | 69 | return error; |
75 | } | 70 | } |
76 | 71 | ||
77 | long um_execve(char *file, char __user *__user *argv, char __user *__user *env) | 72 | long um_execve(char *file, char __user *__user *argv, char __user *__user *env) |
@@ -79,9 +74,9 @@ long um_execve(char *file, char __user *__user *argv, char __user *__user *env)
79 | long err; | 74 | long err; |
80 | 75 | ||
81 | err = execve1(file, argv, env); | 76 | err = execve1(file, argv, env); |
82 | if(!err) | 77 | if (!err) |
83 | do_longjmp(current->thread.exec_buf, 1); | 78 | do_longjmp(current->thread.exec_buf, 1); |
84 | return(err); | 79 | return err; |
85 | } | 80 | } |
86 | 81 | ||
87 | long sys_execve(char __user *file, char __user *__user *argv, | 82 | long sys_execve(char __user *file, char __user *__user *argv, |
@@ -98,5 +93,5 @@ long sys_execve(char __user *file, char __user *__user *argv,
98 | putname(filename); | 93 | putname(filename); |
99 | out: | 94 | out: |
100 | unlock_kernel(); | 95 | unlock_kernel(); |
101 | return(error); | 96 | return error; |
102 | } | 97 | } |
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index b10ee28b97cb..277fce17b088 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -1,37 +1,19 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c: | 4 | * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c: |
5 | * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar | 5 | * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "linux/kernel.h" | 8 | #include "linux/cpumask.h" |
9 | #include "linux/module.h" | 9 | #include "linux/hardirq.h" |
10 | #include "linux/smp.h" | ||
11 | #include "linux/kernel_stat.h" | ||
12 | #include "linux/interrupt.h" | 10 | #include "linux/interrupt.h" |
13 | #include "linux/random.h" | 11 | #include "linux/kernel_stat.h" |
14 | #include "linux/slab.h" | 12 | #include "linux/module.h" |
15 | #include "linux/file.h" | ||
16 | #include "linux/proc_fs.h" | ||
17 | #include "linux/init.h" | ||
18 | #include "linux/seq_file.h" | 13 | #include "linux/seq_file.h" |
19 | #include "linux/profile.h" | 14 | #include "as-layout.h" |
20 | #include "linux/hardirq.h" | ||
21 | #include "asm/irq.h" | ||
22 | #include "asm/hw_irq.h" | ||
23 | #include "asm/atomic.h" | ||
24 | #include "asm/signal.h" | ||
25 | #include "asm/system.h" | ||
26 | #include "asm/errno.h" | ||
27 | #include "asm/uaccess.h" | ||
28 | #include "kern_util.h" | 15 | #include "kern_util.h" |
29 | #include "irq_user.h" | ||
30 | #include "irq_kern.h" | ||
31 | #include "os.h" | 16 | #include "os.h" |
32 | #include "sigio.h" | ||
33 | #include "misc_constants.h" | ||
34 | #include "as-layout.h" | ||
35 | 17 | ||
36 | /* | 18 | /* |
37 | * Generic, controller-independent functions: | 19 | * Generic, controller-independent functions: |
@@ -71,9 +53,8 @@ int show_interrupts(struct seq_file *p, void *v)
71 | seq_putc(p, '\n'); | 53 | seq_putc(p, '\n'); |
72 | skip: | 54 | skip: |
73 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 55 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
74 | } else if (i == NR_IRQS) { | 56 | } else if (i == NR_IRQS) |
75 | seq_putc(p, '\n'); | 57 | seq_putc(p, '\n'); |
76 | } | ||
77 | 58 | ||
78 | return 0; | 59 | return 0; |
79 | } | 60 | } |
@@ -102,11 +83,13 @@ void sigio_handler(int sig, struct uml_pt_regs *regs)
102 | while (1) { | 83 | while (1) { |
103 | n = os_waiting_for_events(active_fds); | 84 | n = os_waiting_for_events(active_fds); |
104 | if (n <= 0) { | 85 | if (n <= 0) { |
105 | if(n == -EINTR) continue; | 86 | if (n == -EINTR) |
87 | continue; | ||
106 | else break; | 88 | else break; |
107 | } | 89 | } |
108 | 90 | ||
109 | for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) { | 91 | for (irq_fd = active_fds; irq_fd != NULL; |
92 | irq_fd = irq_fd->next) { | ||
110 | if (irq_fd->current_events != 0) { | 93 | if (irq_fd->current_events != 0) { |
111 | irq_fd->current_events = 0; | 94 | irq_fd->current_events = 0; |
112 | do_IRQ(irq_fd->irq, regs); | 95 | do_IRQ(irq_fd->irq, regs); |
@@ -138,8 +121,7 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
138 | 121 | ||
139 | if (type == IRQ_READ) | 122 | if (type == IRQ_READ) |
140 | events = UM_POLLIN | UM_POLLPRI; | 123 | events = UM_POLLIN | UM_POLLPRI; |
141 | else | 124 | else events = UM_POLLOUT; |
142 | events = UM_POLLOUT; | ||
143 | *new_fd = ((struct irq_fd) { .next = NULL, | 125 | *new_fd = ((struct irq_fd) { .next = NULL, |
144 | .id = dev_id, | 126 | .id = dev_id, |
145 | .fd = fd, | 127 | .fd = fd, |
@@ -153,9 +135,10 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
153 | spin_lock_irqsave(&irq_lock, flags); | 135 | spin_lock_irqsave(&irq_lock, flags); |
154 | for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) { | 136 | for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) { |
155 | if ((irq_fd->fd == fd) && (irq_fd->type == type)) { | 137 | if ((irq_fd->fd == fd) && (irq_fd->type == type)) { |
156 | printk("Registering fd %d twice\n", fd); | 138 | printk(KERN_ERR "Registering fd %d twice\n", fd); |
157 | printk("Irqs : %d, %d\n", irq_fd->irq, irq); | 139 | printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq); |
158 | printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id); | 140 | printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id, |
141 | dev_id); | ||
159 | goto out_unlock; | 142 | goto out_unlock; |
160 | } | 143 | } |
161 | } | 144 | } |
@@ -171,7 +154,8 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
171 | if (n == 0) | 154 | if (n == 0) |
172 | break; | 155 | break; |
173 | 156 | ||
174 | /* n > 0 | 157 | /* |
158 | * n > 0 | ||
175 | * It means we couldn't put new pollfd to current pollfds | 159 | * It means we couldn't put new pollfd to current pollfds |
176 | * and tmp_fds is NULL or too small for new pollfds array. | 160 | * and tmp_fds is NULL or too small for new pollfds array. |
177 | * Needed size is equal to n as minimum. | 161 | * Needed size is equal to n as minimum. |
@@ -197,7 +181,8 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
197 | 181 | ||
198 | spin_unlock_irqrestore(&irq_lock, flags); | 182 | spin_unlock_irqrestore(&irq_lock, flags); |
199 | 183 | ||
200 | /* This calls activate_fd, so it has to be outside the critical | 184 | /* |
185 | * This calls activate_fd, so it has to be outside the critical | ||
201 | * section. | 186 | * section. |
202 | */ | 187 | */ |
203 | maybe_sigio_broken(fd, (type == IRQ_READ)); | 188 | maybe_sigio_broken(fd, (type == IRQ_READ)); |
@@ -264,13 +249,14 @@ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
264 | i++; | 249 | i++; |
265 | } | 250 | } |
266 | if (irq == NULL) { | 251 | if (irq == NULL) { |
267 | printk("find_irq_by_fd doesn't have descriptor %d\n", fd); | 252 | printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n", |
253 | fd); | ||
268 | goto out; | 254 | goto out; |
269 | } | 255 | } |
270 | fdi = os_get_pollfd(i); | 256 | fdi = os_get_pollfd(i); |
271 | if ((fdi != -1) && (fdi != fd)) { | 257 | if ((fdi != -1) && (fdi != fd)) { |
272 | printk("find_irq_by_fd - mismatch between active_fds and " | 258 | printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds " |
273 | "pollfds, fd %d vs %d, need %d\n", irq->fd, | 259 | "and pollfds, fd %d vs %d, need %d\n", irq->fd, |
274 | fdi, fd); | 260 | fdi, fd); |
275 | irq = NULL; | 261 | irq = NULL; |
276 | goto out; | 262 | goto out; |
@@ -306,7 +292,7 @@ void deactivate_fd(int fd, int irqnum)
306 | 292 | ||
307 | spin_lock_irqsave(&irq_lock, flags); | 293 | spin_lock_irqsave(&irq_lock, flags); |
308 | irq = find_irq_by_fd(fd, irqnum, &i); | 294 | irq = find_irq_by_fd(fd, irqnum, &i); |
309 | if(irq == NULL){ | 295 | if (irq == NULL) { |
310 | spin_unlock_irqrestore(&irq_lock, flags); | 296 | spin_unlock_irqrestore(&irq_lock, flags); |
311 | return; | 297 | return; |
312 | } | 298 | } |
@@ -372,8 +358,10 @@ int um_request_irq(unsigned int irq, int fd, int type,
372 | EXPORT_SYMBOL(um_request_irq); | 358 | EXPORT_SYMBOL(um_request_irq); |
373 | EXPORT_SYMBOL(reactivate_fd); | 359 | EXPORT_SYMBOL(reactivate_fd); |
374 | 360 | ||
375 | /* hw_interrupt_type must define (startup || enable) && | 361 | /* |
376 | * (shutdown || disable) && end */ | 362 | * hw_interrupt_type must define (startup || enable) && |
363 | * (shutdown || disable) && end | ||
364 | */ | ||
377 | static void dummy(unsigned int irq) | 365 | static void dummy(unsigned int irq) |
378 | { | 366 | { |
379 | } | 367 | } |
@@ -422,7 +410,8 @@ int init_aio_irq(int irq, char *name, irq_handler_t handler)
422 | 410 | ||
423 | err = os_pipe(fds, 1, 1); | 411 | err = os_pipe(fds, 1, 1); |
424 | if (err) { | 412 | if (err) { |
425 | printk("init_aio_irq - os_pipe failed, err = %d\n", -err); | 413 | printk(KERN_ERR "init_aio_irq - os_pipe failed, err = %d\n", |
414 | -err); | ||
426 | goto out; | 415 | goto out; |
427 | } | 416 | } |
428 | 417 | ||
@@ -430,7 +419,8 @@ int init_aio_irq(int irq, char *name, irq_handler_t handler)
430 | IRQF_DISABLED | IRQF_SAMPLE_RANDOM, name, | 419 | IRQF_DISABLED | IRQF_SAMPLE_RANDOM, name, |
431 | (void *) (long) fds[0]); | 420 | (void *) (long) fds[0]); |
432 | if (err) { | 421 | if (err) { |
433 | printk("init_aio_irq - : um_request_irq failed, err = %d\n", | 422 | printk(KERN_ERR "init_aio_irq - : um_request_irq failed, " |
423 | "err = %d\n", | ||
434 | err); | 424 | err); |
435 | goto out_close; | 425 | goto out_close; |
436 | } | 426 | } |
@@ -501,8 +491,9 @@ unsigned long to_irq_stack(unsigned long *mask_out)
501 | int nested; | 491 | int nested; |
502 | 492 | ||
503 | mask = xchg(&pending_mask, *mask_out); | 493 | mask = xchg(&pending_mask, *mask_out); |
504 | if(mask != 0){ | 494 | if (mask != 0) { |
505 | /* If any interrupts come in at this point, we want to | 495 | /* |
496 | * If any interrupts come in at this point, we want to | ||
506 | * make sure that their bits aren't lost by our | 497 | * make sure that their bits aren't lost by our |
507 | * putting our bit in. So, this loop accumulates bits | 498 | * putting our bit in. So, this loop accumulates bits |
508 | * until xchg returns the same value that we put in. | 499 | * until xchg returns the same value that we put in. |
@@ -514,13 +505,13 @@ unsigned long to_irq_stack(unsigned long *mask_out)
514 | do { | 505 | do { |
515 | old |= mask; | 506 | old |= mask; |
516 | mask = xchg(&pending_mask, old); | 507 | mask = xchg(&pending_mask, old); |
517 | } while(mask != old); | 508 | } while (mask != old); |
518 | return 1; | 509 | return 1; |
519 | } | 510 | } |
520 | 511 | ||
521 | ti = current_thread_info(); | 512 | ti = current_thread_info(); |
522 | nested = (ti->real_thread != NULL); | 513 | nested = (ti->real_thread != NULL); |
523 | if(!nested){ | 514 | if (!nested) { |
524 | struct task_struct *task; | 515 | struct task_struct *task; |
525 | struct thread_info *tti; | 516 | struct thread_info *tti; |
526 | 517 | ||
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 90e89e838173..a55d221d8a4c 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -75,7 +75,7 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
75 | err = os_map_memory((void *) virt, fd, offset, len, r, w, x); | 75 | err = os_map_memory((void *) virt, fd, offset, len, r, w, x); |
76 | if (err) { | 76 | if (err) { |
77 | if (err == -ENOMEM) | 77 | if (err == -ENOMEM) |
78 | printk("try increasing the host's " | 78 | printk(KERN_ERR "try increasing the host's " |
79 | "/proc/sys/vm/max_map_count to <physical " | 79 | "/proc/sys/vm/max_map_count to <physical " |
80 | "memory size>/4096\n"); | 80 | "memory size>/4096\n"); |
81 | panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, " | 81 | panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, " |
@@ -103,7 +103,8 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
103 | exit(1); | 103 | exit(1); |
104 | } | 104 | } |
105 | 105 | ||
106 | /* Special kludge - This page will be mapped in to userspace processes | 106 | /* |
107 | * Special kludge - This page will be mapped in to userspace processes | ||
107 | * from physmem_fd, so it needs to be written out there. | 108 | * from physmem_fd, so it needs to be written out there. |
108 | */ | 109 | */ |
109 | os_seek_file(physmem_fd, __pa(&__syscall_stub_start)); | 110 | os_seek_file(physmem_fd, __pa(&__syscall_stub_start)); |
@@ -202,8 +203,8 @@ int setup_iomem(void)
202 | err = os_map_memory((void *) iomem_start, region->fd, 0, | 203 | err = os_map_memory((void *) iomem_start, region->fd, 0, |
203 | region->size, 1, 1, 0); | 204 | region->size, 1, 1, 0); |
204 | if (err) | 205 | if (err) |
205 | printk("Mapping iomem region for driver '%s' failed, " | 206 | printk(KERN_ERR "Mapping iomem region for driver '%s' " |
206 | "errno = %d\n", region->driver, -err); | 207 | "failed, errno = %d\n", region->driver, -err); |
207 | else { | 208 | else { |
208 | region->virt = iomem_start; | 209 | region->virt = iomem_start; |
209 | region->phys = __pa(region->virt); | 210 | region->phys = __pa(region->virt); |
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index d3b9c62e73c7..7c037fa9c5b8 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -1,51 +1,29 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Copyright 2003 PathScale, Inc. | 3 | * Copyright 2003 PathScale, Inc. |
4 | * Licensed under the GPL | 4 | * Licensed under the GPL |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "linux/kernel.h" | 7 | #include "linux/stddef.h" |
8 | #include "linux/sched.h" | 8 | #include "linux/err.h" |
9 | #include "linux/interrupt.h" | 9 | #include "linux/hardirq.h" |
10 | #include "linux/string.h" | ||
11 | #include "linux/mm.h" | 10 | #include "linux/mm.h" |
12 | #include "linux/slab.h" | 11 | #include "linux/personality.h" |
13 | #include "linux/utsname.h" | ||
14 | #include "linux/fs.h" | ||
15 | #include "linux/utime.h" | ||
16 | #include "linux/smp_lock.h" | ||
17 | #include "linux/module.h" | ||
18 | #include "linux/init.h" | ||
19 | #include "linux/capability.h" | ||
20 | #include "linux/vmalloc.h" | ||
21 | #include "linux/spinlock.h" | ||
22 | #include "linux/proc_fs.h" | 12 | #include "linux/proc_fs.h" |
23 | #include "linux/ptrace.h" | 13 | #include "linux/ptrace.h" |
24 | #include "linux/random.h" | 14 | #include "linux/random.h" |
25 | #include "linux/personality.h" | 15 | #include "linux/sched.h" |
26 | #include "asm/unistd.h" | 16 | #include "linux/threads.h" |
27 | #include "asm/mman.h" | ||
28 | #include "asm/segment.h" | ||
29 | #include "asm/stat.h" | ||
30 | #include "asm/pgtable.h" | 17 | #include "asm/pgtable.h" |
31 | #include "asm/processor.h" | ||
32 | #include "asm/tlbflush.h" | ||
33 | #include "asm/uaccess.h" | 18 | #include "asm/uaccess.h" |
34 | #include "asm/user.h" | ||
35 | #include "kern_util.h" | ||
36 | #include "as-layout.h" | 19 | #include "as-layout.h" |
37 | #include "kern.h" | 20 | #include "kern_util.h" |
38 | #include "signal_kern.h" | ||
39 | #include "init.h" | ||
40 | #include "irq_user.h" | ||
41 | #include "mem_user.h" | ||
42 | #include "tlb.h" | ||
43 | #include "frame_kern.h" | ||
44 | #include "sigcontext.h" | ||
45 | #include "os.h" | 21 | #include "os.h" |
46 | #include "skas.h" | 22 | #include "skas.h" |
23 | #include "tlb.h" | ||
47 | 24 | ||
48 | /* This is a per-cpu array. A processor only modifies its entry and it only | 25 | /* |
26 | * This is a per-cpu array. A processor only modifies its entry and it only | ||
49 | * cares about its entry, so it's OK if another processor is modifying its | 27 | * cares about its entry, so it's OK if another processor is modifying its |
50 | * entry. | 28 | * entry. |
51 | */ | 29 | */ |
@@ -54,15 +32,15 @@ struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
54 | static inline int external_pid(struct task_struct *task) | 32 | static inline int external_pid(struct task_struct *task) |
55 | { | 33 | { |
56 | /* FIXME: Need to look up userspace_pid by cpu */ | 34 | /* FIXME: Need to look up userspace_pid by cpu */ |
57 | return(userspace_pid[0]); | 35 | return userspace_pid[0]; |
58 | } | 36 | } |
59 | 37 | ||
60 | int pid_to_processor_id(int pid) | 38 | int pid_to_processor_id(int pid) |
61 | { | 39 | { |
62 | int i; | 40 | int i; |
63 | 41 | ||
64 | for(i = 0; i < ncpus; i++){ | 42 | for(i = 0; i < ncpus; i++) { |
65 | if(cpu_tasks[i].pid == pid) | 43 | if (cpu_tasks[i].pid == pid) |
66 | return i; | 44 | return i; |
67 | } | 45 | } |
68 | return -1; | 46 | return -1; |
@@ -118,7 +96,7 @@ void *_switch_to(void *prev, void *next, void *last)
118 | current->thread.saved_task = NULL; | 96 | current->thread.saved_task = NULL; |
119 | 97 | ||
120 | /* XXX need to check runqueues[cpu].idle */ | 98 | /* XXX need to check runqueues[cpu].idle */ |
121 | if(current->pid == 0) | 99 | if (current->pid == 0) |
122 | switch_timers(0); | 100 | switch_timers(0); |
123 | 101 | ||
124 | switch_threads(&from->thread.switch_buf, | 102 | switch_threads(&from->thread.switch_buf, |
@@ -126,10 +104,10 @@ void *_switch_to(void *prev, void *next, void *last)
126 | 104 | ||
127 | arch_switch_to(current->thread.prev_sched, current); | 105 | arch_switch_to(current->thread.prev_sched, current); |
128 | 106 | ||
129 | if(current->pid == 0) | 107 | if (current->pid == 0) |
130 | switch_timers(1); | 108 | switch_timers(1); |
131 | 109 | ||
132 | if(current->thread.saved_task) | 110 | if (current->thread.saved_task) |
133 | show_regs(&(current->thread.regs)); | 111 | show_regs(&(current->thread.regs)); |
134 | next= current->thread.saved_task; | 112 | next= current->thread.saved_task; |
135 | prev= current; | 113 | prev= current; |
@@ -141,9 +119,9 @@ void *_switch_to(void *prev, void *next, void *last)
141 | 119 | ||
142 | void interrupt_end(void) | 120 | void interrupt_end(void) |
143 | { | 121 | { |
144 | if(need_resched()) | 122 | if (need_resched()) |
145 | schedule(); | 123 | schedule(); |
146 | if(test_tsk_thread_flag(current, TIF_SIGPENDING)) | 124 | if (test_tsk_thread_flag(current, TIF_SIGPENDING)) |
147 | do_signal(); | 125 | do_signal(); |
148 | } | 126 | } |
149 | 127 | ||
@@ -158,7 +136,8 @@ void *get_current(void)
158 | 136 | ||
159 | extern void schedule_tail(struct task_struct *prev); | 137 | extern void schedule_tail(struct task_struct *prev); |
160 | 138 | ||
161 | /* This is called magically, by its address being stuffed in a jmp_buf | 139 | /* |
140 | * This is called magically, by its address being stuffed in a jmp_buf | ||
162 | * and being longjmp-d to. | 141 | * and being longjmp-d to. |
163 | */ | 142 | */ |
164 | void new_thread_handler(void) | 143 | void new_thread_handler(void) |
@@ -166,18 +145,19 @@ void new_thread_handler(void)
166 | int (*fn)(void *), n; | 145 | int (*fn)(void *), n; |
167 | void *arg; | 146 | void *arg; |
168 | 147 | ||
169 | if(current->thread.prev_sched != NULL) | 148 | if (current->thread.prev_sched != NULL) |
170 | schedule_tail(current->thread.prev_sched); | 149 | schedule_tail(current->thread.prev_sched); |
171 | current->thread.prev_sched = NULL; | 150 | current->thread.prev_sched = NULL; |
172 | 151 | ||
173 | fn = current->thread.request.u.thread.proc; | 152 | fn = current->thread.request.u.thread.proc; |
174 | arg = current->thread.request.u.thread.arg; | 153 | arg = current->thread.request.u.thread.arg; |
175 | 154 | ||
176 | /* The return value is 1 if the kernel thread execs a process, | 155 | /* |
156 | * The return value is 1 if the kernel thread execs a process, | ||
177 | * 0 if it just exits | 157 | * 0 if it just exits |
178 | */ | 158 | */ |
179 | n = run_kernel_thread(fn, arg, ¤t->thread.exec_buf); | 159 | n = run_kernel_thread(fn, arg, ¤t->thread.exec_buf); |
180 | if(n == 1){ | 160 | if (n == 1) { |
181 | /* Handle any immediate reschedules or signals */ | 161 | /* Handle any immediate reschedules or signals */ |
182 | interrupt_end(); | 162 | interrupt_end(); |
183 | userspace(¤t->thread.regs.regs); | 163 | userspace(¤t->thread.regs.regs); |
@@ -189,14 +169,16 @@ void new_thread_handler(void)
189 | void fork_handler(void) | 169 | void fork_handler(void) |
190 | { | 170 | { |
191 | force_flush_all(); | 171 | force_flush_all(); |
192 | if(current->thread.prev_sched == NULL) | 172 | if (current->thread.prev_sched == NULL) |
193 | panic("blech"); | 173 | panic("blech"); |
194 | 174 | ||
195 | schedule_tail(current->thread.prev_sched); | 175 | schedule_tail(current->thread.prev_sched); |
196 | 176 | ||
197 | /* XXX: if interrupt_end() calls schedule, this call to | 177 | /* |
178 | * XXX: if interrupt_end() calls schedule, this call to | ||
198 | * arch_switch_to isn't needed. We could want to apply this to | 179 | * arch_switch_to isn't needed. We could want to apply this to |
199 | * improve performance. -bb */ | 180 | * improve performance. -bb |
181 | */ | ||
200 | arch_switch_to(current->thread.prev_sched, current); | 182 | arch_switch_to(current->thread.prev_sched, current); |
201 | 183 | ||
202 | current->thread.prev_sched = NULL; | 184 | current->thread.prev_sched = NULL; |
@@ -216,11 +198,11 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
216 | 198 | ||
217 | p->thread = (struct thread_struct) INIT_THREAD; | 199 | p->thread = (struct thread_struct) INIT_THREAD; |
218 | 200 | ||
219 | if(current->thread.forking){ | 201 | if (current->thread.forking) { |
220 | memcpy(&p->thread.regs.regs, ®s->regs, | 202 | memcpy(&p->thread.regs.regs, ®s->regs, |
221 | sizeof(p->thread.regs.regs)); | 203 | sizeof(p->thread.regs.regs)); |
222 | REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.regs, 0); | 204 | REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.regs, 0); |
223 | if(sp != 0) | 205 | if (sp != 0) |
224 | REGS_SP(p->thread.regs.regs.regs) = sp; | 206 | REGS_SP(p->thread.regs.regs.regs) = sp; |
225 | 207 | ||
226 | handler = fork_handler; | 208 | handler = fork_handler; |
@@ -259,14 +241,14 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
259 | 241 | ||
260 | void default_idle(void) | 242 | void default_idle(void) |
261 | { | 243 | { |
262 | while(1){ | 244 | while(1) { |
263 | /* endless idle loop with no priority at all */ | 245 | /* endless idle loop with no priority at all */ |
264 | 246 | ||
265 | /* | 247 | /* |
266 | * although we are an idle CPU, we do not want to | 248 | * although we are an idle CPU, we do not want to |
267 | * get into the scheduler unnecessarily. | 249 | * get into the scheduler unnecessarily. |
268 | */ | 250 | */ |
269 | if(need_resched()) | 251 | if (need_resched()) |
270 | schedule(); | 252 | schedule(); |
271 | 253 | ||
272 | idle_sleep(10); | 254 | idle_sleep(10); |
@@ -288,26 +270,26 @@ void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
288 | pte_t *pte; | 270 | pte_t *pte; |
289 | pte_t ptent; | 271 | pte_t ptent; |
290 | 272 | ||
291 | if(task->mm == NULL) | 273 | if (task->mm == NULL) |
292 | return ERR_PTR(-EINVAL); | 274 | return ERR_PTR(-EINVAL); |
293 | pgd = pgd_offset(task->mm, addr); | 275 | pgd = pgd_offset(task->mm, addr); |
294 | if(!pgd_present(*pgd)) | 276 | if (!pgd_present(*pgd)) |
295 | return ERR_PTR(-EINVAL); | 277 | return ERR_PTR(-EINVAL); |
296 | 278 | ||
297 | pud = pud_offset(pgd, addr); | 279 | pud = pud_offset(pgd, addr); |
298 | if(!pud_present(*pud)) | 280 | if (!pud_present(*pud)) |
299 | return ERR_PTR(-EINVAL); | 281 | return ERR_PTR(-EINVAL); |
300 | 282 | ||
301 | pmd = pmd_offset(pud, addr); | 283 | pmd = pmd_offset(pud, addr); |
302 | if(!pmd_present(*pmd)) | 284 | if (!pmd_present(*pmd)) |
303 | return ERR_PTR(-EINVAL); | 285 | return ERR_PTR(-EINVAL); |
304 | 286 | ||
305 | pte = pte_offset_kernel(pmd, addr); | 287 | pte = pte_offset_kernel(pmd, addr); |
306 | ptent = *pte; | 288 | ptent = *pte; |
307 | if(!pte_present(ptent)) | 289 | if (!pte_present(ptent)) |
308 | return ERR_PTR(-EINVAL); | 290 | return ERR_PTR(-EINVAL); |
309 | 291 | ||
310 | if(pte_out != NULL) | 292 | if (pte_out != NULL) |
311 | *pte_out = ptent; | 293 | *pte_out = ptent; |
312 | return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK); | 294 | return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK); |
313 | } | 295 | } |
@@ -380,7 +362,7 @@ int smp_sigio_handler(void)
380 | #ifdef CONFIG_SMP | 362 | #ifdef CONFIG_SMP |
381 | int cpu = current_thread->cpu; | 363 | int cpu = current_thread->cpu; |
382 | IPI_handler(cpu); | 364 | IPI_handler(cpu); |
383 | if(cpu != 0) | 365 | if (cpu != 0) |
384 | return 1; | 366 | return 1; |
385 | #endif | 367 | #endif |
386 | return 0; | 368 | return 0; |
@@ -408,7 +390,8 @@ int get_using_sysemu(void)
408 | 390 | ||
409 | static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data) | 391 | static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data) |
410 | { | 392 | { |
411 | if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /*No overflow*/ | 393 | if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) |
394 | /* No overflow */ | ||
412 | *eof = 1; | 395 | *eof = 1; |
413 | 396 | ||
414 | return strlen(buf); | 397 | return strlen(buf); |
@@ -423,7 +406,8 @@ static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned
423 | 406 | ||
424 | if (tmp[0] >= '0' && tmp[0] <= '2') | 407 | if (tmp[0] >= '0' && tmp[0] <= '2') |
425 | set_using_sysemu(tmp[0] - '0'); | 408 | set_using_sysemu(tmp[0] - '0'); |
426 | return count; /*We use the first char, but pretend to write everything*/ | 409 | /* We use the first char, but pretend to write everything */ |
410 | return count; | ||
427 | } | 411 | } |
428 | 412 | ||
429 | int __init make_proc_sysemu(void) | 413 | int __init make_proc_sysemu(void) |
@@ -453,10 +437,10 @@ int singlestepping(void * t)
453 | struct task_struct *task = t ? t : current; | 437 | struct task_struct *task = t ? t : current; |
454 | 438 | ||
455 | if ( ! (task->ptrace & PT_DTRACE) ) | 439 | if ( ! (task->ptrace & PT_DTRACE) ) |
456 | return(0); | 440 | return 0; |
457 | 441 | ||
458 | if (task->thread.singlestep_syscall) | 442 | if (task->thread.singlestep_syscall) |
459 | return(1); | 443 | return 1; |
460 | 444 | ||
461 | return 2; | 445 | return 2; |
462 | } | 446 | } |
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index bbc3a4a9a0fa..db55a017e9b9 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -1,35 +1,27 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/sched.h" | ||
7 | #include "linux/mm.h" | ||
8 | #include "linux/errno.h" | ||
9 | #include "linux/smp_lock.h" | ||
10 | #include "linux/security.h" | ||
11 | #include "linux/ptrace.h" | ||
12 | #include "linux/audit.h" | 6 | #include "linux/audit.h" |
7 | #include "linux/ptrace.h" | ||
8 | #include "linux/sched.h" | ||
9 | #include "asm/uaccess.h" | ||
13 | #ifdef CONFIG_PROC_MM | 10 | #ifdef CONFIG_PROC_MM |
14 | #include "linux/proc_mm.h" | 11 | #include "proc_mm.h" |
15 | #endif | 12 | #endif |
16 | #include "asm/ptrace.h" | ||
17 | #include "asm/uaccess.h" | ||
18 | #include "kern_util.h" | ||
19 | #include "skas_ptrace.h" | 13 | #include "skas_ptrace.h" |
20 | #include "sysdep/ptrace.h" | ||
21 | #include "os.h" | ||
22 | 14 | ||
23 | static inline void set_singlestepping(struct task_struct *child, int on) | 15 | static inline void set_singlestepping(struct task_struct *child, int on) |
24 | { | 16 | { |
25 | if (on) | 17 | if (on) |
26 | child->ptrace |= PT_DTRACE; | 18 | child->ptrace |= PT_DTRACE; |
27 | else | 19 | else |
28 | child->ptrace &= ~PT_DTRACE; | 20 | child->ptrace &= ~PT_DTRACE; |
29 | child->thread.singlestep_syscall = 0; | 21 | child->thread.singlestep_syscall = 0; |
30 | 22 | ||
31 | #ifdef SUBARCH_SET_SINGLESTEPPING | 23 | #ifdef SUBARCH_SET_SINGLESTEPPING |
32 | SUBARCH_SET_SINGLESTEPPING(child, on); | 24 | SUBARCH_SET_SINGLESTEPPING(child, on); |
33 | #endif | 25 | #endif |
34 | } | 26 | } |
35 | 27 | ||
@@ -37,8 +29,8 @@ static inline void set_singlestepping(struct task_struct *child, int on)
37 | * Called by kernel/ptrace.c when detaching.. | 29 | * Called by kernel/ptrace.c when detaching.. |
38 | */ | 30 | */ |
39 | void ptrace_disable(struct task_struct *child) | 31 | void ptrace_disable(struct task_struct *child) |
40 | { | 32 | { |
41 | set_singlestepping(child,0); | 33 | set_singlestepping(child,0); |
42 | } | 34 | } |
43 | 35 | ||
44 | extern int peek_user(struct task_struct * child, long addr, long data); | 36 | extern int peek_user(struct task_struct * child, long addr, long data); |
@@ -50,40 +42,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
50 | unsigned long __user *p = (void __user *)(unsigned long)data; | 42 | unsigned long __user *p = (void __user *)(unsigned long)data; |
51 | 43 | ||
52 | switch (request) { | 44 | switch (request) { |
53 | /* when I and D space are separate, these will need to be fixed. */ | 45 | /* read word at location addr. */ |
54 | case PTRACE_PEEKTEXT: /* read word at location addr. */ | 46 | case PTRACE_PEEKTEXT: |
55 | case PTRACE_PEEKDATA: | 47 | case PTRACE_PEEKDATA: |
56 | ret = generic_ptrace_peekdata(child, addr, data); | 48 | ret = generic_ptrace_peekdata(child, addr, data); |
57 | break; | 49 | break; |
58 | 50 | ||
59 | /* read the word at location addr in the USER area. */ | 51 | /* read the word at location addr in the USER area. */ |
60 | case PTRACE_PEEKUSR: | 52 | case PTRACE_PEEKUSR: |
61 | ret = peek_user(child, addr, data); | 53 | ret = peek_user(child, addr, data); |
62 | break; | 54 | break; |
63 | 55 | ||
64 | /* when I and D space are separate, this will have to be fixed. */ | 56 | /* write the word at location addr. */ |
65 | case PTRACE_POKETEXT: /* write the word at location addr. */ | 57 | case PTRACE_POKETEXT: |
66 | case PTRACE_POKEDATA: | 58 | case PTRACE_POKEDATA: |
67 | ret = generic_ptrace_pokedata(child, addr, data); | 59 | ret = generic_ptrace_pokedata(child, addr, data); |
68 | break; | 60 | break; |
69 | 61 | ||
70 | case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ | 62 | /* write the word at location addr in the USER area */ |
71 | ret = poke_user(child, addr, data); | 63 | case PTRACE_POKEUSR: |
72 | break; | 64 | ret = poke_user(child, addr, data); |
65 | break; | ||
73 | 66 | ||
74 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 67 | /* continue and stop at next (return from) syscall */ |
75 | case PTRACE_CONT: { /* restart after signal. */ | 68 | case PTRACE_SYSCALL: |
69 | /* restart after signal. */ | ||
70 | case PTRACE_CONT: { | ||
76 | ret = -EIO; | 71 | ret = -EIO; |
77 | if (!valid_signal(data)) | 72 | if (!valid_signal(data)) |
78 | break; | 73 | break; |
79 | 74 | ||
80 | set_singlestepping(child, 0); | 75 | set_singlestepping(child, 0); |
81 | if (request == PTRACE_SYSCALL) { | 76 | if (request == PTRACE_SYSCALL) |
82 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 77 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
83 | } | 78 | else clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
84 | else { | ||
85 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
86 | } | ||
87 | child->exit_code = data; | 79 | child->exit_code = data; |
88 | wake_up_process(child); | 80 | wake_up_process(child); |
89 | ret = 0; | 81 | ret = 0; |
@@ -91,8 +83,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
91 | } | 83 | } |
92 | 84 | ||
93 | /* | 85 | /* |
94 | * make the child exit. Best I can do is send it a sigkill. | 86 | * make the child exit. Best I can do is send it a sigkill. |
95 | * perhaps it should be put in the status that it wants to | 87 | * perhaps it should be put in the status that it wants to |
96 | * exit. | 88 | * exit. |
97 | */ | 89 | */ |
98 | case PTRACE_KILL: { | 90 | case PTRACE_KILL: { |
@@ -100,7 +92,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
100 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ | 92 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ |
101 | break; | 93 | break; |
102 | 94 | ||
103 | set_singlestepping(child, 0); | 95 | set_singlestepping(child, 0); |
104 | child->exit_code = SIGKILL; | 96 | child->exit_code = SIGKILL; |
105 | wake_up_process(child); | 97 | wake_up_process(child); |
106 | break; | 98 | break; |
@@ -111,7 +103,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
111 | if (!valid_signal(data)) | 103 | if (!valid_signal(data)) |
112 | break; | 104 | break; |
113 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 105 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
114 | set_singlestepping(child, 1); | 106 | set_singlestepping(child, 1); |
115 | child->exit_code = data; | 107 | child->exit_code = data; |
116 | /* give it a chance to run. */ | 108 | /* give it a chance to run. */ |
117 | wake_up_process(child); | 109 | wake_up_process(child); |
@@ -180,13 +172,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
180 | break; | 172 | break; |
181 | 173 | ||
182 | case PTRACE_FAULTINFO: { | 174 | case PTRACE_FAULTINFO: { |
183 | /* Take the info from thread->arch->faultinfo, | 175 | /* |
176 | * Take the info from thread->arch->faultinfo, | ||
184 | * but transfer max. sizeof(struct ptrace_faultinfo). | 177 | * but transfer max. sizeof(struct ptrace_faultinfo). |
185 | * On i386, ptrace_faultinfo is smaller! | 178 | * On i386, ptrace_faultinfo is smaller! |
186 | */ | 179 | */ |
187 | ret = copy_to_user(p, &child->thread.arch.faultinfo, | 180 | ret = copy_to_user(p, &child->thread.arch.faultinfo, |
188 | sizeof(struct ptrace_faultinfo)); | 181 | sizeof(struct ptrace_faultinfo)); |
189 | if(ret) | 182 | if (ret) |
190 | break; | 183 | break; |
191 | break; | 184 | break; |
192 | } | 185 | } |
@@ -195,12 +188,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
195 | case PTRACE_LDT: { | 188 | case PTRACE_LDT: { |
196 | struct ptrace_ldt ldt; | 189 | struct ptrace_ldt ldt; |
197 | 190 | ||
198 | if(copy_from_user(&ldt, p, sizeof(ldt))){ | 191 | if (copy_from_user(&ldt, p, sizeof(ldt))) { |
199 | ret = -EIO; | 192 | ret = -EIO; |
200 | break; | 193 | break; |
201 | } | 194 | } |
202 | 195 | ||
203 | /* This one is confusing, so just punt and return -EIO for | 196 | /* |
197 | * This one is confusing, so just punt and return -EIO for | ||
204 | * now | 198 | * now |
205 | */ | 199 | */ |
206 | ret = -EIO; | 200 | ret = -EIO; |
@@ -212,7 +206,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
212 | struct mm_struct *old = child->mm; | 206 | struct mm_struct *old = child->mm; |
213 | struct mm_struct *new = proc_mm_get_mm(data); | 207 | struct mm_struct *new = proc_mm_get_mm(data); |
214 | 208 | ||
215 | if(IS_ERR(new)){ | 209 | if (IS_ERR(new)) { |
216 | ret = PTR_ERR(new); | 210 | ret = PTR_ERR(new); |
217 | break; | 211 | break; |
218 | } | 212 | } |
@@ -226,10 +220,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
226 | } | 220 | } |
227 | #endif | 221 | #endif |
228 | #ifdef PTRACE_ARCH_PRCTL | 222 | #ifdef PTRACE_ARCH_PRCTL |
229 | case PTRACE_ARCH_PRCTL: | 223 | case PTRACE_ARCH_PRCTL: |
230 | /* XXX Calls ptrace on the host - needs some SMP thinking */ | 224 | /* XXX Calls ptrace on the host - needs some SMP thinking */ |
231 | ret = arch_prctl(child, data, (void *) addr); | 225 | ret = arch_prctl(child, data, (void *) addr); |
232 | break; | 226 | break; |
233 | #endif | 227 | #endif |
234 | default: | 228 | default: |
235 | ret = ptrace_request(child, request, addr, data); | 229 | ret = ptrace_request(child, request, addr, data); |
@@ -255,7 +249,8 @@ void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs,
255 | force_sig_info(SIGTRAP, &info, tsk); | 249 | force_sig_info(SIGTRAP, &info, tsk); |
256 | } | 250 | } |
257 | 251 | ||
258 | /* XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and | 252 | /* |
253 | * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and | ||
259 | * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check | 254 | * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check |
260 | */ | 255 | */ |
261 | void syscall_trace(struct uml_pt_regs *regs, int entryexit) | 256 | void syscall_trace(struct uml_pt_regs *regs, int entryexit) |
@@ -272,7 +267,7 @@ void syscall_trace(struct uml_pt_regs *regs, int entryexit)
272 | UPT_SYSCALL_ARG3(regs), | 267 | UPT_SYSCALL_ARG3(regs), |
273 | UPT_SYSCALL_ARG4(regs)); | 268 | UPT_SYSCALL_ARG4(regs)); |
274 | else audit_syscall_exit(AUDITSC_RESULT(UPT_SYSCALL_RET(regs)), | 269 | else audit_syscall_exit(AUDITSC_RESULT(UPT_SYSCALL_RET(regs)), |
275 | UPT_SYSCALL_RET(regs)); | 270 | UPT_SYSCALL_RET(regs)); |
276 | } | 271 | } |
277 | 272 | ||
278 | /* Fake a debug trap */ | 273 | /* Fake a debug trap */ |
@@ -285,15 +280,18 @@ void syscall_trace(struct uml_pt_regs *regs, int entryexit)
285 | if (!(current->ptrace & PT_PTRACED)) | 280 | if (!(current->ptrace & PT_PTRACED)) |
286 | return; | 281 | return; |
287 | 282 | ||
288 | /* the 0x80 provides a way for the tracing parent to distinguish | 283 | /* |
289 | between a syscall stop and SIGTRAP delivery */ | 284 | * the 0x80 provides a way for the tracing parent to distinguish |
285 | * between a syscall stop and SIGTRAP delivery | ||
286 | */ | ||
290 | tracesysgood = (current->ptrace & PT_TRACESYSGOOD); | 287 | tracesysgood = (current->ptrace & PT_TRACESYSGOOD); |
291 | ptrace_notify(SIGTRAP | (tracesysgood ? 0x80 : 0)); | 288 | ptrace_notify(SIGTRAP | (tracesysgood ? 0x80 : 0)); |
292 | 289 | ||
293 | if (entryexit) /* force do_signal() --> is_syscall() */ | 290 | if (entryexit) /* force do_signal() --> is_syscall() */ |
294 | set_thread_flag(TIF_SIGPENDING); | 291 | set_thread_flag(TIF_SIGPENDING); |
295 | 292 | ||
296 | /* this isn't the same as continuing with a signal, but it will do | 293 | /* |
294 | * this isn't the same as continuing with a signal, but it will do | ||
297 | * for normal use. strace only continues with a signal if the | 295 | * for normal use. strace only continues with a signal if the |
298 | * stopping signal is not SIGTRAP. -brl | 296 | * stopping signal is not SIGTRAP. -brl |
299 | */ | 297 | */ |
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index f3bd18bbf07f..9d8eea47a0fc 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -1,13 +1,9 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/module.h" | ||
7 | #include "linux/sched.h" | 6 | #include "linux/sched.h" |
8 | #include "asm/smp.h" | ||
9 | #include "kern_util.h" | ||
10 | #include "kern.h" | ||
11 | #include "os.h" | 7 | #include "os.h" |
12 | #include "skas.h" | 8 | #include "skas.h" |
13 | 9 | ||
@@ -37,20 +33,20 @@ static void kill_off_processes(void)
37 | 33 | ||
38 | void uml_cleanup(void) | 34 | void uml_cleanup(void) |
39 | { | 35 | { |
40 | kmalloc_ok = 0; | 36 | kmalloc_ok = 0; |
41 | do_uml_exitcalls(); | 37 | do_uml_exitcalls(); |
42 | kill_off_processes(); | 38 | kill_off_processes(); |
43 | } | 39 | } |
44 | 40 | ||
45 | void machine_restart(char * __unused) | 41 | void machine_restart(char * __unused) |
46 | { | 42 | { |
47 | uml_cleanup(); | 43 | uml_cleanup(); |
48 | reboot_skas(); | 44 | reboot_skas(); |
49 | } | 45 | } |
50 | 46 | ||
51 | void machine_power_off(void) | 47 | void machine_power_off(void) |
52 | { | 48 | { |
53 | uml_cleanup(); | 49 | uml_cleanup(); |
54 | halt_skas(); | 50 | halt_skas(); |
55 | } | 51 | } |
56 | 52 | ||
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 4dab7e417ba9..19cb97733937 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -1,27 +1,16 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/stddef.h" | ||
7 | #include "linux/sys.h" | ||
8 | #include "linux/sched.h" | ||
9 | #include "linux/wait.h" | ||
10 | #include "linux/kernel.h" | ||
11 | #include "linux/smp_lock.h" | ||
12 | #include "linux/module.h" | 6 | #include "linux/module.h" |
13 | #include "linux/slab.h" | ||
14 | #include "linux/tty.h" | ||
15 | #include "linux/binfmts.h" | ||
16 | #include "linux/ptrace.h" | 7 | #include "linux/ptrace.h" |
8 | #include "linux/sched.h" | ||
9 | #include "asm/siginfo.h" | ||
17 | #include "asm/signal.h" | 10 | #include "asm/signal.h" |
18 | #include "asm/uaccess.h" | ||
19 | #include "asm/unistd.h" | 11 | #include "asm/unistd.h" |
20 | #include "asm/ucontext.h" | ||
21 | #include "kern_util.h" | ||
22 | #include "signal_kern.h" | ||
23 | #include "kern.h" | ||
24 | #include "frame_kern.h" | 12 | #include "frame_kern.h" |
13 | #include "kern_util.h" | ||
25 | #include "sigcontext.h" | 14 | #include "sigcontext.h" |
26 | 15 | ||
27 | EXPORT_SYMBOL(block_signals); | 16 | EXPORT_SYMBOL(block_signals); |
@@ -45,9 +34,9 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
45 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 34 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
46 | 35 | ||
47 | /* Did we come from a system call? */ | 36 | /* Did we come from a system call? */ |
48 | if(PT_REGS_SYSCALL_NR(regs) >= 0){ | 37 | if (PT_REGS_SYSCALL_NR(regs) >= 0) { |
49 | /* If so, check system call restarting.. */ | 38 | /* If so, check system call restarting.. */ |
50 | switch(PT_REGS_SYSCALL_RET(regs)){ | 39 | switch(PT_REGS_SYSCALL_RET(regs)) { |
51 | case -ERESTART_RESTARTBLOCK: | 40 | case -ERESTART_RESTARTBLOCK: |
52 | case -ERESTARTNOHAND: | 41 | case -ERESTARTNOHAND: |
53 | PT_REGS_SYSCALL_RET(regs) = -EINTR; | 42 | PT_REGS_SYSCALL_RET(regs) = -EINTR; |
@@ -67,17 +56,17 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
67 | } | 56 | } |
68 | 57 | ||
69 | sp = PT_REGS_SP(regs); | 58 | sp = PT_REGS_SP(regs); |
70 | if((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0)) | 59 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0)) |
71 | sp = current->sas_ss_sp + current->sas_ss_size; | 60 | sp = current->sas_ss_sp + current->sas_ss_size; |
72 | 61 | ||
73 | #ifdef CONFIG_ARCH_HAS_SC_SIGNALS | 62 | #ifdef CONFIG_ARCH_HAS_SC_SIGNALS |
74 | if(!(ka->sa.sa_flags & SA_SIGINFO)) | 63 | if (!(ka->sa.sa_flags & SA_SIGINFO)) |
75 | err = setup_signal_stack_sc(sp, signr, ka, regs, oldset); | 64 | err = setup_signal_stack_sc(sp, signr, ka, regs, oldset); |
76 | else | 65 | else |
77 | #endif | 66 | #endif |
78 | err = setup_signal_stack_si(sp, signr, ka, regs, info, oldset); | 67 | err = setup_signal_stack_si(sp, signr, ka, regs, info, oldset); |
79 | 68 | ||
80 | if(err){ | 69 | if (err) { |
81 | spin_lock_irq(¤t->sighand->siglock); | 70 | spin_lock_irq(¤t->sighand->siglock); |
82 | current->blocked = *oldset; | 71 | current->blocked = *oldset; |
83 | recalc_sigpending(); | 72 | recalc_sigpending(); |
@@ -87,7 +76,7 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
87 | spin_lock_irq(¤t->sighand->siglock); | 76 | spin_lock_irq(¤t->sighand->siglock); |
88 | sigorsets(¤t->blocked, ¤t->blocked, | 77 | sigorsets(¤t->blocked, ¤t->blocked, |
89 | &ka->sa.sa_mask); | 78 | &ka->sa.sa_mask); |
90 | if(!(ka->sa.sa_flags & SA_NODEFER)) | 79 | if (!(ka->sa.sa_flags & SA_NODEFER)) |
91 | sigaddset(¤t->blocked, signr); | 80 | sigaddset(¤t->blocked, signr); |
92 | recalc_sigpending(); | 81 | recalc_sigpending(); |
93 | spin_unlock_irq(¤t->sighand->siglock); | 82 | spin_unlock_irq(¤t->sighand->siglock); |
@@ -108,14 +97,16 @@ static int kern_do_signal(struct pt_regs *regs)
108 | else | 97 | else |
109 | oldset = ¤t->blocked; | 98 | oldset = ¤t->blocked; |
110 | 99 | ||
111 | while((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0){ | 100 | while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) { |
112 | handled_sig = 1; | 101 | handled_sig = 1; |
113 | /* Whee! Actually deliver the signal. */ | 102 | /* Whee! Actually deliver the signal. */ |
114 | if(!handle_signal(regs, sig, &ka_copy, &info, oldset)){ | 103 | if (!handle_signal(regs, sig, &ka_copy, &info, oldset)) { |
115 | /* a signal was successfully delivered; the saved | 104 | /* |
105 | * a signal was successfully delivered; the saved | ||
116 | * sigmask will have been stored in the signal frame, | 106 | * sigmask will have been stored in the signal frame, |
117 | * and will be restored by sigreturn, so we can simply | 107 | * and will be restored by sigreturn, so we can simply |
118 | * clear the TIF_RESTORE_SIGMASK flag */ | 108 | * clear the TIF_RESTORE_SIGMASK flag |
109 | */ | ||
119 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 110 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
120 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 111 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
121 | break; | 112 | break; |
@@ -123,9 +114,9 @@ static int kern_do_signal(struct pt_regs *regs)
123 | } | 114 | } |
124 | 115 | ||
125 | /* Did we come from a system call? */ | 116 | /* Did we come from a system call? */ |
126 | if(!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)){ | 117 | if (!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)) { |
127 | /* Restart the system call - no handlers present */ | 118 | /* Restart the system call - no handlers present */ |
128 | switch(PT_REGS_SYSCALL_RET(regs)){ | 119 | switch(PT_REGS_SYSCALL_RET(regs)) { |
129 | case -ERESTARTNOHAND: | 120 | case -ERESTARTNOHAND: |
130 | case -ERESTARTSYS: | 121 | case -ERESTARTSYS: |
131 | case -ERESTARTNOINTR: | 122 | case -ERESTARTNOINTR: |
@@ -136,22 +127,25 @@ static int kern_do_signal(struct pt_regs *regs)
136 | PT_REGS_ORIG_SYSCALL(regs) = __NR_restart_syscall; | 127 | PT_REGS_ORIG_SYSCALL(regs) = __NR_restart_syscall; |
137 | PT_REGS_RESTART_SYSCALL(regs); | 128 | PT_REGS_RESTART_SYSCALL(regs); |
138 | break; | 129 | break; |
139 | } | 130 | } |
140 | } | 131 | } |
141 | 132 | ||
142 | /* This closes a way to execute a system call on the host. If | 133 | /* |
134 | * This closes a way to execute a system call on the host. If | ||
143 | * you set a breakpoint on a system call instruction and singlestep | 135 | * you set a breakpoint on a system call instruction and singlestep |
144 | * from it, the tracing thread used to PTRACE_SINGLESTEP the process | 136 | * from it, the tracing thread used to PTRACE_SINGLESTEP the process |
145 | * rather than PTRACE_SYSCALL it, allowing the system call to execute | 137 | * rather than PTRACE_SYSCALL it, allowing the system call to execute |
146 | * on the host. The tracing thread will check this flag and | 138 | * on the host. The tracing thread will check this flag and |
147 | * PTRACE_SYSCALL if necessary. | 139 | * PTRACE_SYSCALL if necessary. |
148 | */ | 140 | */ |
149 | if(current->ptrace & PT_DTRACE) | 141 | if (current->ptrace & PT_DTRACE) |
150 | current->thread.singlestep_syscall = | 142 | current->thread.singlestep_syscall = |
151 | is_syscall(PT_REGS_IP(¤t->thread.regs)); | 143 | is_syscall(PT_REGS_IP(¤t->thread.regs)); |
152 | 144 | ||
153 | /* if there's no signal to deliver, we just put the saved sigmask | 145 | /* |
154 | * back */ | 146 | * if there's no signal to deliver, we just put the saved sigmask |
147 | * back | ||
148 | */ | ||
155 | if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) { | 149 | if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) { |
156 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 150 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
157 | sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); | 151 | sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); |
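A hedged user-space aside on the restart bookkeeping the hunk above reformats (plain C, not UML source; every name below is local to the example): when a signal interrupts a blocking call and the handler was installed without SA_RESTART, the call fails with EINTR, which is, loosely, the user-visible side of the -ERESTART* handling done in this file.

/* Illustrative only: shows EINTR on a blocking read() interrupted by a
 * handler installed without SA_RESTART. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig) { (void) sig; }

int main(void)
{
	struct sigaction sa;
	int fds[2];
	char c;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	/* Without SA_RESTART the read() below fails with EINTR when SIGALRM
	 * fires; with sa.sa_flags = SA_RESTART the kernel restarts it. */
	sigaction(SIGALRM, &sa, NULL);

	if (pipe(fds))
		return 1;
	alarm(1);
	if (read(fds[0], &c, 1) < 0 && errno == EINTR)
		printf("read interrupted, errno = EINTR as expected\n");
	return 0;
}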
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile index b2823cdd783e..0b76d8869c94 100644 --- a/arch/um/kernel/skas/Makefile +++ b/arch/um/kernel/skas/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com) | 2 | # Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | # Licensed under the GPL | 3 | # Licensed under the GPL |
4 | # | 4 | # |
5 | 5 | ||
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c index 902d74138952..c5475ecd9fd4 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c | |||
@@ -1,20 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/sched.h" | ||
7 | #include "linux/list.h" | ||
8 | #include "linux/spinlock.h" | ||
9 | #include "linux/slab.h" | ||
10 | #include "linux/errno.h" | ||
11 | #include "linux/mm.h" | 6 | #include "linux/mm.h" |
12 | #include "asm/current.h" | 7 | #include "linux/sched.h" |
13 | #include "asm/segment.h" | ||
14 | #include "asm/mmu.h" | ||
15 | #include "asm/pgalloc.h" | 8 | #include "asm/pgalloc.h" |
16 | #include "asm/pgtable.h" | 9 | #include "asm/pgtable.h" |
17 | #include "asm/ldt.h" | ||
18 | #include "os.h" | 10 | #include "os.h" |
19 | #include "skas.h" | 11 | #include "skas.h" |
20 | 12 | ||
@@ -41,10 +33,11 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc, | |||
41 | if (!pte) | 33 | if (!pte) |
42 | goto out_pte; | 34 | goto out_pte; |
43 | 35 | ||
44 | /* There's an interaction between the skas0 stub pages, stack | 36 | /* |
37 | * There's an interaction between the skas0 stub pages, stack | ||
45 | * randomization, and the BUG at the end of exit_mmap. exit_mmap | 38 | * randomization, and the BUG at the end of exit_mmap. exit_mmap |
46 | * checks that the number of page tables freed is the same as had | 39 | * checks that the number of page tables freed is the same as had |
47 | * been allocated. If the stack is on the last page table page, | 40 | * been allocated. If the stack is on the last page table page, |
48 | * then the stack pte page will be freed, and if not, it won't. To | 41 | * then the stack pte page will be freed, and if not, it won't. To |
49 | * avoid having to know where the stack is, or if the process mapped | 42 | * avoid having to know where the stack is, or if the process mapped |
50 | * something at the top of its address space for some other reason, | 43 | * something at the top of its address space for some other reason, |
@@ -54,36 +47,37 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc, | |||
54 | * destroy_context_skas. | 47 | * destroy_context_skas. |
55 | */ | 48 | */ |
56 | 49 | ||
57 | mm->context.skas.last_page_table = pmd_page_vaddr(*pmd); | 50 | mm->context.skas.last_page_table = pmd_page_vaddr(*pmd); |
58 | #ifdef CONFIG_3_LEVEL_PGTABLES | 51 | #ifdef CONFIG_3_LEVEL_PGTABLES |
59 | mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud)); | 52 | mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud)); |
60 | #endif | 53 | #endif |
61 | 54 | ||
62 | *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); | 55 | *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); |
63 | *pte = pte_mkread(*pte); | 56 | *pte = pte_mkread(*pte); |
64 | return(0); | 57 | return 0; |
65 | 58 | ||
66 | out_pmd: | 59 | out_pmd: |
67 | pud_free(pud); | 60 | pud_free(pud); |
68 | out_pte: | 61 | out_pte: |
69 | pmd_free(pmd); | 62 | pmd_free(pmd); |
70 | out: | 63 | out: |
71 | return(-ENOMEM); | 64 | return -ENOMEM; |
72 | } | 65 | } |
73 | 66 | ||
74 | int init_new_context(struct task_struct *task, struct mm_struct *mm) | 67 | int init_new_context(struct task_struct *task, struct mm_struct *mm) |
75 | { | 68 | { |
76 | struct mmu_context_skas *from_mm = NULL; | 69 | struct mmu_context_skas *from_mm = NULL; |
77 | struct mmu_context_skas *to_mm = &mm->context.skas; | 70 | struct mmu_context_skas *to_mm = &mm->context.skas; |
78 | unsigned long stack = 0; | 71 | unsigned long stack = 0; |
79 | int ret = -ENOMEM; | 72 | int ret = -ENOMEM; |
80 | 73 | ||
81 | if(skas_needs_stub){ | 74 | if (skas_needs_stub) { |
82 | stack = get_zeroed_page(GFP_KERNEL); | 75 | stack = get_zeroed_page(GFP_KERNEL); |
83 | if(stack == 0) | 76 | if (stack == 0) |
84 | goto out; | 77 | goto out; |
85 | 78 | ||
86 | /* This zeros the entry that pgd_alloc didn't, needed since | 79 | /* |
80 | * This zeros the entry that pgd_alloc didn't, needed since | ||
87 | * we are about to reinitialize it, and want mm.nr_ptes to | 81 | * we are about to reinitialize it, and want mm.nr_ptes to |
88 | * be accurate. | 82 | * be accurate. |
89 | */ | 83 | */ |
@@ -91,39 +85,39 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm) | |||
91 | 85 | ||
92 | ret = init_stub_pte(mm, CONFIG_STUB_CODE, | 86 | ret = init_stub_pte(mm, CONFIG_STUB_CODE, |
93 | (unsigned long) &__syscall_stub_start); | 87 | (unsigned long) &__syscall_stub_start); |
94 | if(ret) | 88 | if (ret) |
95 | goto out_free; | 89 | goto out_free; |
96 | 90 | ||
97 | ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack); | 91 | ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack); |
98 | if(ret) | 92 | if (ret) |
99 | goto out_free; | 93 | goto out_free; |
100 | 94 | ||
101 | mm->nr_ptes--; | 95 | mm->nr_ptes--; |
102 | } | 96 | } |
103 | 97 | ||
104 | to_mm->id.stack = stack; | 98 | to_mm->id.stack = stack; |
105 | if(current->mm != NULL && current->mm != &init_mm) | 99 | if (current->mm != NULL && current->mm != &init_mm) |
106 | from_mm = &current->mm->context.skas; | 100 | from_mm = &current->mm->context.skas; |
107 | 101 | ||
108 | if(proc_mm){ | 102 | if (proc_mm) { |
109 | ret = new_mm(stack); | 103 | ret = new_mm(stack); |
110 | if(ret < 0){ | 104 | if (ret < 0) { |
111 | printk("init_new_context_skas - new_mm failed, " | 105 | printk(KERN_ERR "init_new_context_skas - " |
112 | "errno = %d\n", ret); | 106 | "new_mm failed, errno = %d\n", ret); |
113 | goto out_free; | 107 | goto out_free; |
114 | } | 108 | } |
115 | to_mm->id.u.mm_fd = ret; | 109 | to_mm->id.u.mm_fd = ret; |
116 | } | 110 | } |
117 | else { | 111 | else { |
118 | if(from_mm) | 112 | if (from_mm) |
119 | to_mm->id.u.pid = copy_context_skas0(stack, | 113 | to_mm->id.u.pid = copy_context_skas0(stack, |
120 | from_mm->id.u.pid); | 114 | from_mm->id.u.pid); |
121 | else to_mm->id.u.pid = start_userspace(stack); | 115 | else to_mm->id.u.pid = start_userspace(stack); |
122 | } | 116 | } |
123 | 117 | ||
124 | ret = init_new_ldt(to_mm, from_mm); | 118 | ret = init_new_ldt(to_mm, from_mm); |
125 | if(ret < 0){ | 119 | if (ret < 0) { |
126 | printk("init_new_context_skas - init_ldt" | 120 | printk(KERN_ERR "init_new_context_skas - init_ldt" |
127 | " failed, errno = %d\n", ret); | 121 | " failed, errno = %d\n", ret); |
128 | goto out_free; | 122 | goto out_free; |
129 | } | 123 | } |
@@ -131,7 +125,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm) | |||
131 | return 0; | 125 | return 0; |
132 | 126 | ||
133 | out_free: | 127 | out_free: |
134 | if(to_mm->id.stack != 0) | 128 | if (to_mm->id.stack != 0) |
135 | free_page(to_mm->id.stack); | 129 | free_page(to_mm->id.stack); |
136 | out: | 130 | out: |
137 | return ret; | 131 | return ret; |
@@ -141,12 +135,12 @@ void destroy_context(struct mm_struct *mm) | |||
141 | { | 135 | { |
142 | struct mmu_context_skas *mmu = &mm->context.skas; | 136 | struct mmu_context_skas *mmu = &mm->context.skas; |
143 | 137 | ||
144 | if(proc_mm) | 138 | if (proc_mm) |
145 | os_close_file(mmu->id.u.mm_fd); | 139 | os_close_file(mmu->id.u.mm_fd); |
146 | else | 140 | else |
147 | os_kill_ptraced_process(mmu->id.u.pid, 1); | 141 | os_kill_ptraced_process(mmu->id.u.pid, 1); |
148 | 142 | ||
149 | if(!proc_mm || !ptrace_faultinfo){ | 143 | if (!proc_mm || !ptrace_faultinfo) { |
150 | free_page(mmu->id.stack); | 144 | free_page(mmu->id.stack); |
151 | pte_lock_deinit(virt_to_page(mmu->last_page_table)); | 145 | pte_lock_deinit(virt_to_page(mmu->last_page_table)); |
152 | pte_free_kernel((pte_t *) mmu->last_page_table); | 146 | pte_free_kernel((pte_t *) mmu->last_page_table); |
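A hedged sketch of the goto-based error unwinding used by init_new_context() above (the out_free/out labels): each failure jumps to the label that releases exactly what has been set up so far. The struct, labels, and malloc()ed buffers below are stand-ins for the stub stack and LDT state, not UML code.

#include <stdlib.h>

struct demo_ctx {
	void *stack;
	void *ldt;
};

static int demo_init(struct demo_ctx *ctx)
{
	int ret = -1;

	ctx->stack = malloc(4096);
	if (ctx->stack == NULL)
		goto out;

	ctx->ldt = malloc(4096);
	if (ctx->ldt == NULL)
		goto out_free_stack;

	return 0;

out_free_stack:
	/* Undo only what succeeded before the failure. */
	free(ctx->stack);
	ctx->stack = NULL;
out:
	return ret;
}

int main(void)
{
	struct demo_ctx ctx = { NULL, NULL };

	if (demo_init(&ctx))
		return 1;
	free(ctx.ldt);
	free(ctx.stack);
	return 0;
}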
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c index dabae62d52be..9ce1c49421f8 100644 --- a/arch/um/kernel/skas/process.c +++ b/arch/um/kernel/skas/process.c | |||
@@ -1,36 +1,23 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/sched.h" | ||
7 | #include "linux/slab.h" | ||
8 | #include "linux/ptrace.h" | ||
9 | #include "linux/proc_fs.h" | ||
10 | #include "linux/file.h" | ||
11 | #include "linux/errno.h" | ||
12 | #include "linux/init.h" | 6 | #include "linux/init.h" |
13 | #include "asm/uaccess.h" | 7 | #include "linux/sched.h" |
14 | #include "asm/atomic.h" | ||
15 | #include "kern_util.h" | ||
16 | #include "as-layout.h" | 8 | #include "as-layout.h" |
17 | #include "skas.h" | ||
18 | #include "os.h" | 9 | #include "os.h" |
19 | #include "tlb.h" | 10 | #include "skas.h" |
20 | #include "kern.h" | ||
21 | #include "registers.h" | ||
22 | |||
23 | extern void schedule_tail(struct task_struct *prev); | ||
24 | 11 | ||
25 | int new_mm(unsigned long stack) | 12 | int new_mm(unsigned long stack) |
26 | { | 13 | { |
27 | int fd; | 14 | int fd; |
28 | 15 | ||
29 | fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0); | 16 | fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0); |
30 | if(fd < 0) | 17 | if (fd < 0) |
31 | return fd; | 18 | return fd; |
32 | 19 | ||
33 | if(skas_needs_stub) | 20 | if (skas_needs_stub) |
34 | map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack); | 21 | map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack); |
35 | 22 | ||
36 | return fd; | 23 | return fd; |
@@ -62,7 +49,7 @@ int __init start_uml(void) | |||
62 | { | 49 | { |
63 | stack_protections((unsigned long) &cpu0_irqstack); | 50 | stack_protections((unsigned long) &cpu0_irqstack); |
64 | set_sigstack(cpu0_irqstack, THREAD_SIZE); | 51 | set_sigstack(cpu0_irqstack, THREAD_SIZE); |
65 | if(proc_mm) | 52 | if (proc_mm) |
66 | userspace_pid[0] = start_userspace(0); | 53 | userspace_pid[0] = start_userspace(0); |
67 | 54 | ||
68 | init_new_thread_signals(); | 55 | init_new_thread_signals(); |
@@ -75,7 +62,7 @@ int __init start_uml(void) | |||
75 | 62 | ||
76 | unsigned long current_stub_stack(void) | 63 | unsigned long current_stub_stack(void) |
77 | { | 64 | { |
78 | if(current->mm == NULL) | 65 | if (current->mm == NULL) |
79 | return 0; | 66 | return 0; |
80 | 67 | ||
81 | return current->mm->context.skas.id.stack; | 68 | return current->mm->context.skas.id.stack; |
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c index e183da633c89..8582c1331048 100644 --- a/arch/um/kernel/skas/syscall.c +++ b/arch/um/kernel/skas/syscall.c | |||
@@ -1,17 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/sys.h" | 6 | #include "linux/kernel.h" |
7 | #include "linux/ptrace.h" | 7 | #include "linux/ptrace.h" |
8 | #include "asm/errno.h" | ||
9 | #include "asm/unistd.h" | ||
10 | #include "asm/ptrace.h" | ||
11 | #include "asm/current.h" | ||
12 | #include "sysdep/syscalls.h" | ||
13 | #include "kern_util.h" | 8 | #include "kern_util.h" |
14 | #include "syscall.h" | 9 | #include "sysdep/ptrace.h" |
10 | #include "sysdep/syscalls.h" | ||
15 | 11 | ||
16 | void handle_syscall(struct uml_pt_regs *r) | 12 | void handle_syscall(struct uml_pt_regs *r) |
17 | { | 13 | { |
@@ -24,7 +20,8 @@ void handle_syscall(struct uml_pt_regs *r) | |||
24 | current->thread.nsyscalls++; | 20 | current->thread.nsyscalls++; |
25 | nsyscalls++; | 21 | nsyscalls++; |
26 | 22 | ||
27 | /* This should go in the declaration of syscall, but when I do that, | 23 | /* |
24 | * This should go in the declaration of syscall, but when I do that, | ||
28 | * strace -f -c bash -c 'ls ; ls' breaks, sometimes not tracing | 25 | * strace -f -c bash -c 'ls ; ls' breaks, sometimes not tracing |
29 | * children at all, sometimes hanging when bash doesn't see the first | 26 | * children at all, sometimes hanging when bash doesn't see the first |
30 | * ls exit. | 27 | * ls exit. |
@@ -33,7 +30,7 @@ void handle_syscall(struct uml_pt_regs *r) | |||
33 | * in case it's a compiler bug. | 30 | * in case it's a compiler bug. |
34 | */ | 31 | */ |
35 | syscall = UPT_SYSCALL_NR(r); | 32 | syscall = UPT_SYSCALL_NR(r); |
36 | if((syscall >= NR_syscalls) || (syscall < 0)) | 33 | if ((syscall >= NR_syscalls) || (syscall < 0)) |
37 | result = -ENOSYS; | 34 | result = -ENOSYS; |
38 | else result = EXECUTE_SYSCALL(syscall, regs); | 35 | else result = EXECUTE_SYSCALL(syscall, regs); |
39 | 36 | ||
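A hedged sketch of the bounds-checked table dispatch that handle_syscall() above performs before EXECUTE_SYSCALL: reject numbers outside [0, NR_syscalls) with -ENOSYS, then index the table. The table, handler, and names below are invented for illustration and are not the UML syscall table.

#include <errno.h>
#include <stdio.h>

typedef long (*syscall_handler)(long arg);

static long sys_demo_getpid(long arg) { (void) arg; return 42; }

static syscall_handler demo_table[] = {
	sys_demo_getpid,	/* slot 0 */
};
#define DEMO_NR_SYSCALLS ((int) (sizeof(demo_table) / sizeof(demo_table[0])))

static long demo_dispatch(int nr, long arg)
{
	/* Same shape as the range check in handle_syscall(). */
	if ((nr >= DEMO_NR_SYSCALLS) || (nr < 0))
		return -ENOSYS;
	return demo_table[nr](arg);
}

int main(void)
{
	printf("valid call   -> %ld\n", demo_dispatch(0, 0));
	printf("invalid call -> %ld\n", demo_dispatch(99, 0));
	return 0;
}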
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c index ebb29f5259a9..b9d92b2089ae 100644 --- a/arch/um/kernel/syscall.c +++ b/arch/um/kernel/syscall.c | |||
@@ -1,25 +1,17 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/sched.h" | ||
7 | #include "linux/file.h" | 6 | #include "linux/file.h" |
8 | #include "linux/smp_lock.h" | ||
9 | #include "linux/mm.h" | ||
10 | #include "linux/fs.h" | 7 | #include "linux/fs.h" |
8 | #include "linux/mm.h" | ||
9 | #include "linux/sched.h" | ||
11 | #include "linux/utsname.h" | 10 | #include "linux/utsname.h" |
12 | #include "linux/msg.h" | 11 | #include "asm/current.h" |
13 | #include "linux/shm.h" | ||
14 | #include "linux/sys.h" | ||
15 | #include "linux/syscalls.h" | ||
16 | #include "linux/unistd.h" | ||
17 | #include "linux/slab.h" | ||
18 | #include "linux/utime.h" | ||
19 | #include "asm/mman.h" | 12 | #include "asm/mman.h" |
20 | #include "asm/uaccess.h" | 13 | #include "asm/uaccess.h" |
21 | #include "kern_util.h" | 14 | #include "asm/unistd.h" |
22 | #include "sysdep/syscalls.h" | ||
23 | 15 | ||
24 | /* Unlocked, I don't care if this is a bit off */ | 16 | /* Unlocked, I don't care if this is a bit off */ |
25 | int nsyscalls = 0; | 17 | int nsyscalls = 0; |
@@ -32,7 +24,7 @@ long sys_fork(void) | |||
32 | ret = do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs), | 24 | ret = do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs), |
33 | &current->thread.regs, 0, NULL, NULL); | 25 | &current->thread.regs, 0, NULL, NULL); |
34 | current->thread.forking = 0; | 26 | current->thread.forking = 0; |
35 | return(ret); | 27 | return ret; |
36 | } | 28 | } |
37 | 29 | ||
38 | long sys_vfork(void) | 30 | long sys_vfork(void) |
@@ -44,7 +36,7 @@ long sys_vfork(void) | |||
44 | UPT_SP(&current->thread.regs.regs), | 36 | UPT_SP(&current->thread.regs.regs), |
45 | &current->thread.regs, 0, NULL, NULL); | 37 | &current->thread.regs, 0, NULL, NULL); |
46 | current->thread.forking = 0; | 38 | current->thread.forking = 0; |
47 | return(ret); | 39 | return ret; |
48 | } | 40 | } |
49 | 41 | ||
50 | /* common code for old and new mmaps */ | 42 | /* common code for old and new mmaps */ |
@@ -90,15 +82,15 @@ long old_mmap(unsigned long addr, unsigned long len, | |||
90 | */ | 82 | */ |
91 | long sys_pipe(unsigned long __user * fildes) | 83 | long sys_pipe(unsigned long __user * fildes) |
92 | { | 84 | { |
93 | int fd[2]; | 85 | int fd[2]; |
94 | long error; | 86 | long error; |
95 | 87 | ||
96 | error = do_pipe(fd); | 88 | error = do_pipe(fd); |
97 | if (!error) { | 89 | if (!error) { |
98 | if (copy_to_user(fildes, fd, sizeof(fd))) | 90 | if (copy_to_user(fildes, fd, sizeof(fd))) |
99 | error = -EFAULT; | 91 | error = -EFAULT; |
100 | } | 92 | } |
101 | return error; | 93 | return error; |
102 | } | 94 | } |
103 | 95 | ||
104 | 96 | ||
@@ -122,7 +114,7 @@ long sys_olduname(struct oldold_utsname __user * name) | |||
122 | if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname))) | 114 | if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname))) |
123 | return -EFAULT; | 115 | return -EFAULT; |
124 | 116 | ||
125 | down_read(&uts_sem); | 117 | down_read(&uts_sem); |
126 | 118 | ||
127 | error = __copy_to_user(&name->sysname, &utsname()->sysname, | 119 | error = __copy_to_user(&name->sysname, &utsname()->sysname, |
128 | __OLD_UTS_LEN); | 120 | __OLD_UTS_LEN); |
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 90e24e2dbeaa..4fc8c2586b70 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c | |||
@@ -1,28 +1,19 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/kernel.h" | ||
7 | #include "linux/module.h" | ||
8 | #include "linux/unistd.h" | ||
9 | #include "linux/stddef.h" | ||
10 | #include "linux/spinlock.h" | ||
11 | #include "linux/time.h" | ||
12 | #include "linux/sched.h" | ||
13 | #include "linux/interrupt.h" | 6 | #include "linux/interrupt.h" |
14 | #include "linux/init.h" | 7 | #include "linux/jiffies.h" |
15 | #include "linux/delay.h" | 8 | #include "linux/threads.h" |
16 | #include "linux/hrtimer.h" | ||
17 | #include "asm/irq.h" | 9 | #include "asm/irq.h" |
18 | #include "asm/param.h" | 10 | #include "asm/param.h" |
19 | #include "asm/current.h" | ||
20 | #include "kern_util.h" | 11 | #include "kern_util.h" |
21 | #include "os.h" | 12 | #include "os.h" |
22 | 13 | ||
23 | int hz(void) | 14 | int hz(void) |
24 | { | 15 | { |
25 | return(HZ); | 16 | return HZ; |
26 | } | 17 | } |
27 | 18 | ||
28 | /* | 19 | /* |
@@ -43,7 +34,7 @@ void timer_irq(struct uml_pt_regs *regs) | |||
43 | unsigned long long ticks = 0; | 34 | unsigned long long ticks = 0; |
44 | #ifdef CONFIG_UML_REAL_TIME_CLOCK | 35 | #ifdef CONFIG_UML_REAL_TIME_CLOCK |
45 | int c = cpu(); | 36 | int c = cpu(); |
46 | if(prev_nsecs[c]){ | 37 | if (prev_nsecs[c]) { |
47 | /* We've had 1 tick */ | 38 | /* We've had 1 tick */ |
48 | unsigned long long nsecs = os_nsecs(); | 39 | unsigned long long nsecs = os_nsecs(); |
49 | 40 | ||
@@ -51,7 +42,7 @@ void timer_irq(struct uml_pt_regs *regs) | |||
51 | prev_nsecs[c] = nsecs; | 42 | prev_nsecs[c] = nsecs; |
52 | 43 | ||
53 | /* Protect against the host clock being set backwards */ | 44 | /* Protect against the host clock being set backwards */ |
54 | if(delta[c] < 0) | 45 | if (delta[c] < 0) |
55 | delta[c] = 0; | 46 | delta[c] = 0; |
56 | 47 | ||
57 | ticks += (delta[c] * HZ) / BILLION; | 48 | ticks += (delta[c] * HZ) / BILLION; |
@@ -61,7 +52,7 @@ void timer_irq(struct uml_pt_regs *regs) | |||
61 | #else | 52 | #else |
62 | ticks = 1; | 53 | ticks = 1; |
63 | #endif | 54 | #endif |
64 | while(ticks > 0){ | 55 | while (ticks > 0) { |
65 | do_IRQ(TIMER_IRQ, regs); | 56 | do_IRQ(TIMER_IRQ, regs); |
66 | ticks--; | 57 | ticks--; |
67 | } | 58 | } |
@@ -112,12 +103,12 @@ static void register_timer(void) | |||
112 | int err; | 103 | int err; |
113 | 104 | ||
114 | err = request_irq(TIMER_IRQ, um_timer, IRQF_DISABLED, "timer", NULL); | 105 | err = request_irq(TIMER_IRQ, um_timer, IRQF_DISABLED, "timer", NULL); |
115 | if(err != 0) | 106 | if (err != 0) |
116 | printk(KERN_ERR "register_timer : request_irq failed - " | 107 | printk(KERN_ERR "register_timer : request_irq failed - " |
117 | "errno = %d\n", -err); | 108 | "errno = %d\n", -err); |
118 | 109 | ||
119 | err = set_interval(1); | 110 | err = set_interval(1); |
120 | if(err != 0) | 111 | if (err != 0) |
121 | printk(KERN_ERR "register_timer : set_interval failed - " | 112 | printk(KERN_ERR "register_timer : set_interval failed - " |
122 | "errno = %d\n", -err); | 113 | "errno = %d\n", -err); |
123 | } | 114 | } |
@@ -144,7 +135,8 @@ void do_gettimeofday(struct timeval *tv) | |||
144 | xtime.tv_nsec; | 135 | xtime.tv_nsec; |
145 | #endif | 136 | #endif |
146 | tv->tv_sec = nsecs / NSEC_PER_SEC; | 137 | tv->tv_sec = nsecs / NSEC_PER_SEC; |
147 | /* Careful about calculations here - this was originally done as | 138 | /* |
139 | * Careful about calculations here - this was originally done as | ||
148 | * (nsecs - tv->tv_sec * NSEC_PER_SEC) / NSEC_PER_USEC | 140 | * (nsecs - tv->tv_sec * NSEC_PER_SEC) / NSEC_PER_USEC |
149 | * which gave bogus (> 1000000) values. Dunno why, suspect gcc | 141 | * which gave bogus (> 1000000) values. Dunno why, suspect gcc |
150 | * (4.0.0) miscompiled it, or there's a subtle 64/32-bit conversion | 142 | * (4.0.0) miscompiled it, or there's a subtle 64/32-bit conversion |
@@ -176,7 +168,7 @@ int do_settimeofday(struct timespec *tv) | |||
176 | 168 | ||
177 | void timer_handler(int sig, struct uml_pt_regs *regs) | 169 | void timer_handler(int sig, struct uml_pt_regs *regs) |
178 | { | 170 | { |
179 | if(current_thread->cpu == 0) | 171 | if (current_thread->cpu == 0) |
180 | timer_irq(regs); | 172 | timer_irq(regs); |
181 | local_irq_disable(); | 173 | local_irq_disable(); |
182 | irq_enter(); | 174 | irq_enter(); |
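A hedged arithmetic sketch for the do_gettimeofday() comment above: splitting nanoseconds into (sec, usec) entirely in 64-bit arithmetic keeps the microsecond part below 1000000, avoiding the bogus values the comment describes. The constants and sample value are illustrative, not taken from the UML configuration.

#include <stdio.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL

int main(void)
{
	unsigned long long nsecs = 1234567891011ULL;	/* ~1234.57 s */
	unsigned long long sec  = nsecs / NSEC_PER_SEC;
	unsigned long long usec = (nsecs % NSEC_PER_SEC) / NSEC_PER_USEC;

	/* prints sec = 1234, usec = 567891 (always < 1000000) */
	printf("sec = %llu, usec = %llu\n", sec, usec);
	return 0;
}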
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index 12b8c637527d..849922fcfb60 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c | |||
@@ -1,19 +1,16 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/mm.h" | 6 | #include "linux/mm.h" |
7 | #include "asm/page.h" | ||
8 | #include "asm/pgalloc.h" | ||
9 | #include "asm/pgtable.h" | 7 | #include "asm/pgtable.h" |
10 | #include "asm/tlbflush.h" | 8 | #include "asm/tlbflush.h" |
11 | #include "as-layout.h" | 9 | #include "as-layout.h" |
12 | #include "tlb.h" | ||
13 | #include "mem.h" | ||
14 | #include "mem_user.h" | 10 | #include "mem_user.h" |
15 | #include "os.h" | 11 | #include "os.h" |
16 | #include "skas.h" | 12 | #include "skas.h" |
13 | #include "tlb.h" | ||
17 | 14 | ||
18 | static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, | 15 | static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, |
19 | unsigned int prot, struct host_vm_op *ops, int *index, | 16 | unsigned int prot, struct host_vm_op *ops, int *index, |
@@ -26,18 +23,18 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, | |||
26 | int fd, ret = 0; | 23 | int fd, ret = 0; |
27 | 24 | ||
28 | fd = phys_mapping(phys, &offset); | 25 | fd = phys_mapping(phys, &offset); |
29 | if(*index != -1){ | 26 | if (*index != -1) { |
30 | last = &ops[*index]; | 27 | last = &ops[*index]; |
31 | if((last->type == MMAP) && | 28 | if ((last->type == MMAP) && |
32 | (last->u.mmap.addr + last->u.mmap.len == virt) && | 29 | (last->u.mmap.addr + last->u.mmap.len == virt) && |
33 | (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) && | 30 | (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) && |
34 | (last->u.mmap.offset + last->u.mmap.len == offset)){ | 31 | (last->u.mmap.offset + last->u.mmap.len == offset)) { |
35 | last->u.mmap.len += len; | 32 | last->u.mmap.len += len; |
36 | return 0; | 33 | return 0; |
37 | } | 34 | } |
38 | } | 35 | } |
39 | 36 | ||
40 | if(*index == last_filled){ | 37 | if (*index == last_filled) { |
41 | ret = (*do_ops)(mmu, ops, last_filled, 0, flush); | 38 | ret = (*do_ops)(mmu, ops, last_filled, 0, flush); |
42 | *index = -1; | 39 | *index = -1; |
43 | } | 40 | } |
@@ -62,16 +59,16 @@ static int add_munmap(unsigned long addr, unsigned long len, | |||
62 | struct host_vm_op *last; | 59 | struct host_vm_op *last; |
63 | int ret = 0; | 60 | int ret = 0; |
64 | 61 | ||
65 | if(*index != -1){ | 62 | if (*index != -1) { |
66 | last = &ops[*index]; | 63 | last = &ops[*index]; |
67 | if((last->type == MUNMAP) && | 64 | if ((last->type == MUNMAP) && |
68 | (last->u.munmap.addr + last->u.mmap.len == addr)){ | 65 | (last->u.munmap.addr + last->u.mmap.len == addr)) { |
69 | last->u.munmap.len += len; | 66 | last->u.munmap.len += len; |
70 | return 0; | 67 | return 0; |
71 | } | 68 | } |
72 | } | 69 | } |
73 | 70 | ||
74 | if(*index == last_filled){ | 71 | if (*index == last_filled) { |
75 | ret = (*do_ops)(mmu, ops, last_filled, 0, flush); | 72 | ret = (*do_ops)(mmu, ops, last_filled, 0, flush); |
76 | *index = -1; | 73 | *index = -1; |
77 | } | 74 | } |
@@ -92,17 +89,17 @@ static int add_mprotect(unsigned long addr, unsigned long len, | |||
92 | struct host_vm_op *last; | 89 | struct host_vm_op *last; |
93 | int ret = 0; | 90 | int ret = 0; |
94 | 91 | ||
95 | if(*index != -1){ | 92 | if (*index != -1) { |
96 | last = &ops[*index]; | 93 | last = &ops[*index]; |
97 | if((last->type == MPROTECT) && | 94 | if ((last->type == MPROTECT) && |
98 | (last->u.mprotect.addr + last->u.mprotect.len == addr) && | 95 | (last->u.mprotect.addr + last->u.mprotect.len == addr) && |
99 | (last->u.mprotect.prot == prot)){ | 96 | (last->u.mprotect.prot == prot)) { |
100 | last->u.mprotect.len += len; | 97 | last->u.mprotect.len += len; |
101 | return 0; | 98 | return 0; |
102 | } | 99 | } |
103 | } | 100 | } |
104 | 101 | ||
105 | if(*index == last_filled){ | 102 | if (*index == last_filled) { |
106 | ret = (*do_ops)(mmu, ops, last_filled, 0, flush); | 103 | ret = (*do_ops)(mmu, ops, last_filled, 0, flush); |
107 | *index = -1; | 104 | *index = -1; |
108 | } | 105 | } |
@@ -141,15 +138,15 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr, | |||
141 | } | 138 | } |
142 | prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) | | 139 | prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) | |
143 | (x ? UM_PROT_EXEC : 0)); | 140 | (x ? UM_PROT_EXEC : 0)); |
144 | if(force || pte_newpage(*pte)){ | 141 | if (force || pte_newpage(*pte)) { |
145 | if(pte_present(*pte)) | 142 | if (pte_present(*pte)) |
146 | ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK, | 143 | ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK, |
147 | PAGE_SIZE, prot, ops, op_index, | 144 | PAGE_SIZE, prot, ops, op_index, |
148 | last_op, mmu, flush, do_ops); | 145 | last_op, mmu, flush, do_ops); |
149 | else ret = add_munmap(addr, PAGE_SIZE, ops, op_index, | 146 | else ret = add_munmap(addr, PAGE_SIZE, ops, op_index, |
150 | last_op, mmu, flush, do_ops); | 147 | last_op, mmu, flush, do_ops); |
151 | } | 148 | } |
152 | else if(pte_newprot(*pte)) | 149 | else if (pte_newprot(*pte)) |
153 | ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index, | 150 | ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index, |
154 | last_op, mmu, flush, do_ops); | 151 | last_op, mmu, flush, do_ops); |
155 | *pte = pte_mkuptodate(*pte); | 152 | *pte = pte_mkuptodate(*pte); |
@@ -172,8 +169,8 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr, | |||
172 | pmd = pmd_offset(pud, addr); | 169 | pmd = pmd_offset(pud, addr); |
173 | do { | 170 | do { |
174 | next = pmd_addr_end(addr, end); | 171 | next = pmd_addr_end(addr, end); |
175 | if(!pmd_present(*pmd)){ | 172 | if (!pmd_present(*pmd)) { |
176 | if(force || pmd_newpage(*pmd)){ | 173 | if (force || pmd_newpage(*pmd)) { |
177 | ret = add_munmap(addr, next - addr, ops, | 174 | ret = add_munmap(addr, next - addr, ops, |
178 | op_index, last_op, mmu, | 175 | op_index, last_op, mmu, |
179 | flush, do_ops); | 176 | flush, do_ops); |
@@ -202,8 +199,8 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr, | |||
202 | pud = pud_offset(pgd, addr); | 199 | pud = pud_offset(pgd, addr); |
203 | do { | 200 | do { |
204 | next = pud_addr_end(addr, end); | 201 | next = pud_addr_end(addr, end); |
205 | if(!pud_present(*pud)){ | 202 | if (!pud_present(*pud)) { |
206 | if(force || pud_newpage(*pud)){ | 203 | if (force || pud_newpage(*pud)) { |
207 | ret = add_munmap(addr, next - addr, ops, | 204 | ret = add_munmap(addr, next - addr, ops, |
208 | op_index, last_op, mmu, | 205 | op_index, last_op, mmu, |
209 | flush, do_ops); | 206 | flush, do_ops); |
@@ -233,8 +230,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, | |||
233 | pgd = pgd_offset(mm, addr); | 230 | pgd = pgd_offset(mm, addr); |
234 | do { | 231 | do { |
235 | next = pgd_addr_end(addr, end_addr); | 232 | next = pgd_addr_end(addr, end_addr); |
236 | if(!pgd_present(*pgd)){ | 233 | if (!pgd_present(*pgd)) { |
237 | if (force || pgd_newpage(*pgd)){ | 234 | if (force || pgd_newpage(*pgd)) { |
238 | ret = add_munmap(addr, next - addr, ops, | 235 | ret = add_munmap(addr, next - addr, ops, |
239 | &op_index, last_op, mmu, | 236 | &op_index, last_op, mmu, |
240 | &flush, do_ops); | 237 | &flush, do_ops); |
@@ -246,12 +243,13 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, | |||
246 | do_ops); | 243 | do_ops); |
247 | } while (pgd++, addr = next, ((addr != end_addr) && !ret)); | 244 | } while (pgd++, addr = next, ((addr != end_addr) && !ret)); |
248 | 245 | ||
249 | if(!ret) | 246 | if (!ret) |
250 | ret = (*do_ops)(mmu, ops, op_index, 1, &flush); | 247 | ret = (*do_ops)(mmu, ops, op_index, 1, &flush); |
251 | 248 | ||
252 | /* This is not an else because ret is modified above */ | 249 | /* This is not an else because ret is modified above */ |
253 | if(ret) { | 250 | if (ret) { |
254 | printk("fix_range_common: failed, killing current process\n"); | 251 | printk(KERN_ERR "fix_range_common: failed, killing current " |
252 | "process\n"); | ||
255 | force_sig(SIGKILL, current); | 253 | force_sig(SIGKILL, current); |
256 | } | 254 | } |
257 | } | 255 | } |
@@ -267,17 +265,17 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end) | |||
267 | int updated = 0, err; | 265 | int updated = 0, err; |
268 | 266 | ||
269 | mm = &init_mm; | 267 | mm = &init_mm; |
270 | for(addr = start; addr < end;){ | 268 | for (addr = start; addr < end;) { |
271 | pgd = pgd_offset(mm, addr); | 269 | pgd = pgd_offset(mm, addr); |
272 | if(!pgd_present(*pgd)){ | 270 | if (!pgd_present(*pgd)) { |
273 | last = ADD_ROUND(addr, PGDIR_SIZE); | 271 | last = ADD_ROUND(addr, PGDIR_SIZE); |
274 | if(last > end) | 272 | if (last > end) |
275 | last = end; | 273 | last = end; |
276 | if(pgd_newpage(*pgd)){ | 274 | if (pgd_newpage(*pgd)) { |
277 | updated = 1; | 275 | updated = 1; |
278 | err = os_unmap_memory((void *) addr, | 276 | err = os_unmap_memory((void *) addr, |
279 | last - addr); | 277 | last - addr); |
280 | if(err < 0) | 278 | if (err < 0) |
281 | panic("munmap failed, errno = %d\n", | 279 | panic("munmap failed, errno = %d\n", |
282 | -err); | 280 | -err); |
283 | } | 281 | } |
@@ -286,15 +284,15 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end) | |||
286 | } | 284 | } |
287 | 285 | ||
288 | pud = pud_offset(pgd, addr); | 286 | pud = pud_offset(pgd, addr); |
289 | if(!pud_present(*pud)){ | 287 | if (!pud_present(*pud)) { |
290 | last = ADD_ROUND(addr, PUD_SIZE); | 288 | last = ADD_ROUND(addr, PUD_SIZE); |
291 | if(last > end) | 289 | if (last > end) |
292 | last = end; | 290 | last = end; |
293 | if(pud_newpage(*pud)){ | 291 | if (pud_newpage(*pud)) { |
294 | updated = 1; | 292 | updated = 1; |
295 | err = os_unmap_memory((void *) addr, | 293 | err = os_unmap_memory((void *) addr, |
296 | last - addr); | 294 | last - addr); |
297 | if(err < 0) | 295 | if (err < 0) |
298 | panic("munmap failed, errno = %d\n", | 296 | panic("munmap failed, errno = %d\n", |
299 | -err); | 297 | -err); |
300 | } | 298 | } |
@@ -303,15 +301,15 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end) | |||
303 | } | 301 | } |
304 | 302 | ||
305 | pmd = pmd_offset(pud, addr); | 303 | pmd = pmd_offset(pud, addr); |
306 | if(!pmd_present(*pmd)){ | 304 | if (!pmd_present(*pmd)) { |
307 | last = ADD_ROUND(addr, PMD_SIZE); | 305 | last = ADD_ROUND(addr, PMD_SIZE); |
308 | if(last > end) | 306 | if (last > end) |
309 | last = end; | 307 | last = end; |
310 | if(pmd_newpage(*pmd)){ | 308 | if (pmd_newpage(*pmd)) { |
311 | updated = 1; | 309 | updated = 1; |
312 | err = os_unmap_memory((void *) addr, | 310 | err = os_unmap_memory((void *) addr, |
313 | last - addr); | 311 | last - addr); |
314 | if(err < 0) | 312 | if (err < 0) |
315 | panic("munmap failed, errno = %d\n", | 313 | panic("munmap failed, errno = %d\n", |
316 | -err); | 314 | -err); |
317 | } | 315 | } |
@@ -320,25 +318,25 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end) | |||
320 | } | 318 | } |
321 | 319 | ||
322 | pte = pte_offset_kernel(pmd, addr); | 320 | pte = pte_offset_kernel(pmd, addr); |
323 | if(!pte_present(*pte) || pte_newpage(*pte)){ | 321 | if (!pte_present(*pte) || pte_newpage(*pte)) { |
324 | updated = 1; | 322 | updated = 1; |
325 | err = os_unmap_memory((void *) addr, | 323 | err = os_unmap_memory((void *) addr, |
326 | PAGE_SIZE); | 324 | PAGE_SIZE); |
327 | if(err < 0) | 325 | if (err < 0) |
328 | panic("munmap failed, errno = %d\n", | 326 | panic("munmap failed, errno = %d\n", |
329 | -err); | 327 | -err); |
330 | if(pte_present(*pte)) | 328 | if (pte_present(*pte)) |
331 | map_memory(addr, | 329 | map_memory(addr, |
332 | pte_val(*pte) & PAGE_MASK, | 330 | pte_val(*pte) & PAGE_MASK, |
333 | PAGE_SIZE, 1, 1, 1); | 331 | PAGE_SIZE, 1, 1, 1); |
334 | } | 332 | } |
335 | else if(pte_newprot(*pte)){ | 333 | else if (pte_newprot(*pte)) { |
336 | updated = 1; | 334 | updated = 1; |
337 | os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1); | 335 | os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1); |
338 | } | 336 | } |
339 | addr += PAGE_SIZE; | 337 | addr += PAGE_SIZE; |
340 | } | 338 | } |
341 | return(updated); | 339 | return updated; |
342 | } | 340 | } |
343 | 341 | ||
344 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) | 342 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) |
@@ -354,15 +352,15 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) | |||
354 | 352 | ||
355 | address &= PAGE_MASK; | 353 | address &= PAGE_MASK; |
356 | pgd = pgd_offset(mm, address); | 354 | pgd = pgd_offset(mm, address); |
357 | if(!pgd_present(*pgd)) | 355 | if (!pgd_present(*pgd)) |
358 | goto kill; | 356 | goto kill; |
359 | 357 | ||
360 | pud = pud_offset(pgd, address); | 358 | pud = pud_offset(pgd, address); |
361 | if(!pud_present(*pud)) | 359 | if (!pud_present(*pud)) |
362 | goto kill; | 360 | goto kill; |
363 | 361 | ||
364 | pmd = pmd_offset(pud, address); | 362 | pmd = pmd_offset(pud, address); |
365 | if(!pmd_present(*pmd)) | 363 | if (!pmd_present(*pmd)) |
366 | goto kill; | 364 | goto kill; |
367 | 365 | ||
368 | pte = pte_offset_kernel(pmd, address); | 366 | pte = pte_offset_kernel(pmd, address); |
@@ -380,8 +378,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) | |||
380 | mm_id = &mm->context.skas.id; | 378 | mm_id = &mm->context.skas.id; |
381 | prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) | | 379 | prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) | |
382 | (x ? UM_PROT_EXEC : 0)); | 380 | (x ? UM_PROT_EXEC : 0)); |
383 | if(pte_newpage(*pte)){ | 381 | if (pte_newpage(*pte)) { |
384 | if(pte_present(*pte)){ | 382 | if (pte_present(*pte)) { |
385 | unsigned long long offset; | 383 | unsigned long long offset; |
386 | int fd; | 384 | int fd; |
387 | 385 | ||
@@ -391,10 +389,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) | |||
391 | } | 389 | } |
392 | else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush); | 390 | else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush); |
393 | } | 391 | } |
394 | else if(pte_newprot(*pte)) | 392 | else if (pte_newprot(*pte)) |
395 | err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush); | 393 | err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush); |
396 | 394 | ||
397 | if(err) | 395 | if (err) |
398 | goto kill; | 396 | goto kill; |
399 | 397 | ||
400 | *pte = pte_mkuptodate(*pte); | 398 | *pte = pte_mkuptodate(*pte); |
@@ -402,28 +400,28 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) | |||
402 | return; | 400 | return; |
403 | 401 | ||
404 | kill: | 402 | kill: |
405 | printk("Failed to flush page for address 0x%lx\n", address); | 403 | printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address); |
406 | force_sig(SIGKILL, current); | 404 | force_sig(SIGKILL, current); |
407 | } | 405 | } |
408 | 406 | ||
409 | pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address) | 407 | pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address) |
410 | { | 408 | { |
411 | return(pgd_offset(mm, address)); | 409 | return pgd_offset(mm, address); |
412 | } | 410 | } |
413 | 411 | ||
414 | pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address) | 412 | pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address) |
415 | { | 413 | { |
416 | return(pud_offset(pgd, address)); | 414 | return pud_offset(pgd, address); |
417 | } | 415 | } |
418 | 416 | ||
419 | pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address) | 417 | pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address) |
420 | { | 418 | { |
421 | return(pmd_offset(pud, address)); | 419 | return pmd_offset(pud, address); |
422 | } | 420 | } |
423 | 421 | ||
424 | pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address) | 422 | pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address) |
425 | { | 423 | { |
426 | return(pte_offset_kernel(pmd, address)); | 424 | return pte_offset_kernel(pmd, address); |
427 | } | 425 | } |
428 | 426 | ||
429 | pte_t *addr_pte(struct task_struct *task, unsigned long addr) | 427 | pte_t *addr_pte(struct task_struct *task, unsigned long addr) |
@@ -432,7 +430,7 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr) | |||
432 | pud_t *pud = pud_offset(pgd, addr); | 430 | pud_t *pud = pud_offset(pgd, addr); |
433 | pmd_t *pmd = pmd_offset(pud, addr); | 431 | pmd_t *pmd = pmd_offset(pud, addr); |
434 | 432 | ||
435 | return(pte_offset_map(pmd, addr)); | 433 | return pte_offset_map(pmd, addr); |
436 | } | 434 | } |
437 | 435 | ||
438 | void flush_tlb_all(void) | 436 | void flush_tlb_all(void) |
@@ -452,18 +450,18 @@ void flush_tlb_kernel_vm(void) | |||
452 | 450 | ||
453 | void __flush_tlb_one(unsigned long addr) | 451 | void __flush_tlb_one(unsigned long addr) |
454 | { | 452 | { |
455 | flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE); | 453 | flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE); |
456 | } | 454 | } |
457 | 455 | ||
458 | static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last, | 456 | static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last, |
459 | int finished, void **flush) | 457 | int finished, void **flush) |
460 | { | 458 | { |
461 | struct host_vm_op *op; | 459 | struct host_vm_op *op; |
462 | int i, ret = 0; | 460 | int i, ret = 0; |
463 | 461 | ||
464 | for(i = 0; i <= last && !ret; i++){ | 462 | for (i = 0; i <= last && !ret; i++) { |
465 | op = &ops[i]; | 463 | op = &ops[i]; |
466 | switch(op->type){ | 464 | switch(op->type) { |
467 | case MMAP: | 465 | case MMAP: |
468 | ret = map(&mmu->skas.id, op->u.mmap.addr, | 466 | ret = map(&mmu->skas.id, op->u.mmap.addr, |
469 | op->u.mmap.len, op->u.mmap.prot, | 467 | op->u.mmap.len, op->u.mmap.prot, |
@@ -480,7 +478,8 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last, | |||
480 | finished, flush); | 478 | finished, flush); |
481 | break; | 479 | break; |
482 | default: | 480 | default: |
483 | printk("Unknown op type %d in do_ops\n", op->type); | 481 | printk(KERN_ERR "Unknown op type %d in do_ops\n", |
482 | op->type); | ||
484 | break; | 483 | break; |
485 | } | 484 | } |
486 | } | 485 | } |
@@ -491,32 +490,33 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last, | |||
491 | static void fix_range(struct mm_struct *mm, unsigned long start_addr, | 490 | static void fix_range(struct mm_struct *mm, unsigned long start_addr, |
492 | unsigned long end_addr, int force) | 491 | unsigned long end_addr, int force) |
493 | { | 492 | { |
494 | if(!proc_mm && (end_addr > CONFIG_STUB_START)) | 493 | if (!proc_mm && (end_addr > CONFIG_STUB_START)) |
495 | end_addr = CONFIG_STUB_START; | 494 | end_addr = CONFIG_STUB_START; |
496 | 495 | ||
497 | fix_range_common(mm, start_addr, end_addr, force, do_ops); | 496 | fix_range_common(mm, start_addr, end_addr, force, do_ops); |
498 | } | 497 | } |
499 | 498 | ||
500 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | 499 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
501 | unsigned long end) | 500 | unsigned long end) |
502 | { | 501 | { |
503 | if(vma->vm_mm == NULL) | 502 | if (vma->vm_mm == NULL) |
504 | flush_tlb_kernel_range_common(start, end); | 503 | flush_tlb_kernel_range_common(start, end); |
505 | else fix_range(vma->vm_mm, start, end, 0); | 504 | else fix_range(vma->vm_mm, start, end, 0); |
506 | } | 505 | } |
507 | 506 | ||
508 | void flush_tlb_mm(struct mm_struct *mm) | 507 | void flush_tlb_mm(struct mm_struct *mm) |
509 | { | 508 | { |
510 | unsigned long end; | 509 | unsigned long end; |
511 | 510 | ||
512 | /* Don't bother flushing if this address space is about to be | 511 | /* |
513 | * destroyed. | 512 | * Don't bother flushing if this address space is about to be |
514 | */ | 513 | * destroyed. |
515 | if(atomic_read(&mm->mm_users) == 0) | 514 | */ |
516 | return; | 515 | if (atomic_read(&mm->mm_users) == 0) |
516 | return; | ||
517 | 517 | ||
518 | end = proc_mm ? task_size : CONFIG_STUB_START; | 518 | end = proc_mm ? task_size : CONFIG_STUB_START; |
519 | fix_range(mm, 0, end, 0); | 519 | fix_range(mm, 0, end, 0); |
520 | } | 520 | } |
521 | 521 | ||
522 | void force_flush_all(void) | 522 | void force_flush_all(void) |
@@ -524,7 +524,7 @@ void force_flush_all(void) | |||
524 | struct mm_struct *mm = current->mm; | 524 | struct mm_struct *mm = current->mm; |
525 | struct vm_area_struct *vma = mm->mmap; | 525 | struct vm_area_struct *vma = mm->mmap; |
526 | 526 | ||
527 | while(vma != NULL) { | 527 | while (vma != NULL) { |
528 | fix_range(mm, vma->vm_start, vma->vm_end, 1); | 528 | fix_range(mm, vma->vm_start, vma->vm_end, 1); |
529 | vma = vma->vm_next; | 529 | vma = vma->vm_next; |
530 | } | 530 | } |
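A hedged sketch of the op-coalescing idea behind add_mmap()/add_munmap()/add_mprotect() above: if the incoming range continues the last queued op of the same kind, grow that op instead of queueing another one, and flush when the buffer fills. The types and the flush function below are simplified stand-ins for host_vm_op and do_ops(), not the UML definitions.

#include <stdio.h>

struct demo_op {
	unsigned long addr;
	unsigned long len;
};

#define DEMO_MAX_OPS 4

static struct demo_op ops[DEMO_MAX_OPS];
static int nops;

static void flush_ops(void)
{
	int i;

	for (i = 0; i < nops; i++)
		printf("unmap 0x%lx + 0x%lx\n", ops[i].addr, ops[i].len);
	nops = 0;
}

static void add_unmap(unsigned long addr, unsigned long len)
{
	/* Extend the previous op when the new range is contiguous. */
	if (nops > 0 && ops[nops - 1].addr + ops[nops - 1].len == addr) {
		ops[nops - 1].len += len;
		return;
	}
	if (nops == DEMO_MAX_OPS)
		flush_ops();
	ops[nops++] = (struct demo_op) { .addr = addr, .len = len };
}

int main(void)
{
	add_unmap(0x1000, 0x1000);
	add_unmap(0x2000, 0x1000);	/* merged into the first op */
	add_unmap(0x5000, 0x1000);	/* not contiguous, new op */
	flush_ops();			/* prints two coalesced ranges */
	return 0;
}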
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 5f3e13c365e5..1993e5e12256 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c | |||
@@ -1,39 +1,22 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/kernel.h" | ||
7 | #include "linux/sched.h" | ||
8 | #include "linux/notifier.h" | ||
9 | #include "linux/mm.h" | ||
10 | #include "linux/types.h" | ||
11 | #include "linux/tty.h" | ||
12 | #include "linux/init.h" | ||
13 | #include "linux/bootmem.h" | ||
14 | #include "linux/spinlock.h" | ||
15 | #include "linux/utsname.h" | ||
16 | #include "linux/sysrq.h" | ||
17 | #include "linux/seq_file.h" | ||
18 | #include "linux/delay.h" | 6 | #include "linux/delay.h" |
7 | #include "linux/mm.h" | ||
19 | #include "linux/module.h" | 8 | #include "linux/module.h" |
9 | #include "linux/seq_file.h" | ||
10 | #include "linux/string.h" | ||
20 | #include "linux/utsname.h" | 11 | #include "linux/utsname.h" |
21 | #include "asm/page.h" | ||
22 | #include "asm/pgtable.h" | 12 | #include "asm/pgtable.h" |
23 | #include "asm/ptrace.h" | 13 | #include "asm/processor.h" |
24 | #include "asm/elf.h" | ||
25 | #include "asm/user.h" | ||
26 | #include "asm/setup.h" | 14 | #include "asm/setup.h" |
27 | #include "ubd_user.h" | ||
28 | #include "asm/current.h" | ||
29 | #include "kern_util.h" | ||
30 | #include "as-layout.h" | ||
31 | #include "arch.h" | 15 | #include "arch.h" |
16 | #include "as-layout.h" | ||
17 | #include "init.h" | ||
32 | #include "kern.h" | 18 | #include "kern.h" |
33 | #include "mem_user.h" | 19 | #include "mem_user.h" |
34 | #include "mem.h" | ||
35 | #include "initrd.h" | ||
36 | #include "init.h" | ||
37 | #include "os.h" | 20 | #include "os.h" |
38 | #include "skas.h" | 21 | #include "skas.h" |
39 | 22 | ||
@@ -48,7 +31,7 @@ static void __init add_arg(char *arg) | |||
48 | printf("add_arg: Too many command line arguments!\n"); | 31 | printf("add_arg: Too many command line arguments!\n"); |
49 | exit(1); | 32 | exit(1); |
50 | } | 33 | } |
51 | if(strlen(command_line) > 0) | 34 | if (strlen(command_line) > 0) |
52 | strcat(command_line, " "); | 35 | strcat(command_line, " "); |
53 | strcat(command_line, arg); | 36 | strcat(command_line, arg); |
54 | } | 37 | } |
@@ -133,7 +116,7 @@ static int have_root __initdata = 0; | |||
133 | /* Set in uml_mem_setup and modified in linux_main */ | 116 | /* Set in uml_mem_setup and modified in linux_main */ |
134 | long long physmem_size = 32 * 1024 * 1024; | 117 | long long physmem_size = 32 * 1024 * 1024; |
135 | 118 | ||
136 | static char *usage_string = | 119 | static char *usage_string = |
137 | "User Mode Linux v%s\n" | 120 | "User Mode Linux v%s\n" |
138 | " available at http://user-mode-linux.sourceforge.net/\n\n"; | 121 | " available at http://user-mode-linux.sourceforge.net/\n\n"; |
139 | 122 | ||
@@ -191,7 +174,7 @@ static int __init uml_ncpus_setup(char *line, int *add) | |||
191 | 174 | ||
192 | __uml_setup("ncpus=", uml_ncpus_setup, | 175 | __uml_setup("ncpus=", uml_ncpus_setup, |
193 | "ncpus=<# of desired CPUs>\n" | 176 | "ncpus=<# of desired CPUs>\n" |
194 | " This tells an SMP kernel how many virtual processors to start.\n\n" | 177 | " This tells an SMP kernel how many virtual processors to start.\n\n" |
195 | ); | 178 | ); |
196 | #endif | 179 | #endif |
197 | 180 | ||
@@ -223,9 +206,8 @@ static int __init uml_checksetup(char *line, int *add) | |||
223 | int n; | 206 | int n; |
224 | 207 | ||
225 | n = strlen(p->str); | 208 | n = strlen(p->str); |
226 | if(!strncmp(line, p->str, n)){ | 209 | if (!strncmp(line, p->str, n) && p->setup_func(line + n, add)) |
227 | if (p->setup_func(line + n, add)) return 1; | 210 | return 1; |
228 | } | ||
229 | p++; | 211 | p++; |
230 | } | 212 | } |
231 | return 0; | 213 | return 0; |
@@ -236,7 +218,7 @@ static void __init uml_postsetup(void) | |||
236 | initcall_t *p; | 218 | initcall_t *p; |
237 | 219 | ||
238 | p = &__uml_postsetup_start; | 220 | p = &__uml_postsetup_start; |
239 | while(p < &__uml_postsetup_end){ | 221 | while(p < &__uml_postsetup_end) { |
240 | (*p)(); | 222 | (*p)(); |
241 | p++; | 223 | p++; |
242 | } | 224 | } |
@@ -272,16 +254,18 @@ int __init linux_main(int argc, char **argv) | |||
272 | unsigned int i, add; | 254 | unsigned int i, add; |
273 | char * mode; | 255 | char * mode; |
274 | 256 | ||
275 | for (i = 1; i < argc; i++){ | 257 | for (i = 1; i < argc; i++) { |
276 | if((i == 1) && (argv[i][0] == ' ')) continue; | 258 | if ((i == 1) && (argv[i][0] == ' ')) |
259 | continue; | ||
277 | add = 1; | 260 | add = 1; |
278 | uml_checksetup(argv[i], &add); | 261 | uml_checksetup(argv[i], &add); |
279 | if (add) | 262 | if (add) |
280 | add_arg(argv[i]); | 263 | add_arg(argv[i]); |
281 | } | 264 | } |
282 | if(have_root == 0) | 265 | if (have_root == 0) |
283 | add_arg(DEFAULT_COMMAND_LINE); | 266 | add_arg(DEFAULT_COMMAND_LINE); |
284 | 267 | ||
268 | /* OS sanity checks that need to happen before the kernel runs */ | ||
285 | os_early_checks(); | 269 | os_early_checks(); |
286 | 270 | ||
287 | can_do_skas(); | 271 | can_do_skas(); |
@@ -302,12 +286,14 @@ int __init linux_main(int argc, char **argv) | |||
302 | 286 | ||
303 | brk_start = (unsigned long) sbrk(0); | 287 | brk_start = (unsigned long) sbrk(0); |
304 | 288 | ||
305 | /* Increase physical memory size for exec-shield users | 289 | /* |
306 | so they actually get what they asked for. This should | 290 | * Increase physical memory size for exec-shield users |
307 | add zero for non-exec shield users */ | 291 | * so they actually get what they asked for. This should |
292 | * add zero for non-exec shield users | ||
293 | */ | ||
308 | 294 | ||
309 | diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end); | 295 | diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end); |
310 | if(diff > 1024 * 1024){ | 296 | if (diff > 1024 * 1024) { |
311 | printf("Adding %ld bytes to physical memory to account for " | 297 | printf("Adding %ld bytes to physical memory to account for " |
312 | "exec-shield gap\n", diff); | 298 | "exec-shield gap\n", diff); |
313 | physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end); | 299 | physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end); |
@@ -324,11 +310,12 @@ int __init linux_main(int argc, char **argv) | |||
324 | iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK; | 310 | iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK; |
325 | max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC; | 311 | max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC; |
326 | 312 | ||
327 | /* Zones have to begin on a 1 << MAX_ORDER page boundary, | 313 | /* |
314 | * Zones have to begin on a 1 << MAX_ORDER page boundary, | ||
328 | * so this makes sure that's true for highmem | 315 | * so this makes sure that's true for highmem |
329 | */ | 316 | */ |
330 | max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1); | 317 | max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1); |
331 | if(physmem_size + iomem_size > max_physmem){ | 318 | if (physmem_size + iomem_size > max_physmem) { |
332 | highmem = physmem_size + iomem_size - max_physmem; | 319 | highmem = physmem_size + iomem_size - max_physmem; |
333 | physmem_size -= highmem; | 320 | physmem_size -= highmem; |
334 | #ifndef CONFIG_HIGHMEM | 321 | #ifndef CONFIG_HIGHMEM |
@@ -345,7 +332,7 @@ int __init linux_main(int argc, char **argv) | |||
345 | start_vm = VMALLOC_START; | 332 | start_vm = VMALLOC_START; |
346 | 333 | ||
347 | setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem); | 334 | setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem); |
348 | if(init_maps(physmem_size, iomem_size, highmem)){ | 335 | if (init_maps(physmem_size, iomem_size, highmem)) { |
349 | printf("Failed to allocate mem_map for %Lu bytes of physical " | 336 | printf("Failed to allocate mem_map for %Lu bytes of physical " |
350 | "memory and %Lu bytes of highmem\n", physmem_size, | 337 | "memory and %Lu bytes of highmem\n", physmem_size, |
351 | highmem); | 338 | highmem); |
@@ -354,10 +341,11 @@ int __init linux_main(int argc, char **argv) | |||
354 | 341 | ||
355 | virtmem_size = physmem_size; | 342 | virtmem_size = physmem_size; |
356 | avail = get_kmem_end() - start_vm; | 343 | avail = get_kmem_end() - start_vm; |
357 | if(physmem_size > avail) virtmem_size = avail; | 344 | if (physmem_size > avail) |
345 | virtmem_size = avail; | ||
358 | end_vm = start_vm + virtmem_size; | 346 | end_vm = start_vm + virtmem_size; |
359 | 347 | ||
360 | if(virtmem_size < physmem_size) | 348 | if (virtmem_size < physmem_size) |
361 | printf("Kernel virtual memory size shrunk to %lu bytes\n", | 349 | printf("Kernel virtual memory size shrunk to %lu bytes\n", |
362 | virtmem_size); | 350 | virtmem_size); |
363 | 351 | ||
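A hedged sketch of the power-of-two alignment arithmetic used in linux_main() above (the UML_ROUND_UP of brk_start/_end and the MAX_ORDER masking of max_physmem). The shift values below are example choices, not the kernel's configuration.

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12			/* 4 KiB pages, for illustration */
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

static unsigned long round_up_page(unsigned long x)
{
	/* Round up to the next page boundary. */
	return (x + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1);
}

static unsigned long align_down(unsigned long x, unsigned int shift)
{
	/* Clear the low bits, as in: max_physmem &= ~((1 << shift) - 1) */
	return x & ~((1UL << shift) - 1);
}

int main(void)
{
	printf("round_up_page(0x1234)        = 0x%lx\n", round_up_page(0x1234));
	printf("align_down(0x12345678, 22)   = 0x%lx\n", align_down(0x12345678, 22));
	return 0;
}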
diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c index 11c2b01a92bd..68454daf958d 100644 --- a/arch/um/os-Linux/aio.c +++ b/arch/um/os-Linux/aio.c | |||
@@ -1,19 +1,19 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) | 2 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <stdlib.h> | ||
7 | #include <unistd.h> | 6 | #include <unistd.h> |
7 | #include <sched.h> | ||
8 | #include <signal.h> | 8 | #include <signal.h> |
9 | #include <errno.h> | 9 | #include <errno.h> |
10 | #include <sched.h> | 10 | #include <sys/time.h> |
11 | #include <sys/syscall.h> | 11 | #include <asm/unistd.h> |
12 | #include "os.h" | ||
13 | #include "aio.h" | 12 | #include "aio.h" |
14 | #include "init.h" | 13 | #include "init.h" |
15 | #include "user.h" | ||
16 | #include "kern_constants.h" | 14 | #include "kern_constants.h" |
15 | #include "os.h" | ||
16 | #include "user.h" | ||
17 | 17 | ||
18 | struct aio_thread_req { | 18 | struct aio_thread_req { |
19 | enum aio_type type; | 19 | enum aio_type type; |
@@ -27,7 +27,8 @@ struct aio_thread_req { | |||
27 | #if defined(HAVE_AIO_ABI) | 27 | #if defined(HAVE_AIO_ABI) |
28 | #include <linux/aio_abi.h> | 28 | #include <linux/aio_abi.h> |
29 | 29 | ||
30 | /* If we have the headers, we are going to build with AIO enabled. | 30 | /* |
31 | * If we have the headers, we are going to build with AIO enabled. | ||
31 | * If we don't have aio in libc, we define the necessary stubs here. | 32 | * If we don't have aio in libc, we define the necessary stubs here. |
32 | */ | 33 | */ |
33 | 34 | ||
@@ -51,7 +52,8 @@ static long io_getevents(aio_context_t ctx_id, long min_nr, long nr, | |||
51 | 52 | ||
52 | #endif | 53 | #endif |
53 | 54 | ||
54 | /* The AIO_MMAP cases force the mmapped page into memory here | 55 | /* |
56 | * The AIO_MMAP cases force the mmapped page into memory here | ||
55 | * rather than in whatever place first touches the data. I used | 57 | * rather than in whatever place first touches the data. I used |
56 | * to do this by touching the page, but that's delicate because | 58 | * to do this by touching the page, but that's delicate because |
57 | * gcc is prone to optimizing that away. So, what's done here | 59 | * gcc is prone to optimizing that away. So, what's done here |
@@ -105,12 +107,12 @@ static int aio_thread(void *arg) | |||
105 | 107 | ||
106 | signal(SIGWINCH, SIG_IGN); | 108 | signal(SIGWINCH, SIG_IGN); |
107 | 109 | ||
108 | while(1){ | 110 | while (1) { |
109 | n = io_getevents(ctx, 1, 1, &event, NULL); | 111 | n = io_getevents(ctx, 1, 1, &event, NULL); |
110 | if(n < 0){ | 112 | if (n < 0) { |
111 | if(errno == EINTR) | 113 | if (errno == EINTR) |
112 | continue; | 114 | continue; |
113 | printk("aio_thread - io_getevents failed, " | 115 | printk(UM_KERN_ERR "aio_thread - io_getevents failed, " |
114 | "errno = %d\n", errno); | 116 | "errno = %d\n", errno); |
115 | } | 117 | } |
116 | else { | 118 | else { |
@@ -119,9 +121,9 @@ static int aio_thread(void *arg) | |||
119 | .err = event.res }); | 121 | .err = event.res }); |
120 | reply_fd = ((struct aio_context *) reply.data)->reply_fd; | 122 | reply_fd = ((struct aio_context *) reply.data)->reply_fd; |
121 | err = write(reply_fd, &reply, sizeof(reply)); | 123 | err = write(reply_fd, &reply, sizeof(reply)); |
122 | if(err != sizeof(reply)) | 124 | if (err != sizeof(reply)) |
123 | printk("aio_thread - write failed, fd = %d, " | 125 | printk(UM_KERN_ERR "aio_thread - write failed, " |
124 | "err = %d\n", reply_fd, errno); | 126 | "fd = %d, err = %d\n", reply_fd, errno); |
125 | } | 127 | } |
126 | } | 128 | } |
127 | return 0; | 129 | return 0; |
@@ -136,10 +138,10 @@ static int do_not_aio(struct aio_thread_req *req) | |||
136 | int n; | 138 | int n; |
137 | 139 | ||
138 | actual = lseek64(req->io_fd, req->offset, SEEK_SET); | 140 | actual = lseek64(req->io_fd, req->offset, SEEK_SET); |
139 | if(actual != req->offset) | 141 | if (actual != req->offset) |
140 | return -errno; | 142 | return -errno; |
141 | 143 | ||
142 | switch(req->type){ | 144 | switch(req->type) { |
143 | case AIO_READ: | 145 | case AIO_READ: |
144 | n = read(req->io_fd, req->buf, req->len); | 146 | n = read(req->io_fd, req->buf, req->len); |
145 | break; | 147 | break; |
@@ -150,11 +152,12 @@ static int do_not_aio(struct aio_thread_req *req) | |||
150 | n = read(req->io_fd, &c, sizeof(c)); | 152 | n = read(req->io_fd, &c, sizeof(c)); |
151 | break; | 153 | break; |
152 | default: | 154 | default: |
153 | printk("do_not_aio - bad request type : %d\n", req->type); | 155 | printk(UM_KERN_ERR "do_not_aio - bad request type : %d\n", |
156 | req->type); | ||
154 | return -EINVAL; | 157 | return -EINVAL; |
155 | } | 158 | } |
156 | 159 | ||
157 | if(n < 0) | 160 | if (n < 0) |
158 | return -errno; | 161 | return -errno; |
159 | return 0; | 162 | return 0; |
160 | } | 163 | } |
@@ -172,16 +175,18 @@ static int not_aio_thread(void *arg) | |||
172 | int err; | 175 | int err; |
173 | 176 | ||
174 | signal(SIGWINCH, SIG_IGN); | 177 | signal(SIGWINCH, SIG_IGN); |
175 | while(1){ | 178 | while (1) { |
176 | err = read(aio_req_fd_r, &req, sizeof(req)); | 179 | err = read(aio_req_fd_r, &req, sizeof(req)); |
177 | if(err != sizeof(req)){ | 180 | if (err != sizeof(req)) { |
178 | if(err < 0) | 181 | if (err < 0) |
179 | printk("not_aio_thread - read failed, " | 182 | printk(UM_KERN_ERR "not_aio_thread - " |
180 | "fd = %d, err = %d\n", aio_req_fd_r, | 183 | "read failed, fd = %d, err = %d\n", |
184 | aio_req_fd_r, | ||
181 | errno); | 185 | errno); |
182 | else { | 186 | else { |
183 | printk("not_aio_thread - short read, fd = %d, " | 187 | printk(UM_KERN_ERR "not_aio_thread - short " |
184 | "length = %d\n", aio_req_fd_r, err); | 188 | "read, fd = %d, length = %d\n", |
189 | aio_req_fd_r, err); | ||
185 | } | 190 | } |
186 | continue; | 191 | continue; |
187 | } | 192 | } |
@@ -189,9 +194,9 @@ static int not_aio_thread(void *arg) | |||
189 | reply = ((struct aio_thread_reply) { .data = req.aio, | 194 | reply = ((struct aio_thread_reply) { .data = req.aio, |
190 | .err = err }); | 195 | .err = err }); |
191 | err = write(req.aio->reply_fd, &reply, sizeof(reply)); | 196 | err = write(req.aio->reply_fd, &reply, sizeof(reply)); |
192 | if(err != sizeof(reply)) | 197 | if (err != sizeof(reply)) |
193 | printk("not_aio_thread - write failed, fd = %d, " | 198 | printk(UM_KERN_ERR "not_aio_thread - write failed, " |
194 | "err = %d\n", req.aio->reply_fd, errno); | 199 | "fd = %d, err = %d\n", req.aio->reply_fd, errno); |
195 | } | 200 | } |
196 | 201 | ||
197 | return 0; | 202 | return 0; |
@@ -202,19 +207,19 @@ static int init_aio_24(void) | |||
202 | int fds[2], err; | 207 | int fds[2], err; |
203 | 208 | ||
204 | err = os_pipe(fds, 1, 1); | 209 | err = os_pipe(fds, 1, 1); |
205 | if(err) | 210 | if (err) |
206 | goto out; | 211 | goto out; |
207 | 212 | ||
208 | aio_req_fd_w = fds[0]; | 213 | aio_req_fd_w = fds[0]; |
209 | aio_req_fd_r = fds[1]; | 214 | aio_req_fd_r = fds[1]; |
210 | 215 | ||
211 | err = os_set_fd_block(aio_req_fd_w, 0); | 216 | err = os_set_fd_block(aio_req_fd_w, 0); |
212 | if(err) | 217 | if (err) |
213 | goto out_close_pipe; | 218 | goto out_close_pipe; |
214 | 219 | ||
215 | err = run_helper_thread(not_aio_thread, NULL, | 220 | err = run_helper_thread(not_aio_thread, NULL, |
216 | CLONE_FILES | CLONE_VM | SIGCHLD, &aio_stack); | 221 | CLONE_FILES | CLONE_VM | SIGCHLD, &aio_stack); |
217 | if(err < 0) | 222 | if (err < 0) |
218 | goto out_close_pipe; | 223 | goto out_close_pipe; |
219 | 224 | ||
220 | aio_pid = err; | 225 | aio_pid = err; |
@@ -227,10 +232,11 @@ out_close_pipe: | |||
227 | aio_req_fd_r = -1; | 232 | aio_req_fd_r = -1; |
228 | out: | 233 | out: |
229 | #ifndef HAVE_AIO_ABI | 234 | #ifndef HAVE_AIO_ABI |
230 | printk("/usr/include/linux/aio_abi.h not present during build\n"); | 235 | printk(UM_KERN_INFO "/usr/include/linux/aio_abi.h not present during " |
236 | "build\n"); | ||
231 | #endif | 237 | #endif |
232 | printk("2.6 host AIO support not used - falling back to I/O " | 238 | printk(UM_KERN_INFO "2.6 host AIO support not used - falling back to " |
233 | "thread\n"); | 239 | "I/O thread\n"); |
234 | return 0; | 240 | return 0; |
235 | } | 241 | } |
236 | 242 | ||
@@ -240,21 +246,21 @@ static int init_aio_26(void) | |||
240 | { | 246 | { |
241 | int err; | 247 | int err; |
242 | 248 | ||
243 | if(io_setup(256, &ctx)){ | 249 | if (io_setup(256, &ctx)) { |
244 | err = -errno; | 250 | err = -errno; |
245 | printk("aio_thread failed to initialize context, err = %d\n", | 251 | printk(UM_KERN_ERR "aio_thread failed to initialize context, " |
246 | errno); | 252 | "err = %d\n", errno); |
247 | return err; | 253 | return err; |
248 | } | 254 | } |
249 | 255 | ||
250 | err = run_helper_thread(aio_thread, NULL, | 256 | err = run_helper_thread(aio_thread, NULL, |
251 | CLONE_FILES | CLONE_VM | SIGCHLD, &aio_stack); | 257 | CLONE_FILES | CLONE_VM | SIGCHLD, &aio_stack); |
252 | if(err < 0) | 258 | if (err < 0) |
253 | return err; | 259 | return err; |
254 | 260 | ||
255 | aio_pid = err; | 261 | aio_pid = err; |
256 | 262 | ||
257 | printk("Using 2.6 host AIO\n"); | 263 | printk(UM_KERN_INFO "Using 2.6 host AIO\n"); |
258 | return 0; | 264 | return 0; |
259 | } | 265 | } |
260 | 266 | ||
@@ -265,13 +271,13 @@ static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, | |||
265 | int err; | 271 | int err; |
266 | 272 | ||
267 | err = do_aio(ctx, type, io_fd, buf, len, offset, aio); | 273 | err = do_aio(ctx, type, io_fd, buf, len, offset, aio); |
268 | if(err){ | 274 | if (err) { |
269 | reply = ((struct aio_thread_reply) { .data = aio, | 275 | reply = ((struct aio_thread_reply) { .data = aio, |
270 | .err = err }); | 276 | .err = err }); |
271 | err = write(aio->reply_fd, &reply, sizeof(reply)); | 277 | err = write(aio->reply_fd, &reply, sizeof(reply)); |
272 | if(err != sizeof(reply)){ | 278 | if (err != sizeof(reply)) { |
273 | err = -errno; | 279 | err = -errno; |
274 | printk("submit_aio_26 - write failed, " | 280 | printk(UM_KERN_ERR "submit_aio_26 - write failed, " |
275 | "fd = %d, err = %d\n", aio->reply_fd, -err); | 281 | "fd = %d, err = %d\n", aio->reply_fd, -err); |
276 | } | 282 | } |
277 | else err = 0; | 283 | else err = 0; |
@@ -319,23 +325,24 @@ static int init_aio(void) | |||
319 | { | 325 | { |
320 | int err; | 326 | int err; |
321 | 327 | ||
322 | if(!aio_24){ | 328 | if (!aio_24) { |
323 | err = init_aio_26(); | 329 | err = init_aio_26(); |
324 | if(err && (errno == ENOSYS)){ | 330 | if (err && (errno == ENOSYS)) { |
325 | printk("2.6 AIO not supported on the host - " | 331 | printk(UM_KERN_INFO "2.6 AIO not supported on the " |
326 | "reverting to 2.4 AIO\n"); | 332 | "host - reverting to 2.4 AIO\n"); |
327 | aio_24 = 1; | 333 | aio_24 = 1; |
328 | } | 334 | } |
329 | else return err; | 335 | else return err; |
330 | } | 336 | } |
331 | 337 | ||
332 | if(aio_24) | 338 | if (aio_24) |
333 | return init_aio_24(); | 339 | return init_aio_24(); |
334 | 340 | ||
335 | return 0; | 341 | return 0; |
336 | } | 342 | } |
337 | 343 | ||
338 | /* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio | 344 | /* |
345 | * The reason for the __initcall/__uml_exitcall asymmetry is that init_aio | ||
339 | * needs to be called when the kernel is running because it calls run_helper, | 346 | * needs to be called when the kernel is running because it calls run_helper, |
340 | * which needs get_free_page. exit_aio is a __uml_exitcall because the generic | 347 | * which needs get_free_page. exit_aio is a __uml_exitcall because the generic |
341 | * kernel does not run __exitcalls on shutdown, and can't because many of them | 348 | * kernel does not run __exitcalls on shutdown, and can't because many of them |
@@ -366,7 +373,7 @@ static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len, | |||
366 | int err; | 373 | int err; |
367 | 374 | ||
368 | err = write(aio_req_fd_w, &req, sizeof(req)); | 375 | err = write(aio_req_fd_w, &req, sizeof(req)); |
369 | if(err == sizeof(req)) | 376 | if (err == sizeof(req)) |
370 | err = 0; | 377 | err = 0; |
371 | else err = -errno; | 378 | else err = -errno; |
372 | 379 | ||
@@ -378,9 +385,8 @@ int submit_aio(enum aio_type type, int io_fd, char *buf, int len, | |||
378 | struct aio_context *aio) | 385 | struct aio_context *aio) |
379 | { | 386 | { |
380 | aio->reply_fd = reply_fd; | 387 | aio->reply_fd = reply_fd; |
381 | if(aio_24) | 388 | if (aio_24) |
382 | return submit_aio_24(type, io_fd, buf, len, offset, aio); | 389 | return submit_aio_24(type, io_fd, buf, len, offset, aio); |
383 | else { | 390 | else |
384 | return submit_aio_26(type, io_fd, buf, len, offset, aio); | 391 | return submit_aio_26(type, io_fd, buf, len, offset, aio); |
385 | } | ||
386 | } | 392 | } |
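
Aside: the aio.c hunks above keep local io_setup()/io_getevents() stubs under HAVE_AIO_ABI, because libc may ship <linux/aio_abi.h> without providing wrappers for the raw syscalls. The stand-alone sketch below illustrates that stub pattern only; the demo_* names and the main() driver are not part of the patch.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static long demo_io_setup(unsigned nr_events, aio_context_t *ctx)
{
	/* Invoke the raw syscall directly, as the patch does when libc
	 * offers no io_setup() wrapper. */
	return syscall(__NR_io_setup, nr_events, ctx);
}

static long demo_io_getevents(aio_context_t ctx, long min_nr, long nr,
			      struct io_event *events,
			      struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx, min_nr, nr, events, timeout);
}

int main(void)
{
	aio_context_t ctx = 0;
	struct io_event event;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };

	if (demo_io_setup(256, &ctx)) {
		/* Same failure init_aio_26() reports above. */
		fprintf(stderr, "io_setup failed, errno = %d\n", errno);
		return 1;
	}

	/* Nothing was submitted, so this returns 0 events immediately. */
	printf("events ready: %ld\n",
	       demo_io_getevents(ctx, 0, 1, &event, &ts));
	return 0;
}
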
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c index f52006ee70e8..d463a8205637 100644 --- a/arch/um/os-Linux/file.c +++ b/arch/um/os-Linux/file.c | |||
@@ -267,9 +267,9 @@ void os_close_file(int fd) | |||
267 | close(fd); | 267 | close(fd); |
268 | } | 268 | } |
269 | 269 | ||
270 | int os_seek_file(int fd, __u64 offset) | 270 | int os_seek_file(int fd, unsigned long long offset) |
271 | { | 271 | { |
272 | __u64 actual; | 272 | unsigned long long actual; |
273 | 273 | ||
274 | actual = lseek64(fd, offset, SEEK_SET); | 274 | actual = lseek64(fd, offset, SEEK_SET); |
275 | if(actual != offset) | 275 | if(actual != offset) |
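
Aside: the file.c hunk swaps __u64 for unsigned long long so this userspace helper no longer depends on a kernel type header. The hunk is cut off after the comparison; the sketch below completes it under the assumption that the tail follows the -errno convention used throughout these files.

#define _LARGEFILE64_SOURCE
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

int os_seek_file_sketch(int fd, unsigned long long offset)
{
	unsigned long long actual;

	actual = lseek64(fd, offset, SEEK_SET);
	if (actual != offset)
		return -errno;	/* assumed: this tail is not shown in the hunk */
	return 0;
}
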
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c index aeeecc634733..1518f7a45a24 100644 --- a/arch/um/os-Linux/main.c +++ b/arch/um/os-Linux/main.c | |||
@@ -1,27 +1,21 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <unistd.h> | ||
7 | #include <stdio.h> | 6 | #include <stdio.h> |
8 | #include <stdlib.h> | 7 | #include <stdlib.h> |
9 | #include <string.h> | 8 | #include <unistd.h> |
10 | #include <signal.h> | ||
11 | #include <errno.h> | 9 | #include <errno.h> |
10 | #include <signal.h> | ||
11 | #include <string.h> | ||
12 | #include <sys/resource.h> | 12 | #include <sys/resource.h> |
13 | #include <sys/mman.h> | ||
14 | #include <sys/user.h> | ||
15 | #include "kern_util.h" | ||
16 | #include "as-layout.h" | 13 | #include "as-layout.h" |
17 | #include "mem_user.h" | ||
18 | #include "irq_user.h" | ||
19 | #include "user.h" | ||
20 | #include "init.h" | 14 | #include "init.h" |
21 | #include "uml-config.h" | 15 | #include "kern_constants.h" |
16 | #include "kern_util.h" | ||
22 | #include "os.h" | 17 | #include "os.h" |
23 | #include "um_malloc.h" | 18 | #include "um_malloc.h" |
24 | #include "kern_constants.h" | ||
25 | 19 | ||
26 | #define PGD_BOUND (4 * 1024 * 1024) | 20 | #define PGD_BOUND (4 * 1024 * 1024) |
27 | #define STACKSIZE (8 * 1024 * 1024) | 21 | #define STACKSIZE (8 * 1024 * 1024) |
@@ -31,13 +25,13 @@ static void set_stklim(void) | |||
31 | { | 25 | { |
32 | struct rlimit lim; | 26 | struct rlimit lim; |
33 | 27 | ||
34 | if(getrlimit(RLIMIT_STACK, &lim) < 0){ | 28 | if (getrlimit(RLIMIT_STACK, &lim) < 0) { |
35 | perror("getrlimit"); | 29 | perror("getrlimit"); |
36 | exit(1); | 30 | exit(1); |
37 | } | 31 | } |
38 | if((lim.rlim_cur == RLIM_INFINITY) || (lim.rlim_cur > STACKSIZE)){ | 32 | if ((lim.rlim_cur == RLIM_INFINITY) || (lim.rlim_cur > STACKSIZE)) { |
39 | lim.rlim_cur = STACKSIZE; | 33 | lim.rlim_cur = STACKSIZE; |
40 | if(setrlimit(RLIMIT_STACK, &lim) < 0){ | 34 | if (setrlimit(RLIMIT_STACK, &lim) < 0) { |
41 | perror("setrlimit"); | 35 | perror("setrlimit"); |
42 | exit(1); | 36 | exit(1); |
43 | } | 37 | } |
@@ -49,7 +43,7 @@ static __init void do_uml_initcalls(void) | |||
49 | initcall_t *call; | 43 | initcall_t *call; |
50 | 44 | ||
51 | call = &__uml_initcall_start; | 45 | call = &__uml_initcall_start; |
52 | while (call < &__uml_initcall_end){ | 46 | while (call < &__uml_initcall_end) { |
53 | (*call)(); | 47 | (*call)(); |
54 | call++; | 48 | call++; |
55 | } | 49 | } |
@@ -68,7 +62,8 @@ static void install_fatal_handler(int sig) | |||
68 | /* All signals are enabled in this handler ... */ | 62 | /* All signals are enabled in this handler ... */ |
69 | sigemptyset(&action.sa_mask); | 63 | sigemptyset(&action.sa_mask); |
70 | 64 | ||
71 | /* ... including the signal being handled, plus we want the | 65 | /* |
66 | * ... including the signal being handled, plus we want the | ||
72 | * handler reset to the default behavior, so that if an exit | 67 | * handler reset to the default behavior, so that if an exit |
73 | * handler is hanging for some reason, the UML will just die | 68 | * handler is hanging for some reason, the UML will just die |
74 | * after this signal is sent a second time. | 69 | * after this signal is sent a second time. |
@@ -76,7 +71,7 @@ static void install_fatal_handler(int sig) | |||
76 | action.sa_flags = SA_RESETHAND | SA_NODEFER; | 71 | action.sa_flags = SA_RESETHAND | SA_NODEFER; |
77 | action.sa_restorer = NULL; | 72 | action.sa_restorer = NULL; |
78 | action.sa_handler = last_ditch_exit; | 73 | action.sa_handler = last_ditch_exit; |
79 | if(sigaction(sig, &action, NULL) < 0){ | 74 | if (sigaction(sig, &action, NULL) < 0) { |
80 | printf("failed to install handler for signal %d - errno = %d\n", | 75 | printf("failed to install handler for signal %d - errno = %d\n", |
81 | errno); | 76 | errno); |
82 | exit(1); | 77 | exit(1); |
@@ -92,7 +87,8 @@ static void setup_env_path(void) | |||
92 | int path_len = 0; | 87 | int path_len = 0; |
93 | 88 | ||
94 | old_path = getenv("PATH"); | 89 | old_path = getenv("PATH"); |
95 | /* if no PATH variable is set or it has an empty value | 90 | /* |
91 | * if no PATH variable is set or it has an empty value | ||
96 | * just use the default + /usr/lib/uml | 92 | * just use the default + /usr/lib/uml |
97 | */ | 93 | */ |
98 | if (!old_path || (path_len = strlen(old_path)) == 0) { | 94 | if (!old_path || (path_len = strlen(old_path)) == 0) { |
@@ -125,38 +121,41 @@ int __init main(int argc, char **argv, char **envp) | |||
125 | setup_env_path(); | 121 | setup_env_path(); |
126 | 122 | ||
127 | new_argv = malloc((argc + 1) * sizeof(char *)); | 123 | new_argv = malloc((argc + 1) * sizeof(char *)); |
128 | if(new_argv == NULL){ | 124 | if (new_argv == NULL) { |
129 | perror("Mallocing argv"); | 125 | perror("Mallocing argv"); |
130 | exit(1); | 126 | exit(1); |
131 | } | 127 | } |
132 | for(i=0;i<argc;i++){ | 128 | for (i = 0; i < argc; i++) { |
133 | new_argv[i] = strdup(argv[i]); | 129 | new_argv[i] = strdup(argv[i]); |
134 | if(new_argv[i] == NULL){ | 130 | if (new_argv[i] == NULL) { |
135 | perror("Mallocing an arg"); | 131 | perror("Mallocing an arg"); |
136 | exit(1); | 132 | exit(1); |
137 | } | 133 | } |
138 | } | 134 | } |
139 | new_argv[argc] = NULL; | 135 | new_argv[argc] = NULL; |
140 | 136 | ||
141 | /* Allow these signals to bring down a UML if all other | 137 | /* |
138 | * Allow these signals to bring down a UML if all other | ||
142 | * methods of control fail. | 139 | * methods of control fail. |
143 | */ | 140 | */ |
144 | install_fatal_handler(SIGINT); | 141 | install_fatal_handler(SIGINT); |
145 | install_fatal_handler(SIGTERM); | 142 | install_fatal_handler(SIGTERM); |
146 | install_fatal_handler(SIGHUP); | 143 | install_fatal_handler(SIGHUP); |
147 | 144 | ||
148 | scan_elf_aux( envp); | 145 | scan_elf_aux(envp); |
149 | 146 | ||
150 | do_uml_initcalls(); | 147 | do_uml_initcalls(); |
151 | ret = linux_main(argc, argv); | 148 | ret = linux_main(argc, argv); |
152 | 149 | ||
153 | /* Disable SIGPROF - I have no idea why libc doesn't do this or turn | 150 | /* |
151 | * Disable SIGPROF - I have no idea why libc doesn't do this or turn | ||
154 | * off the profiling time, but UML dies with a SIGPROF just before | 152 | * off the profiling time, but UML dies with a SIGPROF just before |
155 | * exiting when profiling is active. | 153 | * exiting when profiling is active. |
156 | */ | 154 | */ |
157 | change_sig(SIGPROF, 0); | 155 | change_sig(SIGPROF, 0); |
158 | 156 | ||
159 | /* This signal stuff used to be in the reboot case. However, | 157 | /* |
158 | * This signal stuff used to be in the reboot case. However, | ||
160 | * sometimes a SIGVTALRM can come in when we're halting (reproducably | 159 | * sometimes a SIGVTALRM can come in when we're halting (reproducably |
161 | * when writing out gcov information, presumably because that takes | 160 | * when writing out gcov information, presumably because that takes |
162 | * some time) and cause a segfault. | 161 | * some time) and cause a segfault. |
@@ -167,17 +166,18 @@ int __init main(int argc, char **argv, char **envp) | |||
167 | 166 | ||
168 | /* disable SIGIO for the fds and set SIGIO to be ignored */ | 167 | /* disable SIGIO for the fds and set SIGIO to be ignored */ |
169 | err = deactivate_all_fds(); | 168 | err = deactivate_all_fds(); |
170 | if(err) | 169 | if (err) |
171 | printf("deactivate_all_fds failed, errno = %d\n", -err); | 170 | printf("deactivate_all_fds failed, errno = %d\n", -err); |
172 | 171 | ||
173 | /* Let any pending signals fire now. This ensures | 172 | /* |
173 | * Let any pending signals fire now. This ensures | ||
174 | * that they won't be delivered after the exec, when | 174 | * that they won't be delivered after the exec, when |
175 | * they are definitely not expected. | 175 | * they are definitely not expected. |
176 | */ | 176 | */ |
177 | unblock_signals(); | 177 | unblock_signals(); |
178 | 178 | ||
179 | /* Reboot */ | 179 | /* Reboot */ |
180 | if(ret){ | 180 | if (ret) { |
181 | printf("\n"); | 181 | printf("\n"); |
182 | execvp(new_argv[0], new_argv); | 182 | execvp(new_argv[0], new_argv); |
183 | perror("Failed to exec kernel"); | 183 | perror("Failed to exec kernel"); |
@@ -193,17 +193,18 @@ void *__wrap_malloc(int size) | |||
193 | { | 193 | { |
194 | void *ret; | 194 | void *ret; |
195 | 195 | ||
196 | if(!kmalloc_ok) | 196 | if (!kmalloc_ok) |
197 | return __real_malloc(size); | 197 | return __real_malloc(size); |
198 | else if(size <= UM_KERN_PAGE_SIZE) | 198 | else if (size <= UM_KERN_PAGE_SIZE) |
199 | /* finding contiguous pages can be hard*/ | 199 | /* finding contiguous pages can be hard*/ |
200 | ret = kmalloc(size, UM_GFP_KERNEL); | 200 | ret = kmalloc(size, UM_GFP_KERNEL); |
201 | else ret = vmalloc(size); | 201 | else ret = vmalloc(size); |
202 | 202 | ||
203 | /* glibc people insist that if malloc fails, errno should be | 203 | /* |
204 | * glibc people insist that if malloc fails, errno should be | ||
204 | * set by malloc as well. So we do. | 205 | * set by malloc as well. So we do. |
205 | */ | 206 | */ |
206 | if(ret == NULL) | 207 | if (ret == NULL) |
207 | errno = ENOMEM; | 208 | errno = ENOMEM; |
208 | 209 | ||
209 | return ret; | 210 | return ret; |
@@ -213,7 +214,7 @@ void *__wrap_calloc(int n, int size) | |||
213 | { | 214 | { |
214 | void *ptr = __wrap_malloc(n * size); | 215 | void *ptr = __wrap_malloc(n * size); |
215 | 216 | ||
216 | if(ptr == NULL) | 217 | if (ptr == NULL) |
217 | return NULL; | 218 | return NULL; |
218 | memset(ptr, 0, n * size); | 219 | memset(ptr, 0, n * size); |
219 | return ptr; | 220 | return ptr; |
@@ -227,7 +228,8 @@ void __wrap_free(void *ptr) | |||
227 | { | 228 | { |
228 | unsigned long addr = (unsigned long) ptr; | 229 | unsigned long addr = (unsigned long) ptr; |
229 | 230 | ||
230 | /* We need to know how the allocation happened, so it can be correctly | 231 | /* |
232 | * We need to know how the allocation happened, so it can be correctly | ||
231 | * freed. This is done by seeing what region of memory the pointer is | 233 | * freed. This is done by seeing what region of memory the pointer is |
232 | * in - | 234 | * in - |
233 | * physical memory - kmalloc/kfree | 235 | * physical memory - kmalloc/kfree |
@@ -245,12 +247,12 @@ void __wrap_free(void *ptr) | |||
245 | * there is a possibility for memory leaks. | 247 | * there is a possibility for memory leaks. |
246 | */ | 248 | */ |
247 | 249 | ||
248 | if((addr >= uml_physmem) && (addr < high_physmem)){ | 250 | if ((addr >= uml_physmem) && (addr < high_physmem)) { |
249 | if(kmalloc_ok) | 251 | if (kmalloc_ok) |
250 | kfree(ptr); | 252 | kfree(ptr); |
251 | } | 253 | } |
252 | else if((addr >= start_vm) && (addr < end_vm)){ | 254 | else if ((addr >= start_vm) && (addr < end_vm)) { |
253 | if(kmalloc_ok) | 255 | if (kmalloc_ok) |
254 | vfree(ptr); | 256 | vfree(ptr); |
255 | } | 257 | } |
256 | else __real_free(ptr); | 258 | else __real_free(ptr); |
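
Aside: __wrap_malloc()/__wrap_free() in main.c rely on GNU ld symbol wrapping: linking with --wrap=malloc reroutes every malloc() call to __wrap_malloc() and exposes the libc original as __real_malloc(). The stand-alone sketch below shows just that mechanism; the kmalloc()/vmalloc() dispatch from the patch is replaced by a simple counter.

/* wrapdemo.c - build with:  cc wrapdemo.c -Wl,--wrap=malloc -o wrapdemo */
#include <stdio.h>
#include <stdlib.h>

void *__real_malloc(size_t size);	/* resolved by the linker to libc malloc */

static unsigned long wrapped_calls;

void *__wrap_malloc(size_t size)
{
	/* The patch chooses kmalloc()/vmalloc() here; we just count calls. */
	wrapped_calls++;
	return __real_malloc(size);
}

int main(void)
{
	free(malloc(32));
	printf("malloc() went through the wrapper %lu time(s)\n",
	       wrapped_calls);
	return 0;
}
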
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c index a955e9bcd04d..b2e0d8c4258c 100644 --- a/arch/um/os-Linux/process.c +++ b/arch/um/os-Linux/process.c | |||
@@ -1,27 +1,23 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@addtoit.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <unistd.h> | ||
7 | #include <stdio.h> | 6 | #include <stdio.h> |
7 | #include <unistd.h> | ||
8 | #include <errno.h> | 8 | #include <errno.h> |
9 | #include <signal.h> | 9 | #include <signal.h> |
10 | #include <sys/mman.h> | 10 | #include <sys/mman.h> |
11 | #include <sys/ptrace.h> | ||
11 | #include <sys/wait.h> | 12 | #include <sys/wait.h> |
12 | #include <sys/mman.h> | 13 | #include <asm/unistd.h> |
13 | #include <sys/syscall.h> | 14 | #include "init.h" |
14 | #include "ptrace_user.h" | 15 | #include "kern_constants.h" |
16 | #include "longjmp.h" | ||
15 | #include "os.h" | 17 | #include "os.h" |
16 | #include "user.h" | ||
17 | #include "process.h" | 18 | #include "process.h" |
18 | #include "irq_user.h" | ||
19 | #include "kern_util.h" | ||
20 | #include "longjmp.h" | ||
21 | #include "skas_ptrace.h" | 19 | #include "skas_ptrace.h" |
22 | #include "kern_constants.h" | 20 | #include "user.h" |
23 | #include "uml-config.h" | ||
24 | #include "init.h" | ||
25 | 21 | ||
26 | #define ARBITRARY_ADDR -1 | 22 | #define ARBITRARY_ADDR -1 |
27 | #define FAILURE_PID -1 | 23 | #define FAILURE_PID -1 |
@@ -37,24 +33,25 @@ unsigned long os_process_pc(int pid) | |||
37 | 33 | ||
38 | sprintf(proc_stat, "/proc/%d/stat", pid); | 34 | sprintf(proc_stat, "/proc/%d/stat", pid); |
39 | fd = os_open_file(proc_stat, of_read(OPENFLAGS()), 0); | 35 | fd = os_open_file(proc_stat, of_read(OPENFLAGS()), 0); |
40 | if(fd < 0){ | 36 | if (fd < 0) { |
41 | printk("os_process_pc - couldn't open '%s', err = %d\n", | 37 | printk(UM_KERN_ERR "os_process_pc - couldn't open '%s', " |
42 | proc_stat, -fd); | 38 | "err = %d\n", proc_stat, -fd); |
43 | return ARBITRARY_ADDR; | 39 | return ARBITRARY_ADDR; |
44 | } | 40 | } |
45 | CATCH_EINTR(err = read(fd, buf, sizeof(buf))); | 41 | CATCH_EINTR(err = read(fd, buf, sizeof(buf))); |
46 | if(err < 0){ | 42 | if (err < 0) { |
47 | printk("os_process_pc - couldn't read '%s', err = %d\n", | 43 | printk(UM_KERN_ERR "os_process_pc - couldn't read '%s', " |
48 | proc_stat, errno); | 44 | "err = %d\n", proc_stat, errno); |
49 | os_close_file(fd); | 45 | os_close_file(fd); |
50 | return ARBITRARY_ADDR; | 46 | return ARBITRARY_ADDR; |
51 | } | 47 | } |
52 | os_close_file(fd); | 48 | os_close_file(fd); |
53 | pc = ARBITRARY_ADDR; | 49 | pc = ARBITRARY_ADDR; |
54 | if(sscanf(buf, "%*d " COMM_SCANF " %*c %*d %*d %*d %*d %*d %*d %*d " | 50 | if (sscanf(buf, "%*d " COMM_SCANF " %*c %*d %*d %*d %*d %*d %*d %*d " |
55 | "%*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d " | 51 | "%*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d " |
56 | "%*d %*d %*d %*d %*d %lu", &pc) != 1){ | 52 | "%*d %*d %*d %*d %*d %lu", &pc) != 1) { |
57 | printk("os_process_pc - couldn't find pc in '%s'\n", buf); | 53 | printk(UM_KERN_ERR "os_process_pc - couldn't find pc in '%s'\n", |
54 | buf); | ||
58 | } | 55 | } |
59 | return pc; | 56 | return pc; |
60 | } | 57 | } |
@@ -65,28 +62,29 @@ int os_process_parent(int pid) | |||
65 | char data[256]; | 62 | char data[256]; |
66 | int parent, n, fd; | 63 | int parent, n, fd; |
67 | 64 | ||
68 | if(pid == -1) | 65 | if (pid == -1) |
69 | return -1; | 66 | return -1; |
70 | 67 | ||
71 | snprintf(stat, sizeof(stat), "/proc/%d/stat", pid); | 68 | snprintf(stat, sizeof(stat), "/proc/%d/stat", pid); |
72 | fd = os_open_file(stat, of_read(OPENFLAGS()), 0); | 69 | fd = os_open_file(stat, of_read(OPENFLAGS()), 0); |
73 | if(fd < 0){ | 70 | if (fd < 0) { |
74 | printk("Couldn't open '%s', err = %d\n", stat, -fd); | 71 | printk(UM_KERN_ERR "Couldn't open '%s', err = %d\n", stat, -fd); |
75 | return FAILURE_PID; | 72 | return FAILURE_PID; |
76 | } | 73 | } |
77 | 74 | ||
78 | CATCH_EINTR(n = read(fd, data, sizeof(data))); | 75 | CATCH_EINTR(n = read(fd, data, sizeof(data))); |
79 | os_close_file(fd); | 76 | os_close_file(fd); |
80 | 77 | ||
81 | if(n < 0){ | 78 | if (n < 0) { |
82 | printk("Couldn't read '%s', err = %d\n", stat, errno); | 79 | printk(UM_KERN_ERR "Couldn't read '%s', err = %d\n", stat, |
80 | errno); | ||
83 | return FAILURE_PID; | 81 | return FAILURE_PID; |
84 | } | 82 | } |
85 | 83 | ||
86 | parent = FAILURE_PID; | 84 | parent = FAILURE_PID; |
87 | n = sscanf(data, "%*d " COMM_SCANF " %*c %d", &parent); | 85 | n = sscanf(data, "%*d " COMM_SCANF " %*c %d", &parent); |
88 | if(n != 1) | 86 | if (n != 1) |
89 | printk("Failed to scan '%s'\n", data); | 87 | printk(UM_KERN_ERR "Failed to scan '%s'\n", data); |
90 | 88 | ||
91 | return parent; | 89 | return parent; |
92 | } | 90 | } |
@@ -99,9 +97,8 @@ void os_stop_process(int pid) | |||
99 | void os_kill_process(int pid, int reap_child) | 97 | void os_kill_process(int pid, int reap_child) |
100 | { | 98 | { |
101 | kill(pid, SIGKILL); | 99 | kill(pid, SIGKILL); |
102 | if(reap_child) | 100 | if (reap_child) |
103 | CATCH_EINTR(waitpid(pid, NULL, 0)); | 101 | CATCH_EINTR(waitpid(pid, NULL, 0)); |
104 | |||
105 | } | 102 | } |
106 | 103 | ||
107 | /* This is here uniquely to have access to the userspace errno, i.e. the one | 104 | /* This is here uniquely to have access to the userspace errno, i.e. the one |
@@ -129,7 +126,7 @@ void os_kill_ptraced_process(int pid, int reap_child) | |||
129 | kill(pid, SIGKILL); | 126 | kill(pid, SIGKILL); |
130 | ptrace(PTRACE_KILL, pid); | 127 | ptrace(PTRACE_KILL, pid); |
131 | ptrace(PTRACE_CONT, pid); | 128 | ptrace(PTRACE_CONT, pid); |
132 | if(reap_child) | 129 | if (reap_child) |
133 | CATCH_EINTR(waitpid(pid, NULL, 0)); | 130 | CATCH_EINTR(waitpid(pid, NULL, 0)); |
134 | } | 131 | } |
135 | 132 | ||
@@ -153,34 +150,35 @@ int os_map_memory(void *virt, int fd, unsigned long long off, unsigned long len, | |||
153 | void *loc; | 150 | void *loc; |
154 | int prot; | 151 | int prot; |
155 | 152 | ||
156 | prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | | 153 | prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | |
157 | (x ? PROT_EXEC : 0); | 154 | (x ? PROT_EXEC : 0); |
158 | 155 | ||
159 | loc = mmap64((void *) virt, len, prot, MAP_SHARED | MAP_FIXED, | 156 | loc = mmap64((void *) virt, len, prot, MAP_SHARED | MAP_FIXED, |
160 | fd, off); | 157 | fd, off); |
161 | if(loc == MAP_FAILED) | 158 | if (loc == MAP_FAILED) |
162 | return -errno; | 159 | return -errno; |
163 | return 0; | 160 | return 0; |
164 | } | 161 | } |
165 | 162 | ||
166 | int os_protect_memory(void *addr, unsigned long len, int r, int w, int x) | 163 | int os_protect_memory(void *addr, unsigned long len, int r, int w, int x) |
167 | { | 164 | { |
168 | int prot = ((r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | | 165 | int prot = ((r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | |
169 | (x ? PROT_EXEC : 0)); | 166 | (x ? PROT_EXEC : 0)); |
170 | 167 | ||
171 | if(mprotect(addr, len, prot) < 0) | 168 | if (mprotect(addr, len, prot) < 0) |
172 | return -errno; | 169 | return -errno; |
173 | return 0; | 170 | |
171 | return 0; | ||
174 | } | 172 | } |
175 | 173 | ||
176 | int os_unmap_memory(void *addr, int len) | 174 | int os_unmap_memory(void *addr, int len) |
177 | { | 175 | { |
178 | int err; | 176 | int err; |
179 | 177 | ||
180 | err = munmap(addr, len); | 178 | err = munmap(addr, len); |
181 | if(err < 0) | 179 | if (err < 0) |
182 | return -errno; | 180 | return -errno; |
183 | return 0; | 181 | return 0; |
184 | } | 182 | } |
185 | 183 | ||
186 | #ifndef MADV_REMOVE | 184 | #ifndef MADV_REMOVE |
@@ -192,7 +190,7 @@ int os_drop_memory(void *addr, int length) | |||
192 | int err; | 190 | int err; |
193 | 191 | ||
194 | err = madvise(addr, length, MADV_REMOVE); | 192 | err = madvise(addr, length, MADV_REMOVE); |
195 | if(err < 0) | 193 | if (err < 0) |
196 | err = -errno; | 194 | err = -errno; |
197 | return err; | 195 | return err; |
198 | } | 196 | } |
@@ -202,22 +200,24 @@ int __init can_drop_memory(void) | |||
202 | void *addr; | 200 | void *addr; |
203 | int fd, ok = 0; | 201 | int fd, ok = 0; |
204 | 202 | ||
205 | printk("Checking host MADV_REMOVE support..."); | 203 | printk(UM_KERN_INFO "Checking host MADV_REMOVE support..."); |
206 | fd = create_mem_file(UM_KERN_PAGE_SIZE); | 204 | fd = create_mem_file(UM_KERN_PAGE_SIZE); |
207 | if(fd < 0){ | 205 | if (fd < 0) { |
208 | printk("Creating test memory file failed, err = %d\n", -fd); | 206 | printk(UM_KERN_ERR "Creating test memory file failed, " |
207 | "err = %d\n", -fd); | ||
209 | goto out; | 208 | goto out; |
210 | } | 209 | } |
211 | 210 | ||
212 | addr = mmap64(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE, | 211 | addr = mmap64(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE, |
213 | MAP_SHARED, fd, 0); | 212 | MAP_SHARED, fd, 0); |
214 | if(addr == MAP_FAILED){ | 213 | if (addr == MAP_FAILED) { |
215 | printk("Mapping test memory file failed, err = %d\n", -errno); | 214 | printk(UM_KERN_ERR "Mapping test memory file failed, " |
215 | "err = %d\n", -errno); | ||
216 | goto out_close; | 216 | goto out_close; |
217 | } | 217 | } |
218 | 218 | ||
219 | if(madvise(addr, UM_KERN_PAGE_SIZE, MADV_REMOVE) != 0){ | 219 | if (madvise(addr, UM_KERN_PAGE_SIZE, MADV_REMOVE) != 0) { |
220 | printk("MADV_REMOVE failed, err = %d\n", -errno); | 220 | printk(UM_KERN_ERR "MADV_REMOVE failed, err = %d\n", -errno); |
221 | goto out_unmap; | 221 | goto out_unmap; |
222 | } | 222 | } |
223 | 223 | ||
@@ -256,7 +256,7 @@ int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr) | |||
256 | 256 | ||
257 | *jmp_ptr = &buf; | 257 | *jmp_ptr = &buf; |
258 | n = UML_SETJMP(&buf); | 258 | n = UML_SETJMP(&buf); |
259 | if(n != 0) | 259 | if (n != 0) |
260 | return n; | 260 | return n; |
261 | (*fn)(arg); | 261 | (*fn)(arg); |
262 | return 0; | 262 | return 0; |
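
Aside: several call sites above wrap blocking read() and waitpid() calls in CATCH_EINTR(), so a signal interrupting the call simply causes a retry. The macro body is not part of this hunk; the helper below is an illustration of the same retry-on-EINTR idiom, not the UML definition.

#include <errno.h>
#include <unistd.h>

/* Retry a read() for as long as it fails with EINTR, mirroring how
 * CATCH_EINTR() is used around read() and waitpid() above. */
ssize_t read_retry(int fd, void *buf, size_t len)
{
	ssize_t n;

	do {
		errno = 0;
		n = read(fd, buf, len);
	} while ((n < 0) && (errno == EINTR));

	return n;
}
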
diff --git a/arch/um/os-Linux/registers.c b/arch/um/os-Linux/registers.c index ce0b791160e6..14732f98e0a2 100644 --- a/arch/um/os-Linux/registers.c +++ b/arch/um/os-Linux/registers.c | |||
@@ -1,13 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2004 PathScale, Inc | 2 | * Copyright (C) 2004 PathScale, Inc |
3 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | 4 | * Licensed under the GPL |
4 | */ | 5 | */ |
5 | 6 | ||
6 | #include <errno.h> | 7 | #include <errno.h> |
7 | #include <string.h> | 8 | #include <string.h> |
8 | #include <sys/ptrace.h> | 9 | #include <sys/ptrace.h> |
9 | #include "user.h" | ||
10 | #include "sysdep/ptrace.h" | 10 | #include "sysdep/ptrace.h" |
11 | #include "user.h" | ||
11 | 12 | ||
12 | /* This is set once at boot time and not changed thereafter */ | 13 | /* This is set once at boot time and not changed thereafter */ |
13 | 14 | ||
@@ -23,7 +24,7 @@ void save_registers(int pid, struct uml_pt_regs *regs) | |||
23 | int err; | 24 | int err; |
24 | 25 | ||
25 | err = ptrace(PTRACE_GETREGS, pid, 0, regs->regs); | 26 | err = ptrace(PTRACE_GETREGS, pid, 0, regs->regs); |
26 | if(err < 0) | 27 | if (err < 0) |
27 | panic("save_registers - saving registers failed, errno = %d\n", | 28 | panic("save_registers - saving registers failed, errno = %d\n", |
28 | errno); | 29 | errno); |
29 | } | 30 | } |
@@ -33,7 +34,7 @@ void restore_registers(int pid, struct uml_pt_regs *regs) | |||
33 | int err; | 34 | int err; |
34 | 35 | ||
35 | err = ptrace(PTRACE_SETREGS, pid, 0, regs->regs); | 36 | err = ptrace(PTRACE_SETREGS, pid, 0, regs->regs); |
36 | if(err < 0) | 37 | if (err < 0) |
37 | panic("restore_registers - saving registers failed, " | 38 | panic("restore_registers - saving registers failed, " |
38 | "errno = %d\n", errno); | 39 | "errno = %d\n", errno); |
39 | } | 40 | } |
@@ -43,7 +44,7 @@ void init_registers(int pid) | |||
43 | int err; | 44 | int err; |
44 | 45 | ||
45 | err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs); | 46 | err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs); |
46 | if(err) | 47 | if (err) |
47 | panic("check_ptrace : PTRACE_GETREGS failed, errno = %d", | 48 | panic("check_ptrace : PTRACE_GETREGS failed, errno = %d", |
48 | errno); | 49 | errno); |
49 | } | 50 | } |
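
Aside: registers.c issues PTRACE_GETREGS/PTRACE_SETREGS against an already-traced child and panics on failure. The sketch below shows the basic technique of reading a stopped child's registers; it assumes an x86 host and uses struct user_regs_struct for clarity (the UML code stores raw longs in exec_regs instead), and the fork/PTRACE_TRACEME setup is demo scaffolding, not part of the patch.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct user_regs_struct regs;
	pid_t pid = fork();

	if (pid == 0) {
		/* Child: ask to be traced, then stop so the parent can peek. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(pid, NULL, 0);

	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) < 0) {
		perror("PTRACE_GETREGS");
		return 1;
	}
	printf("captured register set of child %d\n", pid);

	ptrace(PTRACE_KILL, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}
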
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index 583424b9797d..49c113b576b7 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c | |||
@@ -1,24 +1,21 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2004 PathScale, Inc | 2 | * Copyright (C) 2004 PathScale, Inc |
3 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | 4 | * Licensed under the GPL |
4 | */ | 5 | */ |
5 | 6 | ||
6 | #include <signal.h> | ||
7 | #include <stdio.h> | ||
8 | #include <unistd.h> | ||
9 | #include <stdlib.h> | 7 | #include <stdlib.h> |
10 | #include <errno.h> | ||
11 | #include <stdarg.h> | 8 | #include <stdarg.h> |
12 | #include <string.h> | 9 | #include <errno.h> |
13 | #include <sys/mman.h> | 10 | #include <signal.h> |
14 | #include "user.h" | 11 | #include <strings.h> |
15 | #include "signal_kern.h" | ||
16 | #include "sysdep/sigcontext.h" | ||
17 | #include "sysdep/barrier.h" | ||
18 | #include "sigcontext.h" | ||
19 | #include "os.h" | 12 | #include "os.h" |
13 | #include "sysdep/barrier.h" | ||
14 | #include "sysdep/sigcontext.h" | ||
15 | #include "user.h" | ||
20 | 16 | ||
21 | /* These are the asynchronous signals. SIGVTALRM and SIGARLM are handled | 17 | /* |
18 | * These are the asynchronous signals. SIGVTALRM and SIGARLM are handled | ||
22 | * together under SIGVTALRM_BIT. SIGPROF is excluded because we want to | 19 | * together under SIGVTALRM_BIT. SIGPROF is excluded because we want to |
23 | * be able to profile all of UML, not just the non-critical sections. If | 20 | * be able to profile all of UML, not just the non-critical sections. If |
24 | * profiling is not thread-safe, then that is not my problem. We can disable | 21 | * profiling is not thread-safe, then that is not my problem. We can disable |
@@ -33,7 +30,8 @@ | |||
33 | #define SIGALRM_BIT 2 | 30 | #define SIGALRM_BIT 2 |
34 | #define SIGALRM_MASK (1 << SIGALRM_BIT) | 31 | #define SIGALRM_MASK (1 << SIGALRM_BIT) |
35 | 32 | ||
36 | /* These are used by both the signal handlers and | 33 | /* |
34 | * These are used by both the signal handlers and | ||
37 | * block/unblock_signals. I don't want modifications cached in a | 35 | * block/unblock_signals. I don't want modifications cached in a |
38 | * register - they must go straight to memory. | 36 | * register - they must go straight to memory. |
39 | */ | 37 | */ |
@@ -45,7 +43,7 @@ void sig_handler(int sig, struct sigcontext *sc) | |||
45 | int enabled; | 43 | int enabled; |
46 | 44 | ||
47 | enabled = signals_enabled; | 45 | enabled = signals_enabled; |
48 | if(!enabled && (sig == SIGIO)){ | 46 | if (!enabled && (sig == SIGIO)) { |
49 | pending |= SIGIO_MASK; | 47 | pending |= SIGIO_MASK; |
50 | return; | 48 | return; |
51 | } | 49 | } |
@@ -61,16 +59,16 @@ static void real_alarm_handler(int sig, struct sigcontext *sc) | |||
61 | { | 59 | { |
62 | struct uml_pt_regs regs; | 60 | struct uml_pt_regs regs; |
63 | 61 | ||
64 | if(sig == SIGALRM) | 62 | if (sig == SIGALRM) |
65 | switch_timers(0); | 63 | switch_timers(0); |
66 | 64 | ||
67 | if(sc != NULL) | 65 | if (sc != NULL) |
68 | copy_sc(®s, sc); | 66 | copy_sc(®s, sc); |
69 | regs.is_user = 0; | 67 | regs.is_user = 0; |
70 | unblock_signals(); | 68 | unblock_signals(); |
71 | timer_handler(sig, ®s); | 69 | timer_handler(sig, ®s); |
72 | 70 | ||
73 | if(sig == SIGALRM) | 71 | if (sig == SIGALRM) |
74 | switch_timers(1); | 72 | switch_timers(1); |
75 | } | 73 | } |
76 | 74 | ||
@@ -79,8 +77,8 @@ void alarm_handler(int sig, struct sigcontext *sc) | |||
79 | int enabled; | 77 | int enabled; |
80 | 78 | ||
81 | enabled = signals_enabled; | 79 | enabled = signals_enabled; |
82 | if(!signals_enabled){ | 80 | if (!signals_enabled) { |
83 | if(sig == SIGVTALRM) | 81 | if (sig == SIGVTALRM) |
84 | pending |= SIGVTALRM_MASK; | 82 | pending |= SIGVTALRM_MASK; |
85 | else pending |= SIGALRM_MASK; | 83 | else pending |= SIGALRM_MASK; |
86 | 84 | ||
@@ -99,7 +97,7 @@ void set_sigstack(void *sig_stack, int size) | |||
99 | .ss_sp = (__ptr_t) sig_stack, | 97 | .ss_sp = (__ptr_t) sig_stack, |
100 | .ss_size = size - sizeof(void *) }); | 98 | .ss_size = size - sizeof(void *) }); |
101 | 99 | ||
102 | if(sigaltstack(&stack, NULL) != 0) | 100 | if (sigaltstack(&stack, NULL) != 0) |
103 | panic("enabling signal stack failed, errno = %d\n", errno); | 101 | panic("enabling signal stack failed, errno = %d\n", errno); |
104 | } | 102 | } |
105 | 103 | ||
@@ -109,7 +107,7 @@ void remove_sigstack(void) | |||
109 | .ss_sp = NULL, | 107 | .ss_sp = NULL, |
110 | .ss_size = 0 }); | 108 | .ss_size = 0 }); |
111 | 109 | ||
112 | if(sigaltstack(&stack, NULL) != 0) | 110 | if (sigaltstack(&stack, NULL) != 0) |
113 | panic("disabling signal stack failed, errno = %d\n", errno); | 111 | panic("disabling signal stack failed, errno = %d\n", errno); |
114 | } | 112 | } |
115 | 113 | ||
@@ -133,26 +131,27 @@ void handle_signal(int sig, struct sigcontext *sc) | |||
133 | * with this interrupt. | 131 | * with this interrupt. |
134 | */ | 132 | */ |
135 | bail = to_irq_stack(&pending); | 133 | bail = to_irq_stack(&pending); |
136 | if(bail) | 134 | if (bail) |
137 | return; | 135 | return; |
138 | 136 | ||
139 | nested = pending & 1; | 137 | nested = pending & 1; |
140 | pending &= ~1; | 138 | pending &= ~1; |
141 | 139 | ||
142 | while((sig = ffs(pending)) != 0){ | 140 | while ((sig = ffs(pending)) != 0){ |
143 | sig--; | 141 | sig--; |
144 | pending &= ~(1 << sig); | 142 | pending &= ~(1 << sig); |
145 | (*handlers[sig])(sig, sc); | 143 | (*handlers[sig])(sig, sc); |
146 | } | 144 | } |
147 | 145 | ||
148 | /* Again, pending comes back with a mask of signals | 146 | /* |
147 | * Again, pending comes back with a mask of signals | ||
149 | * that arrived while tearing down the stack. If this | 148 | * that arrived while tearing down the stack. If this |
150 | * is non-zero, we just go back, set up the stack | 149 | * is non-zero, we just go back, set up the stack |
151 | * again, and handle the new interrupts. | 150 | * again, and handle the new interrupts. |
152 | */ | 151 | */ |
153 | if(!nested) | 152 | if (!nested) |
154 | pending = from_irq_stack(nested); | 153 | pending = from_irq_stack(nested); |
155 | } while(pending); | 154 | } while (pending); |
156 | } | 155 | } |
157 | 156 | ||
158 | extern void hard_handler(int sig); | 157 | extern void hard_handler(int sig); |
@@ -170,18 +169,18 @@ void set_handler(int sig, void (*handler)(int), int flags, ...) | |||
170 | sigemptyset(&action.sa_mask); | 169 | sigemptyset(&action.sa_mask); |
171 | 170 | ||
172 | va_start(ap, flags); | 171 | va_start(ap, flags); |
173 | while((mask = va_arg(ap, int)) != -1) | 172 | while ((mask = va_arg(ap, int)) != -1) |
174 | sigaddset(&action.sa_mask, mask); | 173 | sigaddset(&action.sa_mask, mask); |
175 | va_end(ap); | 174 | va_end(ap); |
176 | 175 | ||
177 | action.sa_flags = flags; | 176 | action.sa_flags = flags; |
178 | action.sa_restorer = NULL; | 177 | action.sa_restorer = NULL; |
179 | if(sigaction(sig, &action, NULL) < 0) | 178 | if (sigaction(sig, &action, NULL) < 0) |
180 | panic("sigaction failed - errno = %d\n", errno); | 179 | panic("sigaction failed - errno = %d\n", errno); |
181 | 180 | ||
182 | sigemptyset(&sig_mask); | 181 | sigemptyset(&sig_mask); |
183 | sigaddset(&sig_mask, sig); | 182 | sigaddset(&sig_mask, sig); |
184 | if(sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0) | 183 | if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0) |
185 | panic("sigprocmask failed - errno = %d\n", errno); | 184 | panic("sigprocmask failed - errno = %d\n", errno); |
186 | } | 185 | } |
187 | 186 | ||
@@ -192,13 +191,14 @@ int change_sig(int signal, int on) | |||
192 | sigemptyset(&sigset); | 191 | sigemptyset(&sigset); |
193 | sigaddset(&sigset, signal); | 192 | sigaddset(&sigset, signal); |
194 | sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, &old); | 193 | sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, &old); |
195 | return(!sigismember(&old, signal)); | 194 | return !sigismember(&old, signal); |
196 | } | 195 | } |
197 | 196 | ||
198 | void block_signals(void) | 197 | void block_signals(void) |
199 | { | 198 | { |
200 | signals_enabled = 0; | 199 | signals_enabled = 0; |
201 | /* This must return with signals disabled, so this barrier | 200 | /* |
201 | * This must return with signals disabled, so this barrier | ||
202 | * ensures that writes are flushed out before the return. | 202 | * ensures that writes are flushed out before the return. |
203 | * This might matter if gcc figures out how to inline this and | 203 | * This might matter if gcc figures out how to inline this and |
204 | * decides to shuffle this code into the caller. | 204 | * decides to shuffle this code into the caller. |
@@ -210,27 +210,31 @@ void unblock_signals(void) | |||
210 | { | 210 | { |
211 | int save_pending; | 211 | int save_pending; |
212 | 212 | ||
213 | if(signals_enabled == 1) | 213 | if (signals_enabled == 1) |
214 | return; | 214 | return; |
215 | 215 | ||
216 | /* We loop because the IRQ handler returns with interrupts off. So, | 216 | /* |
217 | * We loop because the IRQ handler returns with interrupts off. So, | ||
217 | * interrupts may have arrived and we need to re-enable them and | 218 | * interrupts may have arrived and we need to re-enable them and |
218 | * recheck pending. | 219 | * recheck pending. |
219 | */ | 220 | */ |
220 | while(1){ | 221 | while(1) { |
221 | /* Save and reset save_pending after enabling signals. This | 222 | /* |
223 | * Save and reset save_pending after enabling signals. This | ||
222 | * way, pending won't be changed while we're reading it. | 224 | * way, pending won't be changed while we're reading it. |
223 | */ | 225 | */ |
224 | signals_enabled = 1; | 226 | signals_enabled = 1; |
225 | 227 | ||
226 | /* Setting signals_enabled and reading pending must | 228 | /* |
229 | * Setting signals_enabled and reading pending must | ||
227 | * happen in this order. | 230 | * happen in this order. |
228 | */ | 231 | */ |
229 | mb(); | 232 | mb(); |
230 | 233 | ||
231 | save_pending = pending; | 234 | save_pending = pending; |
232 | if(save_pending == 0){ | 235 | if (save_pending == 0) { |
233 | /* This must return with signals enabled, so | 236 | /* |
237 | * This must return with signals enabled, so | ||
234 | * this barrier ensures that writes are | 238 | * this barrier ensures that writes are |
235 | * flushed out before the return. This might | 239 | * flushed out before the return. This might |
236 | * matter if gcc figures out how to inline | 240 | * matter if gcc figures out how to inline |
@@ -243,24 +247,26 @@ void unblock_signals(void) | |||
243 | 247 | ||
244 | pending = 0; | 248 | pending = 0; |
245 | 249 | ||
246 | /* We have pending interrupts, so disable signals, as the | 250 | /* |
251 | * We have pending interrupts, so disable signals, as the | ||
247 | * handlers expect them off when they are called. They will | 252 | * handlers expect them off when they are called. They will |
248 | * be enabled again above. | 253 | * be enabled again above. |
249 | */ | 254 | */ |
250 | 255 | ||
251 | signals_enabled = 0; | 256 | signals_enabled = 0; |
252 | 257 | ||
253 | /* Deal with SIGIO first because the alarm handler might | 258 | /* |
259 | * Deal with SIGIO first because the alarm handler might | ||
254 | * schedule, leaving the pending SIGIO stranded until we come | 260 | * schedule, leaving the pending SIGIO stranded until we come |
255 | * back here. | 261 | * back here. |
256 | */ | 262 | */ |
257 | if(save_pending & SIGIO_MASK) | 263 | if (save_pending & SIGIO_MASK) |
258 | sig_handler_common_skas(SIGIO, NULL); | 264 | sig_handler_common_skas(SIGIO, NULL); |
259 | 265 | ||
260 | if(save_pending & SIGALRM_MASK) | 266 | if (save_pending & SIGALRM_MASK) |
261 | real_alarm_handler(SIGALRM, NULL); | 267 | real_alarm_handler(SIGALRM, NULL); |
262 | 268 | ||
263 | if(save_pending & SIGVTALRM_MASK) | 269 | if (save_pending & SIGVTALRM_MASK) |
264 | real_alarm_handler(SIGVTALRM, NULL); | 270 | real_alarm_handler(SIGVTALRM, NULL); |
265 | } | 271 | } |
266 | } | 272 | } |
@@ -273,11 +279,11 @@ int get_signals(void) | |||
273 | int set_signals(int enable) | 279 | int set_signals(int enable) |
274 | { | 280 | { |
275 | int ret; | 281 | int ret; |
276 | if(signals_enabled == enable) | 282 | if (signals_enabled == enable) |
277 | return enable; | 283 | return enable; |
278 | 284 | ||
279 | ret = signals_enabled; | 285 | ret = signals_enabled; |
280 | if(enable) | 286 | if (enable) |
281 | unblock_signals(); | 287 | unblock_signals(); |
282 | else block_signals(); | 288 | else block_signals(); |
283 | 289 | ||
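
Aside: block_signals()/unblock_signals() above implement signal deferral in software: a handler that fires while signals_enabled is 0 only sets a bit in pending and returns, and unblock_signals() later replays the recorded work in a loop until nothing new has arrived. The sketch below strips that scheme down to a single pending bit; the memory barriers and the real SIGIO/SIGALRM/SIGVTALRM dispatch are omitted for brevity.

#include <signal.h>
#include <stdio.h>

static volatile int demo_signals_enabled = 1;
static volatile int demo_pending;

static void do_sigio_work(void)
{
	printf("handling deferred SIGIO work\n");
}

static void sigio_handler(int sig)
{
	(void) sig;
	if (!demo_signals_enabled) {
		demo_pending |= 1;	/* remember it, handle it later */
		return;
	}
	do_sigio_work();
}

static void demo_block_signals(void)
{
	demo_signals_enabled = 0;
}

static void demo_unblock_signals(void)
{
	while (1) {
		demo_signals_enabled = 1;
		if (!demo_pending)
			return;

		/* Work arrived while blocked: turn the flag back off so the
		 * handler defers new arrivals, then replay what we have. */
		demo_signals_enabled = 0;
		demo_pending = 0;
		do_sigio_work();
	}
}

int main(void)
{
	signal(SIGIO, sigio_handler);
	demo_block_signals();
	raise(SIGIO);			/* deferred into the pending bit */
	demo_unblock_signals();		/* replayed here */
	return 0;
}
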
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c index ae7685710c46..d58d11179bb7 100644 --- a/arch/um/os-Linux/skas/mem.c +++ b/arch/um/os-Linux/skas/mem.c | |||
@@ -1,30 +1,25 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <signal.h> | 6 | #include <stddef.h> |
7 | #include <unistd.h> | ||
7 | #include <errno.h> | 8 | #include <errno.h> |
8 | #include <string.h> | 9 | #include <string.h> |
9 | #include <unistd.h> | ||
10 | #include <sys/mman.h> | 10 | #include <sys/mman.h> |
11 | #include <sys/wait.h> | 11 | #include "init.h" |
12 | #include <asm/unistd.h> | 12 | #include "kern_constants.h" |
13 | #include "mem_user.h" | 13 | #include "mm_id.h" |
14 | #include "mem.h" | ||
15 | #include "skas.h" | ||
16 | #include "user.h" | ||
17 | #include "os.h" | 14 | #include "os.h" |
18 | #include "proc_mm.h" | 15 | #include "proc_mm.h" |
19 | #include "ptrace_user.h" | 16 | #include "ptrace_user.h" |
20 | #include "kern_util.h" | ||
21 | #include "task.h" | ||
22 | #include "registers.h" | 17 | #include "registers.h" |
23 | #include "uml-config.h" | 18 | #include "skas.h" |
19 | #include "user.h" | ||
24 | #include "sysdep/ptrace.h" | 20 | #include "sysdep/ptrace.h" |
25 | #include "sysdep/stub.h" | 21 | #include "sysdep/stub.h" |
26 | #include "init.h" | 22 | #include "uml-config.h" |
27 | #include "kern_constants.h" | ||
28 | 23 | ||
29 | extern unsigned long batch_syscall_stub, __syscall_stub_start; | 24 | extern unsigned long batch_syscall_stub, __syscall_stub_start; |
30 | 25 | ||
@@ -33,7 +28,7 @@ extern void wait_stub_done(int pid); | |||
33 | static inline unsigned long *check_init_stack(struct mm_id * mm_idp, | 28 | static inline unsigned long *check_init_stack(struct mm_id * mm_idp, |
34 | unsigned long *stack) | 29 | unsigned long *stack) |
35 | { | 30 | { |
36 | if(stack == NULL) { | 31 | if (stack == NULL) { |
37 | stack = (unsigned long *) mm_idp->stack + 2; | 32 | stack = (unsigned long *) mm_idp->stack + 2; |
38 | *stack = 0; | 33 | *stack = 0; |
39 | } | 34 | } |
@@ -67,29 +62,30 @@ static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr) | |||
67 | unsigned long * syscall; | 62 | unsigned long * syscall; |
68 | int err, pid = mm_idp->u.pid; | 63 | int err, pid = mm_idp->u.pid; |
69 | 64 | ||
70 | if(proc_mm) | 65 | if (proc_mm) |
71 | /* FIXME: Need to look up userspace_pid by cpu */ | 66 | /* FIXME: Need to look up userspace_pid by cpu */ |
72 | pid = userspace_pid[0]; | 67 | pid = userspace_pid[0]; |
73 | 68 | ||
74 | multi_count++; | 69 | multi_count++; |
75 | 70 | ||
76 | n = ptrace_setregs(pid, syscall_regs); | 71 | n = ptrace_setregs(pid, syscall_regs); |
77 | if(n < 0){ | 72 | if (n < 0) { |
78 | printk("Registers - \n"); | 73 | printk(UM_KERN_ERR "Registers - \n"); |
79 | for(i = 0; i < MAX_REG_NR; i++) | 74 | for (i = 0; i < MAX_REG_NR; i++) |
80 | printk("\t%d\t0x%lx\n", i, syscall_regs[i]); | 75 | printk(UM_KERN_ERR "\t%d\t0x%lx\n", i, syscall_regs[i]); |
81 | panic("do_syscall_stub : PTRACE_SETREGS failed, errno = %d\n", | 76 | panic("do_syscall_stub : PTRACE_SETREGS failed, errno = %d\n", |
82 | -n); | 77 | -n); |
83 | } | 78 | } |
84 | 79 | ||
85 | err = ptrace(PTRACE_CONT, pid, 0, 0); | 80 | err = ptrace(PTRACE_CONT, pid, 0, 0); |
86 | if(err) | 81 | if (err) |
87 | panic("Failed to continue stub, pid = %d, errno = %d\n", pid, | 82 | panic("Failed to continue stub, pid = %d, errno = %d\n", pid, |
88 | errno); | 83 | errno); |
89 | 84 | ||
90 | wait_stub_done(pid); | 85 | wait_stub_done(pid); |
91 | 86 | ||
92 | /* When the stub stops, we find the following values on the | 87 | /* |
88 | * When the stub stops, we find the following values on the | ||
93 | * beginning of the stack: | 89 | * beginning of the stack: |
94 | * (long )return_value | 90 | * (long )return_value |
95 | * (long )offset to failed sycall-data (0, if no error) | 91 | * (long )offset to failed sycall-data (0, if no error) |
@@ -99,24 +95,25 @@ static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr) | |||
99 | if (offset) { | 95 | if (offset) { |
100 | data = (unsigned long *)(mm_idp->stack + | 96 | data = (unsigned long *)(mm_idp->stack + |
101 | offset - UML_CONFIG_STUB_DATA); | 97 | offset - UML_CONFIG_STUB_DATA); |
102 | printk("do_syscall_stub : ret = %ld, offset = %ld, " | 98 | printk(UM_KERN_ERR "do_syscall_stub : ret = %ld, offset = %ld, " |
103 | "data = %p\n", ret, offset, data); | 99 | "data = %p\n", ret, offset, data); |
104 | syscall = (unsigned long *)((unsigned long)data + data[0]); | 100 | syscall = (unsigned long *)((unsigned long)data + data[0]); |
105 | printk("do_syscall_stub: syscall %ld failed, return value = " | 101 | printk(UM_KERN_ERR "do_syscall_stub: syscall %ld failed, " |
106 | "0x%lx, expected return value = 0x%lx\n", | 102 | "return value = 0x%lx, expected return value = 0x%lx\n", |
107 | syscall[0], ret, syscall[7]); | 103 | syscall[0], ret, syscall[7]); |
108 | printk(" syscall parameters: " | 104 | printk(UM_KERN_ERR " syscall parameters: " |
109 | "0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", | 105 | "0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", |
110 | syscall[1], syscall[2], syscall[3], | 106 | syscall[1], syscall[2], syscall[3], |
111 | syscall[4], syscall[5], syscall[6]); | 107 | syscall[4], syscall[5], syscall[6]); |
112 | for(n = 1; n < data[0]/sizeof(long); n++) { | 108 | for (n = 1; n < data[0]/sizeof(long); n++) { |
113 | if(n == 1) | 109 | if (n == 1) |
114 | printk(" additional syscall data:"); | 110 | printk(UM_KERN_ERR " additional syscall " |
115 | if(n % 4 == 1) | 111 | "data:"); |
116 | printk("\n "); | 112 | if (n % 4 == 1) |
113 | printk("\n" UM_KERN_ERR " "); | ||
117 | printk(" 0x%lx", data[n]); | 114 | printk(" 0x%lx", data[n]); |
118 | } | 115 | } |
119 | if(n > 1) | 116 | if (n > 1) |
120 | printk("\n"); | 117 | printk("\n"); |
121 | } | 118 | } |
122 | else ret = 0; | 119 | else ret = 0; |
@@ -132,7 +129,7 @@ long run_syscall_stub(struct mm_id * mm_idp, int syscall, | |||
132 | { | 129 | { |
133 | unsigned long *stack = check_init_stack(mm_idp, *addr); | 130 | unsigned long *stack = check_init_stack(mm_idp, *addr); |
134 | 131 | ||
135 | if(done && *addr == NULL) | 132 | if (done && *addr == NULL) |
136 | single_count++; | 133 | single_count++; |
137 | 134 | ||
138 | *stack += sizeof(long); | 135 | *stack += sizeof(long); |
@@ -149,8 +146,8 @@ long run_syscall_stub(struct mm_id * mm_idp, int syscall, | |||
149 | *stack = 0; | 146 | *stack = 0; |
150 | multi_op_count++; | 147 | multi_op_count++; |
151 | 148 | ||
152 | if(!done && ((((unsigned long) stack) & ~UM_KERN_PAGE_MASK) < | 149 | if (!done && ((((unsigned long) stack) & ~UM_KERN_PAGE_MASK) < |
153 | UM_KERN_PAGE_SIZE - 10 * sizeof(long))){ | 150 | UM_KERN_PAGE_SIZE - 10 * sizeof(long))) { |
154 | *addr = stack; | 151 | *addr = stack; |
155 | return 0; | 152 | return 0; |
156 | } | 153 | } |
@@ -165,14 +162,15 @@ long syscall_stub_data(struct mm_id * mm_idp, | |||
165 | unsigned long *stack; | 162 | unsigned long *stack; |
166 | int ret = 0; | 163 | int ret = 0; |
167 | 164 | ||
168 | /* If *addr still is uninitialized, it *must* contain NULL. | 165 | /* |
166 | * If *addr still is uninitialized, it *must* contain NULL. | ||
169 | * Thus in this case do_syscall_stub correctly won't be called. | 167 | * Thus in this case do_syscall_stub correctly won't be called. |
170 | */ | 168 | */ |
171 | if((((unsigned long) *addr) & ~UM_KERN_PAGE_MASK) >= | 169 | if ((((unsigned long) *addr) & ~UM_KERN_PAGE_MASK) >= |
172 | UM_KERN_PAGE_SIZE - (10 + data_count) * sizeof(long)) { | 170 | UM_KERN_PAGE_SIZE - (10 + data_count) * sizeof(long)) { |
173 | ret = do_syscall_stub(mm_idp, addr); | 171 | ret = do_syscall_stub(mm_idp, addr); |
174 | /* in case of error, don't overwrite data on stack */ | 172 | /* in case of error, don't overwrite data on stack */ |
175 | if(ret) | 173 | if (ret) |
176 | return ret; | 174 | return ret; |
177 | } | 175 | } |
178 | 176 | ||
@@ -194,7 +192,7 @@ int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len, int prot, | |||
194 | { | 192 | { |
195 | int ret; | 193 | int ret; |
196 | 194 | ||
197 | if(proc_mm){ | 195 | if (proc_mm) { |
198 | struct proc_mm_op map; | 196 | struct proc_mm_op map; |
199 | int fd = mm_idp->u.mm_fd; | 197 | int fd = mm_idp->u.mm_fd; |
200 | 198 | ||
@@ -210,9 +208,10 @@ int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len, int prot, | |||
210 | .offset= offset | 208 | .offset= offset |
211 | } } } ); | 209 | } } } ); |
212 | CATCH_EINTR(ret = write(fd, &map, sizeof(map))); | 210 | CATCH_EINTR(ret = write(fd, &map, sizeof(map))); |
213 | if(ret != sizeof(map)){ | 211 | if (ret != sizeof(map)) { |
214 | ret = -errno; | 212 | ret = -errno; |
215 | printk("map : /proc/mm map failed, err = %d\n", -ret); | 213 | printk(UM_KERN_ERR "map : /proc/mm map failed, " |
214 | "err = %d\n", -ret); | ||
216 | } | 215 | } |
217 | else ret = 0; | 216 | else ret = 0; |
218 | } | 217 | } |
@@ -233,7 +232,7 @@ int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len, | |||
233 | { | 232 | { |
234 | int ret; | 233 | int ret; |
235 | 234 | ||
236 | if(proc_mm){ | 235 | if (proc_mm) { |
237 | struct proc_mm_op unmap; | 236 | struct proc_mm_op unmap; |
238 | int fd = mm_idp->u.mm_fd; | 237 | int fd = mm_idp->u.mm_fd; |
239 | 238 | ||
@@ -244,9 +243,10 @@ int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len, | |||
244 | (unsigned long) addr, | 243 | (unsigned long) addr, |
245 | .len = len } } } ); | 244 | .len = len } } } ); |
246 | CATCH_EINTR(ret = write(fd, &unmap, sizeof(unmap))); | 245 | CATCH_EINTR(ret = write(fd, &unmap, sizeof(unmap))); |
247 | if(ret != sizeof(unmap)){ | 246 | if (ret != sizeof(unmap)) { |
248 | ret = -errno; | 247 | ret = -errno; |
249 | printk("unmap - proc_mm write returned %d\n", ret); | 248 | printk(UM_KERN_ERR "unmap - proc_mm write returned " |
249 | "%d\n", ret); | ||
250 | } | 250 | } |
251 | else ret = 0; | 251 | else ret = 0; |
252 | } | 252 | } |
@@ -267,7 +267,7 @@ int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len, | |||
267 | struct proc_mm_op protect; | 267 | struct proc_mm_op protect; |
268 | int ret; | 268 | int ret; |
269 | 269 | ||
270 | if(proc_mm){ | 270 | if (proc_mm) { |
271 | int fd = mm_idp->u.mm_fd; | 271 | int fd = mm_idp->u.mm_fd; |
272 | 272 | ||
273 | protect = ((struct proc_mm_op) { .op = MM_MPROTECT, | 273 | protect = ((struct proc_mm_op) { .op = MM_MPROTECT, |
@@ -279,9 +279,9 @@ int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len, | |||
279 | .prot = prot } } } ); | 279 | .prot = prot } } } ); |
280 | 280 | ||
281 | CATCH_EINTR(ret = write(fd, &protect, sizeof(protect))); | 281 | CATCH_EINTR(ret = write(fd, &protect, sizeof(protect))); |
282 | if(ret != sizeof(protect)){ | 282 | if (ret != sizeof(protect)) { |
283 | ret = -errno; | 283 | ret = -errno; |
284 | printk("protect failed, err = %d", -ret); | 284 | printk(UM_KERN_ERR "protect failed, err = %d", -ret); |
285 | } | 285 | } |
286 | else ret = 0; | 286 | else ret = 0; |
287 | } | 287 | } |
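
Aside: map(), unmap() and protect() above share one shape when proc_mm is in use: fill in a struct proc_mm_op, write() it to the /proc/mm descriptor, and treat anything other than a full-size write as a failure converted to -errno. The sketch below captures only that shape; the request struct here is hypothetical, since the real proc_mm_op layout is only partially visible in the hunk.

#include <errno.h>
#include <unistd.h>

/* Hypothetical request record standing in for struct proc_mm_op. */
struct demo_mm_op {
	int op;
	unsigned long addr;
	unsigned long len;
};

/* Return 0 on success or -errno on failure, matching the convention
 * used by map()/unmap()/protect() in the hunk above. */
int send_mm_op(int fd, const struct demo_mm_op *req)
{
	ssize_t ret;

	ret = write(fd, req, sizeof(*req));
	if (ret != (ssize_t) sizeof(*req))
		return -errno;
	return 0;
}
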
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index eb027673f357..e12d18cc77da 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c | |||
@@ -1,48 +1,38 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002- 2004 Jeff Dike (jdike@addtoit.com) | 2 | * Copyright (C) 2002- 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <stdlib.h> | 6 | #include <stdlib.h> |
7 | #include <string.h> | ||
8 | #include <unistd.h> | 7 | #include <unistd.h> |
9 | #include <errno.h> | ||
10 | #include <signal.h> | ||
11 | #include <sched.h> | 8 | #include <sched.h> |
12 | #include "ptrace_user.h" | 9 | #include <errno.h> |
13 | #include <sys/wait.h> | 10 | #include <string.h> |
14 | #include <sys/mman.h> | 11 | #include <sys/mman.h> |
15 | #include <sys/user.h> | 12 | #include <sys/ptrace.h> |
16 | #include <sys/time.h> | 13 | #include <sys/wait.h> |
17 | #include <sys/syscall.h> | 14 | #include <asm/unistd.h> |
18 | #include <asm/types.h> | 15 | #include "as-layout.h" |
19 | #include "user.h" | ||
20 | #include "sysdep/ptrace.h" | ||
21 | #include "kern_util.h" | ||
22 | #include "skas.h" | ||
23 | #include "stub-data.h" | ||
24 | #include "mm_id.h" | ||
25 | #include "sysdep/sigcontext.h" | ||
26 | #include "sysdep/stub.h" | ||
27 | #include "os.h" | ||
28 | #include "proc_mm.h" | ||
29 | #include "skas_ptrace.h" | ||
30 | #include "chan_user.h" | 16 | #include "chan_user.h" |
31 | #include "registers.h" | 17 | #include "kern_constants.h" |
32 | #include "mem.h" | 18 | #include "mem.h" |
33 | #include "uml-config.h" | 19 | #include "os.h" |
34 | #include "process.h" | 20 | #include "process.h" |
35 | #include "longjmp.h" | 21 | #include "proc_mm.h" |
36 | #include "kern_constants.h" | 22 | #include "ptrace_user.h" |
37 | #include "as-layout.h" | 23 | #include "registers.h" |
24 | #include "skas.h" | ||
25 | #include "skas_ptrace.h" | ||
26 | #include "user.h" | ||
27 | #include "sysdep/stub.h" | ||
38 | 28 | ||
39 | int is_skas_winch(int pid, int fd, void *data) | 29 | int is_skas_winch(int pid, int fd, void *data) |
40 | { | 30 | { |
41 | if(pid != os_getpgrp()) | 31 | if (pid != os_getpgrp()) |
42 | return(0); | 32 | return 0; |
43 | 33 | ||
44 | register_winch_irq(-1, fd, -1, data, 0); | 34 | register_winch_irq(-1, fd, -1, data, 0); |
45 | return(1); | 35 | return 1; |
46 | } | 36 | } |
47 | 37 | ||
48 | static int ptrace_dump_regs(int pid) | 38 | static int ptrace_dump_regs(int pid) |
@@ -50,13 +40,12 @@ static int ptrace_dump_regs(int pid) | |||
50 | unsigned long regs[MAX_REG_NR]; | 40 | unsigned long regs[MAX_REG_NR]; |
51 | int i; | 41 | int i; |
52 | 42 | ||
53 | if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0) | 43 | if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0) |
54 | return -errno; | 44 | return -errno; |
55 | else { | 45 | |
56 | printk("Stub registers -\n"); | 46 | printk(UM_KERN_ERR "Stub registers -\n"); |
57 | for(i = 0; i < ARRAY_SIZE(regs); i++) | 47 | for (i = 0; i < ARRAY_SIZE(regs); i++) |
58 | printk("\t%d - %lx\n", i, regs[i]); | 48 | printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]); |
59 | } | ||
60 | 49 | ||
61 | return 0; | 50 | return 0; |
62 | } | 51 | } |
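
The ptrace_dump_regs() cleanup flattens the error path and tags the output with UM_KERN_ERR; the operation underneath is a single PTRACE_GETREGS call that snapshots a stopped tracee's registers. A standalone sketch, assuming an x86_64 host and using struct user_regs_struct where the UML code uses a raw unsigned long array:

/* Sketch: dump a stopped tracee's registers, roughly what ptrace_dump_regs()
 * does. Assumes an x86_64 host. */
#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>

static int dump_regs(int pid)
{
	struct user_regs_struct regs;

	if (ptrace(PTRACE_GETREGS, pid, 0, &regs) < 0)
		return -errno;

	fprintf(stderr, "rip = %llx, rsp = %llx\n",
		(unsigned long long) regs.rip,
		(unsigned long long) regs.rsp);
	return 0;
}
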
@@ -74,27 +63,28 @@ void wait_stub_done(int pid) | |||
74 | { | 63 | { |
75 | int n, status, err; | 64 | int n, status, err; |
76 | 65 | ||
77 | while(1){ | 66 | while (1) { |
78 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); | 67 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); |
79 | if((n < 0) || !WIFSTOPPED(status)) | 68 | if ((n < 0) || !WIFSTOPPED(status)) |
80 | goto bad_wait; | 69 | goto bad_wait; |
81 | 70 | ||
82 | if(((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0) | 71 | if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0) |
83 | break; | 72 | break; |
84 | 73 | ||
85 | err = ptrace(PTRACE_CONT, pid, 0, 0); | 74 | err = ptrace(PTRACE_CONT, pid, 0, 0); |
86 | if(err) | 75 | if (err) |
87 | panic("wait_stub_done : continue failed, errno = %d\n", | 76 | panic("wait_stub_done : continue failed, errno = %d\n", |
88 | errno); | 77 | errno); |
89 | } | 78 | } |
90 | 79 | ||
91 | if(((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0) | 80 | if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0) |
92 | return; | 81 | return; |
93 | 82 | ||
94 | bad_wait: | 83 | bad_wait: |
95 | err = ptrace_dump_regs(pid); | 84 | err = ptrace_dump_regs(pid); |
96 | if(err) | 85 | if (err) |
97 | printk("Failed to get registers from stub, errno = %d\n", -err); | 86 | printk(UM_KERN_ERR "Failed to get registers from stub, " |
87 | "errno = %d\n", -err); | ||
98 | panic("wait_stub_done : failed to wait for SIGUSR1/SIGTRAP, pid = %d, " | 88 | panic("wait_stub_done : failed to wait for SIGUSR1/SIGTRAP, pid = %d, " |
99 | "n = %d, errno = %d, status = 0x%x\n", pid, n, errno, status); | 89 | "n = %d, errno = %d, status = 0x%x\n", pid, n, errno, status); |
100 | } | 90 | } |
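
wait_stub_done() is the tracer-side handshake with the syscall stub: loop on waitpid(..., WUNTRACED), push the child past any stop signal outside the expected mask with PTRACE_CONT, and return only once a "done" signal arrives. A reduced sketch of that loop, with EXPECTED_MASK as a hypothetical stand-in for STUB_SIG_MASK/STUB_DONE_MASK:

/* Sketch: wait until a ptraced child stops with one of the expected signals.
 * EXPECTED_MASK is a hypothetical stand-in for STUB_SIG_MASK/STUB_DONE_MASK. */
#include <errno.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#define EXPECTED_MASK ((1 << SIGUSR1) | (1 << SIGTRAP))

static int wait_for_stop(int pid)
{
	int n, status;

	while (1) {
		do {
			n = waitpid(pid, &status, WUNTRACED);
		} while ((n < 0) && (errno == EINTR));

		if ((n < 0) || !WIFSTOPPED(status))
			return -1;

		if ((1 << WSTOPSIG(status)) & EXPECTED_MASK)
			return WSTOPSIG(status);

		/* Not one of ours - push the child past this stop and retry. */
		if (ptrace(PTRACE_CONT, pid, 0, 0) < 0)
			return -errno;
	}
}
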
@@ -105,9 +95,9 @@ void get_skas_faultinfo(int pid, struct faultinfo * fi) | |||
105 | { | 95 | { |
106 | int err; | 96 | int err; |
107 | 97 | ||
108 | if(ptrace_faultinfo){ | 98 | if (ptrace_faultinfo) { |
109 | err = ptrace(PTRACE_FAULTINFO, pid, 0, fi); | 99 | err = ptrace(PTRACE_FAULTINFO, pid, 0, fi); |
110 | if(err) | 100 | if (err) |
111 | panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, " | 101 | panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, " |
112 | "errno = %d\n", errno); | 102 | "errno = %d\n", errno); |
113 | 103 | ||
@@ -119,12 +109,13 @@ void get_skas_faultinfo(int pid, struct faultinfo * fi) | |||
119 | } | 109 | } |
120 | else { | 110 | else { |
121 | err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV); | 111 | err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV); |
122 | if(err) | 112 | if (err) |
123 | panic("Failed to continue stub, pid = %d, errno = %d\n", | 113 | panic("Failed to continue stub, pid = %d, errno = %d\n", |
124 | pid, errno); | 114 | pid, errno); |
125 | wait_stub_done(pid); | 115 | wait_stub_done(pid); |
126 | 116 | ||
127 | /* faultinfo is prepared by the stub-segv-handler at start of | 117 | /* |
118 | * faultinfo is prepared by the stub-segv-handler at start of | ||
128 | * the stub stack page. We just have to copy it. | 119 | * the stub stack page. We just have to copy it. |
129 | */ | 120 | */ |
130 | memcpy(fi, (void *)current_stub_stack(), sizeof(*fi)); | 121 | memcpy(fi, (void *)current_stub_stack(), sizeof(*fi)); |
@@ -137,8 +128,12 @@ static void handle_segv(int pid, struct uml_pt_regs * regs) | |||
137 | segv(regs->faultinfo, 0, 1, NULL); | 128 | segv(regs->faultinfo, 0, 1, NULL); |
138 | } | 129 | } |
139 | 130 | ||
140 | /*To use the same value of using_sysemu as the caller, ask it that value (in local_using_sysemu)*/ | 131 | /* |
141 | static void handle_trap(int pid, struct uml_pt_regs *regs, int local_using_sysemu) | 132 | * To use the same value of using_sysemu as the caller, ask it that value |
133 | * (in local_using_sysemu) | ||
134 | */ | ||
135 | static void handle_trap(int pid, struct uml_pt_regs *regs, | ||
136 | int local_using_sysemu) | ||
142 | { | 137 | { |
143 | int err, status; | 138 | int err, status; |
144 | 139 | ||
@@ -149,22 +144,22 @@ static void handle_trap(int pid, struct uml_pt_regs *regs, int local_using_sysem | |||
149 | { | 144 | { |
150 | err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET, | 145 | err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET, |
151 | __NR_getpid); | 146 | __NR_getpid); |
152 | if(err < 0) | 147 | if (err < 0) |
153 | panic("handle_trap - nullifying syscall failed errno = %d\n", | 148 | panic("handle_trap - nullifying syscall failed, " |
154 | errno); | 149 | "errno = %d\n", errno); |
155 | 150 | ||
156 | err = ptrace(PTRACE_SYSCALL, pid, 0, 0); | 151 | err = ptrace(PTRACE_SYSCALL, pid, 0, 0); |
157 | if(err < 0) | 152 | if (err < 0) |
158 | panic("handle_trap - continuing to end of syscall failed, " | 153 | panic("handle_trap - continuing to end of syscall " |
159 | "errno = %d\n", errno); | 154 | "failed, errno = %d\n", errno); |
160 | 155 | ||
161 | CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED)); | 156 | CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED)); |
162 | if((err < 0) || !WIFSTOPPED(status) || | 157 | if ((err < 0) || !WIFSTOPPED(status) || |
163 | (WSTOPSIG(status) != SIGTRAP + 0x80)){ | 158 | (WSTOPSIG(status) != SIGTRAP + 0x80)) { |
164 | err = ptrace_dump_regs(pid); | 159 | err = ptrace_dump_regs(pid); |
165 | if(err) | 160 | if (err) |
166 | printk("Failed to get registers from process, " | 161 | printk(UM_KERN_ERR "Failed to get registers " |
167 | "errno = %d\n", -err); | 162 | "from process, errno = %d\n", -err); |
168 | panic("handle_trap - failed to wait at end of syscall, " | 163 | panic("handle_trap - failed to wait at end of syscall, " |
169 | "errno = %d, status = %d\n", errno, status); | 164 | "errno = %d, status = %d\n", errno, status); |
170 | } | 165 | } |
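
When sysemu is unavailable, handle_trap() neutralizes the intercepted syscall by overwriting its number with __NR_getpid, resumes with PTRACE_SYSCALL, and waits for the SIGTRAP | 0x80 stop that PTRACE_O_TRACESYSGOOD produces at syscall exit. A self-contained sketch, using the portable PTRACE_POKEUSER name and a caller-supplied offset in place of the arch-specific PT_SYSCALL_NR_OFFSET:

/* Sketch: replace the tracee's pending syscall with getpid() and run it out.
 * syscall_nr_offset is a placeholder for the arch-specific user-area offset
 * (PT_SYSCALL_NR_OFFSET in the UML sources). */
#include <errno.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/unistd.h>

static int nullify_syscall(int pid, long syscall_nr_offset)
{
	int status;

	if (ptrace(PTRACE_POKEUSER, pid, syscall_nr_offset, __NR_getpid) < 0)
		return -errno;

	if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0)
		return -errno;

	if (waitpid(pid, &status, WUNTRACED) < 0)
		return -errno;

	/* With PTRACE_O_TRACESYSGOOD, syscall stops arrive as SIGTRAP | 0x80. */
	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != (SIGTRAP | 0x80)))
		return -EINVAL;

	return 0;
}
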
@@ -184,38 +179,39 @@ static int userspace_tramp(void *stack) | |||
184 | 179 | ||
185 | init_new_thread_signals(); | 180 | init_new_thread_signals(); |
186 | err = set_interval(1); | 181 | err = set_interval(1); |
187 | if(err) | 182 | if (err) |
188 | panic("userspace_tramp - setting timer failed, errno = %d\n", | 183 | panic("userspace_tramp - setting timer failed, errno = %d\n", |
189 | err); | 184 | err); |
190 | 185 | ||
191 | if(!proc_mm){ | 186 | if (!proc_mm) { |
192 | /* This has a pte, but it can't be mapped in with the usual | 187 | /* |
188 | * This has a pte, but it can't be mapped in with the usual | ||
193 | * tlb_flush mechanism because this is part of that mechanism | 189 | * tlb_flush mechanism because this is part of that mechanism |
194 | */ | 190 | */ |
195 | int fd; | 191 | int fd; |
196 | __u64 offset; | 192 | unsigned long long offset; |
197 | fd = phys_mapping(to_phys(&__syscall_stub_start), &offset); | 193 | fd = phys_mapping(to_phys(&__syscall_stub_start), &offset); |
198 | addr = mmap64((void *) UML_CONFIG_STUB_CODE, UM_KERN_PAGE_SIZE, | 194 | addr = mmap64((void *) UML_CONFIG_STUB_CODE, UM_KERN_PAGE_SIZE, |
199 | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset); | 195 | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset); |
200 | if(addr == MAP_FAILED){ | 196 | if (addr == MAP_FAILED) { |
201 | printk("mapping mmap stub failed, errno = %d\n", | 197 | printk(UM_KERN_ERR "mapping mmap stub failed, " |
202 | errno); | 198 | "errno = %d\n", errno); |
203 | exit(1); | 199 | exit(1); |
204 | } | 200 | } |
205 | 201 | ||
206 | if(stack != NULL){ | 202 | if (stack != NULL) { |
207 | fd = phys_mapping(to_phys(stack), &offset); | 203 | fd = phys_mapping(to_phys(stack), &offset); |
208 | addr = mmap((void *) UML_CONFIG_STUB_DATA, | 204 | addr = mmap((void *) UML_CONFIG_STUB_DATA, |
209 | UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE, | 205 | UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE, |
210 | MAP_FIXED | MAP_SHARED, fd, offset); | 206 | MAP_FIXED | MAP_SHARED, fd, offset); |
211 | if(addr == MAP_FAILED){ | 207 | if (addr == MAP_FAILED) { |
212 | printk("mapping segfault stack failed, " | 208 | printk(UM_KERN_ERR "mapping segfault stack " |
213 | "errno = %d\n", errno); | 209 | "failed, errno = %d\n", errno); |
214 | exit(1); | 210 | exit(1); |
215 | } | 211 | } |
216 | } | 212 | } |
217 | } | 213 | } |
218 | if(!ptrace_faultinfo && (stack != NULL)){ | 214 | if (!ptrace_faultinfo && (stack != NULL)) { |
219 | struct sigaction sa; | 215 | struct sigaction sa; |
220 | 216 | ||
221 | unsigned long v = UML_CONFIG_STUB_CODE + | 217 | unsigned long v = UML_CONFIG_STUB_CODE + |
@@ -232,13 +228,13 @@ static int userspace_tramp(void *stack) | |||
232 | sa.sa_flags = SA_ONSTACK; | 228 | sa.sa_flags = SA_ONSTACK; |
233 | sa.sa_handler = (void *) v; | 229 | sa.sa_handler = (void *) v; |
234 | sa.sa_restorer = NULL; | 230 | sa.sa_restorer = NULL; |
235 | if(sigaction(SIGSEGV, &sa, NULL) < 0) | 231 | if (sigaction(SIGSEGV, &sa, NULL) < 0) |
236 | panic("userspace_tramp - setting SIGSEGV handler " | 232 | panic("userspace_tramp - setting SIGSEGV handler " |
237 | "failed - errno = %d\n", errno); | 233 | "failed - errno = %d\n", errno); |
238 | } | 234 | } |
239 | 235 | ||
240 | os_stop_process(os_getpid()); | 236 | os_stop_process(os_getpid()); |
241 | return(0); | 237 | return 0; |
242 | } | 238 | } |
243 | 239 | ||
244 | /* Each element set once, and only accessed by a single processor anyway */ | 240 | /* Each element set once, and only accessed by a single processor anyway */ |
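
Most of the userspace_tramp() hunk is reflowed error reporting around two fixed-address mappings: the syscall stub's code page and, for skas0, its data/stack page, both mapped with MAP_FIXED from the physical-memory file. A sketch of the code-page step, with STUB_CODE_ADDR and STUB_PAGE_SIZE as illustrative constants standing in for UML_CONFIG_STUB_CODE and UM_KERN_PAGE_SIZE (and plain mmap instead of mmap64):

/* Sketch: map one page of pre-existing backing store at a fixed address.
 * STUB_CODE_ADDR and STUB_PAGE_SIZE are illustrative constants. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define STUB_CODE_ADDR	0x100000UL	/* placeholder for UML_CONFIG_STUB_CODE */
#define STUB_PAGE_SIZE	4096UL

static void map_stub_code(int fd, off_t offset)
{
	void *addr = mmap((void *) STUB_CODE_ADDR, STUB_PAGE_SIZE, PROT_EXEC,
			  MAP_FIXED | MAP_PRIVATE, fd, offset);

	if (addr == MAP_FAILED) {
		fprintf(stderr, "mapping mmap stub failed, errno = %d\n",
			errno);
		exit(1);
	}
}
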
@@ -255,35 +251,38 @@ int start_userspace(unsigned long stub_stack) | |||
255 | stack = mmap(NULL, UM_KERN_PAGE_SIZE, | 251 | stack = mmap(NULL, UM_KERN_PAGE_SIZE, |
256 | PROT_READ | PROT_WRITE | PROT_EXEC, | 252 | PROT_READ | PROT_WRITE | PROT_EXEC, |
257 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 253 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
258 | if(stack == MAP_FAILED) | 254 | if (stack == MAP_FAILED) |
259 | panic("start_userspace : mmap failed, errno = %d", errno); | 255 | panic("start_userspace : mmap failed, errno = %d", errno); |
260 | sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *); | 256 | sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *); |
261 | 257 | ||
262 | flags = CLONE_FILES | SIGCHLD; | 258 | flags = CLONE_FILES | SIGCHLD; |
263 | if(proc_mm) flags |= CLONE_VM; | 259 | if (proc_mm) |
260 | flags |= CLONE_VM; | ||
261 | |||
264 | pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack); | 262 | pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack); |
265 | if(pid < 0) | 263 | if (pid < 0) |
266 | panic("start_userspace : clone failed, errno = %d", errno); | 264 | panic("start_userspace : clone failed, errno = %d", errno); |
267 | 265 | ||
268 | do { | 266 | do { |
269 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); | 267 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); |
270 | if(n < 0) | 268 | if (n < 0) |
271 | panic("start_userspace : wait failed, errno = %d", | 269 | panic("start_userspace : wait failed, errno = %d", |
272 | errno); | 270 | errno); |
273 | } while(WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM)); | 271 | } while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM)); |
274 | 272 | ||
275 | if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) | 273 | if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) |
276 | panic("start_userspace : expected SIGSTOP, got status = %d", | 274 | panic("start_userspace : expected SIGSTOP, got status = %d", |
277 | status); | 275 | status); |
278 | 276 | ||
279 | if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD) < 0) | 277 | if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL, |
280 | panic("start_userspace : PTRACE_OLDSETOPTIONS failed, errno=%d\n", | 278 | (void *) PTRACE_O_TRACESYSGOOD) < 0) |
281 | errno); | 279 | panic("start_userspace : PTRACE_OLDSETOPTIONS failed, " |
280 | "errno = %d\n", errno); | ||
282 | 281 | ||
283 | if(munmap(stack, UM_KERN_PAGE_SIZE) < 0) | 282 | if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) |
284 | panic("start_userspace : munmap failed, errno = %d\n", errno); | 283 | panic("start_userspace : munmap failed, errno = %d\n", errno); |
285 | 284 | ||
286 | return(pid); | 285 | return pid; |
287 | } | 286 | } |
288 | 287 | ||
289 | void userspace(struct uml_pt_regs *regs) | 288 | void userspace(struct uml_pt_regs *regs) |
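
start_userspace() builds the helper process by hand: mmap a one-page stack, clone() it with CLONE_FILES (plus CLONE_VM when /proc/mm is available), wait for its initial SIGSTOP, then enable TRACESYSGOOD. The sketch below shows that handshake in standalone form; it uses the standard PTRACE_SETOPTIONS request instead of PTRACE_OLDSETOPTIONS, and lets the child make itself traceable with PTRACE_TRACEME, which the real stub arranges differently.

/* Sketch: spawn a ptraced helper on its own stack and enable TRACESYSGOOD. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#define STACK_SIZE 4096

static int child_fn(void *arg)
{
	(void) arg;
	ptrace(PTRACE_TRACEME, 0, 0, 0);
	raise(SIGSTOP);			/* hand control back to the parent */
	return 0;
}

static int spawn_traced(void)
{
	void *stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pid, status;

	if (stack == MAP_FAILED)
		return -1;

	pid = clone(child_fn, (char *) stack + STACK_SIZE,
		    CLONE_FILES | SIGCHLD, NULL);
	if (pid < 0)
		return -1;

	if (waitpid(pid, &status, WUNTRACED) < 0)
		return -1;
	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
		return -1;

	if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0)
		return -1;

	munmap(stack, STACK_SIZE);
	return pid;
}
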
@@ -292,7 +291,7 @@ void userspace(struct uml_pt_regs *regs) | |||
292 | /* To prevent races if using_sysemu changes under us.*/ | 291 | /* To prevent races if using_sysemu changes under us.*/ |
293 | int local_using_sysemu; | 292 | int local_using_sysemu; |
294 | 293 | ||
295 | while(1){ | 294 | while (1) { |
296 | restore_registers(pid, regs); | 295 | restore_registers(pid, regs); |
297 | 296 | ||
298 | /* Now we set local_using_sysemu to be used for one loop */ | 297 | /* Now we set local_using_sysemu to be used for one loop */ |
@@ -302,13 +301,13 @@ void userspace(struct uml_pt_regs *regs) | |||
302 | singlestepping(NULL)); | 301 | singlestepping(NULL)); |
303 | 302 | ||
304 | err = ptrace(op, pid, 0, 0); | 303 | err = ptrace(op, pid, 0, 0); |
305 | if(err) | 304 | if (err) |
306 | panic("userspace - could not resume userspace process, " | 305 | panic("userspace - could not resume userspace process, " |
307 | "pid=%d, ptrace operation = %d, errno = %d\n", | 306 | "pid=%d, ptrace operation = %d, errno = %d\n", |
308 | pid, op, errno); | 307 | pid, op, errno); |
309 | 308 | ||
310 | CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED)); | 309 | CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED)); |
311 | if(err < 0) | 310 | if (err < 0) |
312 | panic("userspace - waitpid failed, errno = %d\n", | 311 | panic("userspace - waitpid failed, errno = %d\n", |
313 | errno); | 312 | errno); |
314 | 313 | ||
@@ -316,12 +315,14 @@ void userspace(struct uml_pt_regs *regs) | |||
316 | save_registers(pid, regs); | 315 | save_registers(pid, regs); |
317 | UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */ | 316 | UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */ |
318 | 317 | ||
319 | if(WIFSTOPPED(status)){ | 318 | if (WIFSTOPPED(status)) { |
320 | int sig = WSTOPSIG(status); | 319 | int sig = WSTOPSIG(status); |
321 | switch(sig){ | 320 | switch(sig) { |
322 | case SIGSEGV: | 321 | case SIGSEGV: |
323 | if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo){ | 322 | if (PTRACE_FULL_FAULTINFO || |
324 | get_skas_faultinfo(pid, ®s->faultinfo); | 323 | !ptrace_faultinfo) { |
324 | get_skas_faultinfo(pid, | ||
325 | ®s->faultinfo); | ||
325 | (*sig_info[SIGSEGV])(SIGSEGV, regs); | 326 | (*sig_info[SIGSEGV])(SIGSEGV, regs); |
326 | } | 327 | } |
327 | else handle_segv(pid, regs); | 328 | else handle_segv(pid, regs); |
@@ -343,14 +344,14 @@ void userspace(struct uml_pt_regs *regs) | |||
343 | unblock_signals(); | 344 | unblock_signals(); |
344 | break; | 345 | break; |
345 | default: | 346 | default: |
346 | printk("userspace - child stopped with signal " | 347 | printk(UM_KERN_ERR "userspace - child stopped " |
347 | "%d\n", sig); | 348 | "with signal %d\n", sig); |
348 | } | 349 | } |
349 | pid = userspace_pid[0]; | 350 | pid = userspace_pid[0]; |
350 | interrupt_end(); | 351 | interrupt_end(); |
351 | 352 | ||
352 | /* Avoid -ERESTARTSYS handling in host */ | 353 | /* Avoid -ERESTARTSYS handling in host */ |
353 | if(PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET) | 354 | if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET) |
354 | PT_SYSCALL_NR(regs->regs) = -1; | 355 | PT_SYSCALL_NR(regs->regs) = -1; |
355 | } | 356 | } |
356 | } | 357 | } |
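
Each pass through userspace() picks a resume request - plain PTRACE_SYSCALL, PTRACE_SYSEMU, or their singlestep variants - from the cached local_using_sysemu value and the singlestepping state, then dispatches on the stop signal as in the hunks above. A simplified version of that selection is below; the real SELECT_PTRACE_OPERATION macro also distinguishes the two sysemu support levels, so treat this as an approximation.

/* Sketch: choose the ptrace resume request from the cached sysemu flag and
 * the singlestep state. 31/32 are the x86 request numbers for SYSEMU. */
#include <sys/ptrace.h>

#ifndef PTRACE_SYSEMU
#define PTRACE_SYSEMU 31
#endif
#ifndef PTRACE_SYSEMU_SINGLESTEP
#define PTRACE_SYSEMU_SINGLESTEP 32
#endif

static int choose_resume_op(int using_sysemu, int singlestep)
{
	if (singlestep)
		return using_sysemu ? PTRACE_SYSEMU_SINGLESTEP
				    : PTRACE_SINGLESTEP;
	return using_sysemu ? PTRACE_SYSEMU : PTRACE_SYSCALL;
}
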
@@ -384,7 +385,8 @@ int copy_context_skas0(unsigned long new_stack, int pid) | |||
384 | __u64 new_offset; | 385 | __u64 new_offset; |
385 | int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset); | 386 | int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset); |
386 | 387 | ||
387 | /* prepare offset and fd of child's stack as argument for parent's | 388 | /* |
389 | * prepare offset and fd of child's stack as argument for parent's | ||
388 | * and child's mmap2 calls | 390 | * and child's mmap2 calls |
389 | */ | 391 | */ |
390 | *data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset), | 392 | *data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset), |
@@ -393,28 +395,30 @@ int copy_context_skas0(unsigned long new_stack, int pid) | |||
393 | { { 0, 1000000 / hz() }, | 395 | { { 0, 1000000 / hz() }, |
394 | { 0, 1000000 / hz() }})}); | 396 | { 0, 1000000 / hz() }})}); |
395 | err = ptrace_setregs(pid, thread_regs); | 397 | err = ptrace_setregs(pid, thread_regs); |
396 | if(err < 0) | 398 | if (err < 0) |
397 | panic("copy_context_skas0 : PTRACE_SETREGS failed, " | 399 | panic("copy_context_skas0 : PTRACE_SETREGS failed, " |
398 | "pid = %d, errno = %d\n", pid, -err); | 400 | "pid = %d, errno = %d\n", pid, -err); |
399 | 401 | ||
400 | /* set a well known return code for detection of child write failure */ | 402 | /* set a well known return code for detection of child write failure */ |
401 | child_data->err = 12345678; | 403 | child_data->err = 12345678; |
402 | 404 | ||
403 | /* Wait, until parent has finished its work: read child's pid from | 405 | /* |
406 | * Wait, until parent has finished its work: read child's pid from | ||
404 | * parent's stack, and check, if bad result. | 407 | * parent's stack, and check, if bad result. |
405 | */ | 408 | */ |
406 | err = ptrace(PTRACE_CONT, pid, 0, 0); | 409 | err = ptrace(PTRACE_CONT, pid, 0, 0); |
407 | if(err) | 410 | if (err) |
408 | panic("Failed to continue new process, pid = %d, " | 411 | panic("Failed to continue new process, pid = %d, " |
409 | "errno = %d\n", pid, errno); | 412 | "errno = %d\n", pid, errno); |
410 | wait_stub_done(pid); | 413 | wait_stub_done(pid); |
411 | 414 | ||
412 | pid = data->err; | 415 | pid = data->err; |
413 | if(pid < 0) | 416 | if (pid < 0) |
414 | panic("copy_context_skas0 - stub-parent reports error %d\n", | 417 | panic("copy_context_skas0 - stub-parent reports error %d\n", |
415 | -pid); | 418 | -pid); |
416 | 419 | ||
417 | /* Wait, until child has finished too: read child's result from | 420 | /* |
421 | * Wait, until child has finished too: read child's result from | ||
418 | * child's stack and check it. | 422 | * child's stack and check it. |
419 | */ | 423 | */ |
420 | wait_stub_done(pid); | 424 | wait_stub_done(pid); |
@@ -455,15 +459,16 @@ void map_stub_pages(int fd, unsigned long code, | |||
455 | .offset = code_offset | 459 | .offset = code_offset |
456 | } } }); | 460 | } } }); |
457 | CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop))); | 461 | CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop))); |
458 | if(n != sizeof(mmop)){ | 462 | if (n != sizeof(mmop)) { |
459 | n = errno; | 463 | n = errno; |
460 | printk("mmap args - addr = 0x%lx, fd = %d, offset = %llx\n", | 464 | printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, " |
461 | code, code_fd, (unsigned long long) code_offset); | 465 | "offset = %llx\n", code, code_fd, |
466 | (unsigned long long) code_offset); | ||
462 | panic("map_stub_pages : /proc/mm map for code failed, " | 467 | panic("map_stub_pages : /proc/mm map for code failed, " |
463 | "err = %d\n", n); | 468 | "err = %d\n", n); |
464 | } | 469 | } |
465 | 470 | ||
466 | if ( stack ) { | 471 | if (stack) { |
467 | __u64 map_offset; | 472 | __u64 map_offset; |
468 | int map_fd = phys_mapping(to_phys((void *)stack), &map_offset); | 473 | int map_fd = phys_mapping(to_phys((void *)stack), &map_offset); |
469 | mmop = ((struct proc_mm_op) | 474 | mmop = ((struct proc_mm_op) |
@@ -478,7 +483,7 @@ void map_stub_pages(int fd, unsigned long code, | |||
478 | .offset = map_offset | 483 | .offset = map_offset |
479 | } } }); | 484 | } } }); |
480 | CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop))); | 485 | CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop))); |
481 | if(n != sizeof(mmop)) | 486 | if (n != sizeof(mmop)) |
482 | panic("map_stub_pages : /proc/mm map for data failed, " | 487 | panic("map_stub_pages : /proc/mm map for data failed, " |
483 | "err = %d\n", errno); | 488 | "err = %d\n", errno); |
484 | } | 489 | } |
@@ -498,7 +503,7 @@ void new_thread(void *stack, jmp_buf *buf, void (*handler)(void)) | |||
498 | 503 | ||
499 | void switch_threads(jmp_buf *me, jmp_buf *you) | 504 | void switch_threads(jmp_buf *me, jmp_buf *you) |
500 | { | 505 | { |
501 | if(UML_SETJMP(me) == 0) | 506 | if (UML_SETJMP(me) == 0) |
502 | UML_LONGJMP(you, 1); | 507 | UML_LONGJMP(you, 1); |
503 | } | 508 | } |
504 | 509 | ||
@@ -526,7 +531,7 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf) | |||
526 | * after returning to the jumper. | 531 | * after returning to the jumper. |
527 | */ | 532 | */ |
528 | n = setjmp(initial_jmpbuf); | 533 | n = setjmp(initial_jmpbuf); |
529 | switch(n){ | 534 | switch(n) { |
530 | case INIT_JMP_NEW_THREAD: | 535 | case INIT_JMP_NEW_THREAD: |
531 | (*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler; | 536 | (*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler; |
532 | (*switch_buf)[0].JB_SP = (unsigned long) stack + | 537 | (*switch_buf)[0].JB_SP = (unsigned long) stack + |
@@ -538,10 +543,10 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf) | |||
538 | break; | 543 | break; |
539 | case INIT_JMP_HALT: | 544 | case INIT_JMP_HALT: |
540 | kmalloc_ok = 0; | 545 | kmalloc_ok = 0; |
541 | return(0); | 546 | return 0; |
542 | case INIT_JMP_REBOOT: | 547 | case INIT_JMP_REBOOT: |
543 | kmalloc_ok = 0; | 548 | kmalloc_ok = 0; |
544 | return(1); | 549 | return 1; |
545 | default: | 550 | default: |
546 | panic("Bad sigsetjmp return in start_idle_thread - %d\n", n); | 551 | panic("Bad sigsetjmp return in start_idle_thread - %d\n", n); |
547 | } | 552 | } |
@@ -557,7 +562,7 @@ void initial_thread_cb_skas(void (*proc)(void *), void *arg) | |||
557 | cb_back = &here; | 562 | cb_back = &here; |
558 | 563 | ||
559 | block_signals(); | 564 | block_signals(); |
560 | if(UML_SETJMP(&here) == 0) | 565 | if (UML_SETJMP(&here) == 0) |
561 | UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK); | 566 | UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK); |
562 | unblock_signals(); | 567 | unblock_signals(); |
563 | 568 | ||
@@ -583,10 +588,10 @@ void __switch_mm(struct mm_id *mm_idp) | |||
583 | int err; | 588 | int err; |
584 | 589 | ||
585 | /* FIXME: need cpu pid in __switch_mm */ | 590 | /* FIXME: need cpu pid in __switch_mm */ |
586 | if(proc_mm){ | 591 | if (proc_mm) { |
587 | err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0, | 592 | err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0, |
588 | mm_idp->u.mm_fd); | 593 | mm_idp->u.mm_fd); |
589 | if(err) | 594 | if (err) |
590 | panic("__switch_mm - PTRACE_SWITCH_MM failed, " | 595 | panic("__switch_mm - PTRACE_SWITCH_MM failed, " |
591 | "errno = %d\n", errno); | 596 | "errno = %d\n", errno); |
592 | } | 597 | } |
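
The remaining process.c hunks (switch_threads, start_idle_thread, initial_thread_cb_skas, __switch_mm) are brace and return-style cleanups around the jmp_buf-based context switching. The mechanism itself is paired setjmp/longjmp - save the current context, jump into the other one - which the following toy sketch shows; the real code uses the UML_SETJMP/UML_LONGJMP wrappers so signal state is handled as well.

/* Sketch: cooperative switch between two saved contexts, as in
 * switch_threads(). */
#include <setjmp.h>

static void switch_contexts(jmp_buf *me, jmp_buf *you)
{
	if (setjmp(*me) == 0)
		longjmp(*you, 1);
	/* Execution resumes here when another context longjmps back into *me. */
}
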
diff --git a/arch/um/os-Linux/skas/trap.c b/arch/um/os-Linux/skas/trap.c index d43e470227de..e53face44200 100644 --- a/arch/um/os-Linux/skas/trap.c +++ b/arch/um/os-Linux/skas/trap.c | |||
@@ -1,19 +1,23 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 - 2003 Jeff Dike (jdike@addtoit.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <signal.h> | 6 | #if 0 |
7 | #include <errno.h> | ||
8 | #include "kern_util.h" | 7 | #include "kern_util.h" |
9 | #include "as-layout.h" | ||
10 | #include "task.h" | ||
11 | #include "sigcontext.h" | ||
12 | #include "skas.h" | 8 | #include "skas.h" |
13 | #include "ptrace_user.h" | 9 | #include "ptrace_user.h" |
14 | #include "sysdep/ptrace.h" | ||
15 | #include "sysdep/ptrace_user.h" | 10 | #include "sysdep/ptrace_user.h" |
11 | #endif | ||
12 | |||
13 | #include <errno.h> | ||
14 | #include <signal.h> | ||
15 | #include "sysdep/ptrace.h" | ||
16 | #include "kern_constants.h" | ||
17 | #include "as-layout.h" | ||
16 | #include "os.h" | 18 | #include "os.h" |
19 | #include "sigcontext.h" | ||
20 | #include "task.h" | ||
17 | 21 | ||
18 | static struct uml_pt_regs ksig_regs[UM_NR_CPUS]; | 22 | static struct uml_pt_regs ksig_regs[UM_NR_CPUS]; |
19 | 23 | ||
@@ -24,14 +28,16 @@ void sig_handler_common_skas(int sig, void *sc_ptr) | |||
24 | void (*handler)(int, struct uml_pt_regs *); | 28 | void (*handler)(int, struct uml_pt_regs *); |
25 | int save_user, save_errno = errno; | 29 | int save_user, save_errno = errno; |
26 | 30 | ||
27 | /* This is done because to allow SIGSEGV to be delivered inside a SEGV | 31 | /* |
32 | * This is done to allow SIGSEGV to be delivered inside a SEGV | ||
28 | * handler. This can happen in copy_user, and if SEGV is disabled, | 33 | * handler. This can happen in copy_user, and if SEGV is disabled, |
29 | * the process will die. | 34 | * the process will die. |
30 | * XXX Figure out why this is better than SA_NODEFER | 35 | * XXX Figure out why this is better than SA_NODEFER |
31 | */ | 36 | */ |
32 | if(sig == SIGSEGV) { | 37 | if (sig == SIGSEGV) { |
33 | change_sig(SIGSEGV, 1); | 38 | change_sig(SIGSEGV, 1); |
34 | /* For segfaults, we want the data from the | 39 | /* |
40 | * For segfaults, we want the data from the | ||
35 | * sigcontext. In this case, we don't want to mangle | 41 | * sigcontext. In this case, we don't want to mangle |
36 | * the process registers, so use a static set of | 42 | * the process registers, so use a static set of |
37 | * registers. For other signals, the process | 43 | * registers. For other signals, the process |
@@ -44,11 +50,9 @@ void sig_handler_common_skas(int sig, void *sc_ptr) | |||
44 | 50 | ||
45 | save_user = r->is_user; | 51 | save_user = r->is_user; |
46 | r->is_user = 0; | 52 | r->is_user = 0; |
47 | if ( sig == SIGFPE || sig == SIGSEGV || | 53 | if ((sig == SIGFPE) || (sig == SIGSEGV) || (sig == SIGBUS) || |
48 | sig == SIGBUS || sig == SIGILL || | 54 | (sig == SIGILL) || (sig == SIGTRAP)) |
49 | sig == SIGTRAP ) { | ||
50 | GET_FAULTINFO_FROM_SC(r->faultinfo, sc); | 55 | GET_FAULTINFO_FROM_SC(r->faultinfo, sc); |
51 | } | ||
52 | 56 | ||
53 | change_sig(SIGUSR1, 1); | 57 | change_sig(SIGUSR1, 1); |
54 | 58 | ||
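
sig_handler_common_skas() re-enables SIGSEGV while already inside a SEGV handler so that faults taken in copy_user don't kill the process; change_sig() is UML's wrapper for that host-side signal-mask change. A rough standalone approximation using sigprocmask, offered only to show the idea:

/* Sketch: unblock SIGSEGV from inside a SIGSEGV handler, roughly what
 * change_sig(SIGSEGV, 1) amounts to on the host side. */
#include <signal.h>

static void allow_nested_segv(void)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGSEGV);
	sigprocmask(SIG_UNBLOCK, &set, NULL);
}
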
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index abfc094c3c49..f22715868929 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c | |||
@@ -1,41 +1,29 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <pty.h> | ||
7 | #include <stdio.h> | 6 | #include <stdio.h> |
8 | #include <stddef.h> | ||
9 | #include <stdarg.h> | ||
10 | #include <stdlib.h> | 7 | #include <stdlib.h> |
11 | #include <string.h> | 8 | #include <stdarg.h> |
12 | #include <unistd.h> | 9 | #include <unistd.h> |
13 | #include <signal.h> | ||
14 | #include <sched.h> | ||
15 | #include <fcntl.h> | ||
16 | #include <errno.h> | 10 | #include <errno.h> |
17 | #include <sys/time.h> | 11 | #include <fcntl.h> |
18 | #include <sys/wait.h> | 12 | #include <sched.h> |
13 | #include <signal.h> | ||
14 | #include <string.h> | ||
19 | #include <sys/mman.h> | 15 | #include <sys/mman.h> |
20 | #include <sys/resource.h> | 16 | #include <sys/ptrace.h> |
17 | #include <sys/stat.h> | ||
18 | #include <sys/wait.h> | ||
21 | #include <asm/unistd.h> | 19 | #include <asm/unistd.h> |
22 | #include <sys/types.h> | ||
23 | #include "kern_util.h" | ||
24 | #include "user.h" | ||
25 | #include "signal_kern.h" | ||
26 | #include "sysdep/ptrace.h" | ||
27 | #include "sysdep/sigcontext.h" | ||
28 | #include "irq_user.h" | ||
29 | #include "ptrace_user.h" | ||
30 | #include "mem_user.h" | ||
31 | #include "init.h" | 20 | #include "init.h" |
32 | #include "os.h" | ||
33 | #include "uml-config.h" | ||
34 | #include "tempfile.h" | ||
35 | #include "kern_constants.h" | 21 | #include "kern_constants.h" |
36 | #include "skas.h" | 22 | #include "os.h" |
37 | #include "skas_ptrace.h" | 23 | #include "mem_user.h" |
24 | #include "ptrace_user.h" | ||
38 | #include "registers.h" | 25 | #include "registers.h" |
26 | #include "skas_ptrace.h" | ||
39 | 27 | ||
40 | static int ptrace_child(void *arg) | 28 | static int ptrace_child(void *arg) |
41 | { | 29 | { |
@@ -44,26 +32,33 @@ static int ptrace_child(void *arg) | |||
44 | int sc_result; | 32 | int sc_result; |
45 | 33 | ||
46 | change_sig(SIGWINCH, 0); | 34 | change_sig(SIGWINCH, 0); |
47 | if(ptrace(PTRACE_TRACEME, 0, 0, 0) < 0){ | 35 | if (ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) { |
48 | perror("ptrace"); | 36 | perror("ptrace"); |
49 | os_kill_process(pid, 0); | 37 | os_kill_process(pid, 0); |
50 | } | 38 | } |
51 | kill(pid, SIGSTOP); | 39 | kill(pid, SIGSTOP); |
52 | 40 | ||
53 | /*This syscall will be intercepted by the parent. Don't call more than | 41 | /* |
54 | * once, please.*/ | 42 | * This syscall will be intercepted by the parent. Don't call more than |
43 | * once, please. | ||
44 | */ | ||
55 | sc_result = os_getpid(); | 45 | sc_result = os_getpid(); |
56 | 46 | ||
57 | if (sc_result == pid) | 47 | if (sc_result == pid) |
58 | ret = 1; /*Nothing modified by the parent, we are running | 48 | /* Nothing modified by the parent, we are running normally. */ |
59 | normally.*/ | 49 | ret = 1; |
60 | else if (sc_result == ppid) | 50 | else if (sc_result == ppid) |
61 | ret = 0; /*Expected in check_ptrace and check_sysemu when they | 51 | /* |
62 | succeed in modifying the stack frame*/ | 52 | * Expected in check_ptrace and check_sysemu when they succeed |
53 | * in modifying the stack frame | ||
54 | */ | ||
55 | ret = 0; | ||
63 | else | 56 | else |
64 | ret = 2; /*Serious trouble! This could be caused by a bug in | 57 | /* Serious trouble! This could be caused by a bug in host 2.6 |
65 | host 2.6 SKAS3/2.6 patch before release -V6, together | 58 | * SKAS3/2.6 patch before release -V6, together with a bug in |
66 | with a bug in the UML code itself.*/ | 59 | * the UML code itself. |
60 | */ | ||
61 | ret = 2; | ||
67 | _exit(ret); | 62 | _exit(ret); |
68 | } | 63 | } |
69 | 64 | ||
@@ -104,16 +99,18 @@ static int start_ptraced_child(void **stack_out) | |||
104 | stack = mmap(NULL, UM_KERN_PAGE_SIZE, | 99 | stack = mmap(NULL, UM_KERN_PAGE_SIZE, |
105 | PROT_READ | PROT_WRITE | PROT_EXEC, | 100 | PROT_READ | PROT_WRITE | PROT_EXEC, |
106 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 101 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
107 | if(stack == MAP_FAILED) | 102 | if (stack == MAP_FAILED) |
108 | fatal_perror("check_ptrace : mmap failed"); | 103 | fatal_perror("check_ptrace : mmap failed"); |
104 | |||
109 | sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *); | 105 | sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *); |
110 | pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL); | 106 | pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL); |
111 | if(pid < 0) | 107 | if (pid < 0) |
112 | fatal_perror("start_ptraced_child : clone failed"); | 108 | fatal_perror("start_ptraced_child : clone failed"); |
109 | |||
113 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); | 110 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); |
114 | if(n < 0) | 111 | if (n < 0) |
115 | fatal_perror("check_ptrace : clone failed"); | 112 | fatal_perror("check_ptrace : clone failed"); |
116 | if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) | 113 | if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) |
117 | fatal("check_ptrace : expected SIGSTOP, got status = %d", | 114 | fatal("check_ptrace : expected SIGSTOP, got status = %d", |
118 | status); | 115 | status); |
119 | 116 | ||
@@ -132,10 +129,10 @@ static int stop_ptraced_child(int pid, void *stack, int exitcode, | |||
132 | { | 129 | { |
133 | int status, n, ret = 0; | 130 | int status, n, ret = 0; |
134 | 131 | ||
135 | if(ptrace(PTRACE_CONT, pid, 0, 0) < 0) | 132 | if (ptrace(PTRACE_CONT, pid, 0, 0) < 0) |
136 | fatal_perror("stop_ptraced_child : ptrace failed"); | 133 | fatal_perror("stop_ptraced_child : ptrace failed"); |
137 | CATCH_EINTR(n = waitpid(pid, &status, 0)); | 134 | CATCH_EINTR(n = waitpid(pid, &status, 0)); |
138 | if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) { | 135 | if (!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) { |
139 | int exit_with = WEXITSTATUS(status); | 136 | int exit_with = WEXITSTATUS(status); |
140 | if (exit_with == 2) | 137 | if (exit_with == 2) |
141 | non_fatal("check_ptrace : child exited with status 2. " | 138 | non_fatal("check_ptrace : child exited with status 2. " |
@@ -148,7 +145,7 @@ static int stop_ptraced_child(int pid, void *stack, int exitcode, | |||
148 | ret = -1; | 145 | ret = -1; |
149 | } | 146 | } |
150 | 147 | ||
151 | if(munmap(stack, UM_KERN_PAGE_SIZE) < 0) | 148 | if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) |
152 | fatal_perror("check_ptrace : munmap failed"); | 149 | fatal_perror("check_ptrace : munmap failed"); |
153 | return ret; | 150 | return ret; |
154 | } | 151 | } |
@@ -209,26 +206,26 @@ static void __init check_sysemu(void) | |||
209 | sysemu_supported = 0; | 206 | sysemu_supported = 0; |
210 | pid = start_ptraced_child(&stack); | 207 | pid = start_ptraced_child(&stack); |
211 | 208 | ||
212 | if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0) | 209 | if (ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0) |
213 | goto fail; | 210 | goto fail; |
214 | 211 | ||
215 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); | 212 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); |
216 | if (n < 0) | 213 | if (n < 0) |
217 | fatal_perror("check_sysemu : wait failed"); | 214 | fatal_perror("check_sysemu : wait failed"); |
218 | if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP)) | 215 | if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP)) |
219 | fatal("check_sysemu : expected SIGTRAP, got status = %d", | 216 | fatal("check_sysemu : expected SIGTRAP, got status = %d", |
220 | status); | 217 | status); |
221 | 218 | ||
222 | if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0) | 219 | if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0) |
223 | fatal_perror("check_sysemu : PTRACE_GETREGS failed"); | 220 | fatal_perror("check_sysemu : PTRACE_GETREGS failed"); |
224 | if(PT_SYSCALL_NR(regs) != __NR_getpid){ | 221 | if (PT_SYSCALL_NR(regs) != __NR_getpid) { |
225 | non_fatal("check_sysemu got system call number %d, " | 222 | non_fatal("check_sysemu got system call number %d, " |
226 | "expected %d...", PT_SYSCALL_NR(regs), __NR_getpid); | 223 | "expected %d...", PT_SYSCALL_NR(regs), __NR_getpid); |
227 | goto fail; | 224 | goto fail; |
228 | } | 225 | } |
229 | 226 | ||
230 | n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET, os_getpid()); | 227 | n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET, os_getpid()); |
231 | if(n < 0){ | 228 | if (n < 0) { |
232 | non_fatal("check_sysemu : failed to modify system call " | 229 | non_fatal("check_sysemu : failed to modify system call " |
233 | "return"); | 230 | "return"); |
234 | goto fail; | 231 | goto fail; |
@@ -244,30 +241,31 @@ static void __init check_sysemu(void) | |||
244 | non_fatal("Checking advanced syscall emulation patch for ptrace..."); | 241 | non_fatal("Checking advanced syscall emulation patch for ptrace..."); |
245 | pid = start_ptraced_child(&stack); | 242 | pid = start_ptraced_child(&stack); |
246 | 243 | ||
247 | if((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, | 244 | if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, |
248 | (void *) PTRACE_O_TRACESYSGOOD) < 0)) | 245 | (void *) PTRACE_O_TRACESYSGOOD) < 0)) |
249 | fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed"); | 246 | fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed"); |
250 | 247 | ||
251 | while(1){ | 248 | while (1) { |
252 | count++; | 249 | count++; |
253 | if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0) | 250 | if (ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0) |
254 | goto fail; | 251 | goto fail; |
255 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); | 252 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); |
256 | if(n < 0) | 253 | if (n < 0) |
257 | fatal_perror("check_ptrace : wait failed"); | 254 | fatal_perror("check_ptrace : wait failed"); |
258 | 255 | ||
259 | if(WIFSTOPPED(status) && (WSTOPSIG(status) == (SIGTRAP|0x80))){ | 256 | if (WIFSTOPPED(status) && |
257 | (WSTOPSIG(status) == (SIGTRAP|0x80))) { | ||
260 | if (!count) | 258 | if (!count) |
261 | fatal("check_ptrace : SYSEMU_SINGLESTEP " | 259 | fatal("check_ptrace : SYSEMU_SINGLESTEP " |
262 | "doesn't singlestep"); | 260 | "doesn't singlestep"); |
263 | n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET, | 261 | n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET, |
264 | os_getpid()); | 262 | os_getpid()); |
265 | if(n < 0) | 263 | if (n < 0) |
266 | fatal_perror("check_sysemu : failed to modify " | 264 | fatal_perror("check_sysemu : failed to modify " |
267 | "system call return"); | 265 | "system call return"); |
268 | break; | 266 | break; |
269 | } | 267 | } |
270 | else if(WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP)) | 268 | else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP)) |
271 | count++; | 269 | count++; |
272 | else | 270 | else |
273 | fatal("check_ptrace : expected SIGTRAP or " | 271 | fatal("check_ptrace : expected SIGTRAP or " |
@@ -279,7 +277,7 @@ static void __init check_sysemu(void) | |||
279 | sysemu_supported = 2; | 277 | sysemu_supported = 2; |
280 | non_fatal("OK\n"); | 278 | non_fatal("OK\n"); |
281 | 279 | ||
282 | if ( !force_sysemu_disabled ) | 280 | if (!force_sysemu_disabled) |
283 | set_using_sysemu(sysemu_supported); | 281 | set_using_sysemu(sysemu_supported); |
284 | return; | 282 | return; |
285 | 283 | ||
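
check_sysemu() probes the host at boot: resume the throwaway child with PTRACE_SYSEMU, expect a plain SIGTRAP stop, and confirm the intercepted syscall really is the getpid() the child issued (the full function then also patches the return value and repeats the test for SYSEMU_SINGLESTEP). A trimmed sketch of the basic probe; it reads the syscall number with PTRACE_PEEKUSER where the patch uses PTRACE_GETREGS, and syscall_nr_offset stands in for PT_SYSCALL_NR_OFFSET:

/* Sketch: detect PTRACE_SYSEMU support on the host. Assumes pid is a freshly
 * started, currently stopped ptrace child that is about to call getpid(), as
 * start_ptraced_child() arranges. */
#include <errno.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/unistd.h>

#ifndef PTRACE_SYSEMU
#define PTRACE_SYSEMU 31		/* x86 request number */
#endif

static int probe_sysemu(int pid, long syscall_nr_offset)
{
	int status;
	long nr;

	if (ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
		return 0;			/* not supported */

	if (waitpid(pid, &status, WUNTRACED) < 0)
		return -errno;
	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP))
		return 0;

	errno = 0;
	nr = ptrace(PTRACE_PEEKUSER, pid, syscall_nr_offset, 0);
	if ((nr == -1) && errno)
		return -errno;

	return nr == __NR_getpid;	/* intercepted, not executed */
}
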
@@ -297,29 +295,29 @@ static void __init check_ptrace(void) | |||
297 | non_fatal("Checking that ptrace can change system call numbers..."); | 295 | non_fatal("Checking that ptrace can change system call numbers..."); |
298 | pid = start_ptraced_child(&stack); | 296 | pid = start_ptraced_child(&stack); |
299 | 297 | ||
300 | if((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, | 298 | if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, |
301 | (void *) PTRACE_O_TRACESYSGOOD) < 0)) | 299 | (void *) PTRACE_O_TRACESYSGOOD) < 0)) |
302 | fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed"); | 300 | fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed"); |
303 | 301 | ||
304 | while(1){ | 302 | while (1) { |
305 | if(ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0) | 303 | if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0) |
306 | fatal_perror("check_ptrace : ptrace failed"); | 304 | fatal_perror("check_ptrace : ptrace failed"); |
307 | 305 | ||
308 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); | 306 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); |
309 | if(n < 0) | 307 | if (n < 0) |
310 | fatal_perror("check_ptrace : wait failed"); | 308 | fatal_perror("check_ptrace : wait failed"); |
311 | 309 | ||
312 | if(!WIFSTOPPED(status) || | 310 | if (!WIFSTOPPED(status) || |
313 | (WSTOPSIG(status) != (SIGTRAP | 0x80))) | 311 | (WSTOPSIG(status) != (SIGTRAP | 0x80))) |
314 | fatal("check_ptrace : expected (SIGTRAP|0x80), " | 312 | fatal("check_ptrace : expected (SIGTRAP|0x80), " |
315 | "got status = %d", status); | 313 | "got status = %d", status); |
316 | 314 | ||
317 | syscall = ptrace(PTRACE_PEEKUSR, pid, PT_SYSCALL_NR_OFFSET, | 315 | syscall = ptrace(PTRACE_PEEKUSR, pid, PT_SYSCALL_NR_OFFSET, |
318 | 0); | 316 | 0); |
319 | if(syscall == __NR_getpid){ | 317 | if (syscall == __NR_getpid) { |
320 | n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET, | 318 | n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET, |
321 | __NR_getppid); | 319 | __NR_getppid); |
322 | if(n < 0) | 320 | if (n < 0) |
323 | fatal_perror("check_ptrace : failed to modify " | 321 | fatal_perror("check_ptrace : failed to modify " |
324 | "system call"); | 322 | "system call"); |
325 | break; | 323 | break; |
@@ -337,18 +335,18 @@ static void __init check_coredump_limit(void) | |||
337 | struct rlimit lim; | 335 | struct rlimit lim; |
338 | int err = getrlimit(RLIMIT_CORE, &lim); | 336 | int err = getrlimit(RLIMIT_CORE, &lim); |
339 | 337 | ||
340 | if(err){ | 338 | if (err) { |
341 | perror("Getting core dump limit"); | 339 | perror("Getting core dump limit"); |
342 | return; | 340 | return; |
343 | } | 341 | } |
344 | 342 | ||
345 | printf("Core dump limits :\n\tsoft - "); | 343 | printf("Core dump limits :\n\tsoft - "); |
346 | if(lim.rlim_cur == RLIM_INFINITY) | 344 | if (lim.rlim_cur == RLIM_INFINITY) |
347 | printf("NONE\n"); | 345 | printf("NONE\n"); |
348 | else printf("%lu\n", lim.rlim_cur); | 346 | else printf("%lu\n", lim.rlim_cur); |
349 | 347 | ||
350 | printf("\thard - "); | 348 | printf("\thard - "); |
351 | if(lim.rlim_max == RLIM_INFINITY) | 349 | if (lim.rlim_max == RLIM_INFINITY) |
352 | printf("NONE\n"); | 350 | printf("NONE\n"); |
353 | else printf("%lu\n", lim.rlim_max); | 351 | else printf("%lu\n", lim.rlim_max); |
354 | } | 352 | } |
@@ -414,7 +412,7 @@ static inline void check_skas3_ptrace_faultinfo(void) | |||
414 | n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi); | 412 | n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi); |
415 | if (n < 0) { | 413 | if (n < 0) { |
416 | ptrace_faultinfo = 0; | 414 | ptrace_faultinfo = 0; |
417 | if(errno == EIO) | 415 | if (errno == EIO) |
418 | non_fatal("not found\n"); | 416 | non_fatal("not found\n"); |
419 | else | 417 | else |
420 | perror("not found"); | 418 | perror("not found"); |
@@ -446,7 +444,7 @@ static inline void check_skas3_ptrace_ldt(void) | |||
446 | 444 | ||
447 | n = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op); | 445 | n = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op); |
448 | if (n < 0) { | 446 | if (n < 0) { |
449 | if(errno == EIO) | 447 | if (errno == EIO) |
450 | non_fatal("not found\n"); | 448 | non_fatal("not found\n"); |
451 | else { | 449 | else { |
452 | perror("not found"); | 450 | perror("not found"); |
@@ -454,7 +452,7 @@ static inline void check_skas3_ptrace_ldt(void) | |||
454 | ptrace_ldt = 0; | 452 | ptrace_ldt = 0; |
455 | } | 453 | } |
456 | else { | 454 | else { |
457 | if(ptrace_ldt) | 455 | if (ptrace_ldt) |
458 | non_fatal("found\n"); | 456 | non_fatal("found\n"); |
459 | else | 457 | else |
460 | non_fatal("found, but use is disabled\n"); | 458 | non_fatal("found, but use is disabled\n"); |
@@ -477,12 +475,9 @@ static inline void check_skas3_proc_mm(void) | |||
477 | proc_mm = 0; | 475 | proc_mm = 0; |
478 | perror("not found"); | 476 | perror("not found"); |
479 | } | 477 | } |
480 | else { | 478 | else if (!proc_mm) |
481 | if (!proc_mm) | 479 | non_fatal("found but disabled on command line\n"); |
482 | non_fatal("found but disabled on command line\n"); | 480 | else non_fatal("found\n"); |
483 | else | ||
484 | non_fatal("found\n"); | ||
485 | } | ||
486 | } | 481 | } |
487 | 482 | ||
488 | int can_do_skas(void) | 483 | int can_do_skas(void) |
@@ -493,7 +488,7 @@ int can_do_skas(void) | |||
493 | check_skas3_ptrace_faultinfo(); | 488 | check_skas3_ptrace_faultinfo(); |
494 | check_skas3_ptrace_ldt(); | 489 | check_skas3_ptrace_ldt(); |
495 | 490 | ||
496 | if(!proc_mm || !ptrace_faultinfo || !ptrace_ldt) | 491 | if (!proc_mm || !ptrace_faultinfo || !ptrace_ldt) |
497 | skas_needs_stub = 1; | 492 | skas_needs_stub = 1; |
498 | 493 | ||
499 | return 1; | 494 | return 1; |
@@ -508,25 +503,25 @@ int __init parse_iomem(char *str, int *add) | |||
508 | 503 | ||
509 | driver = str; | 504 | driver = str; |
510 | file = strchr(str,','); | 505 | file = strchr(str,','); |
511 | if(file == NULL){ | 506 | if (file == NULL) { |
512 | printf("parse_iomem : failed to parse iomem\n"); | 507 | printf("parse_iomem : failed to parse iomem\n"); |
513 | goto out; | 508 | goto out; |
514 | } | 509 | } |
515 | *file = '\0'; | 510 | *file = '\0'; |
516 | file++; | 511 | file++; |
517 | fd = open(file, O_RDWR, 0); | 512 | fd = open(file, O_RDWR, 0); |
518 | if(fd < 0){ | 513 | if (fd < 0) { |
519 | os_print_error(fd, "parse_iomem - Couldn't open io file"); | 514 | os_print_error(fd, "parse_iomem - Couldn't open io file"); |
520 | goto out; | 515 | goto out; |
521 | } | 516 | } |
522 | 517 | ||
523 | if(fstat64(fd, &buf) < 0){ | 518 | if (fstat64(fd, &buf) < 0) { |
524 | perror("parse_iomem - cannot stat_fd file"); | 519 | perror("parse_iomem - cannot stat_fd file"); |
525 | goto out_close; | 520 | goto out_close; |
526 | } | 521 | } |
527 | 522 | ||
528 | new = malloc(sizeof(*new)); | 523 | new = malloc(sizeof(*new)); |
529 | if(new == NULL){ | 524 | if (new == NULL) { |
530 | perror("Couldn't allocate iomem_region struct"); | 525 | perror("Couldn't allocate iomem_region struct"); |
531 | goto out_close; | 526 | goto out_close; |
532 | } | 527 | } |
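
parse_iomem() splits its "driver,file" argument at the comma, opens the backing file and stats it before building an iomem_region. A stripped-down sketch of just that parsing and validation step (fstat here rather than fstat64, and the names are illustrative):

/* Sketch: split an "iomem=driver,file" style argument as parse_iomem() does,
 * then open and stat the backing file. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int split_iomem_arg(char *str)
{
	char *driver = str;
	char *file = strchr(str, ',');
	struct stat buf;
	int fd;

	if (file == NULL) {
		fprintf(stderr, "failed to parse iomem\n");
		return -1;
	}
	*file++ = '\0';

	fd = open(file, O_RDWR);
	if (fd < 0) {
		perror("couldn't open io file");
		return -1;
	}

	if (fstat(fd, &buf) < 0) {
		perror("cannot stat io file");
		close(fd);
		return -1;
	}

	fprintf(stderr, "driver %s backed by %s (%ld bytes)\n", driver, file,
		(long) buf.st_size);
	return fd;
}
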
diff --git a/arch/um/os-Linux/trap.c b/arch/um/os-Linux/trap.c index e87fb5362f44..be8e029f58b4 100644 --- a/arch/um/os-Linux/trap.c +++ b/arch/um/os-Linux/trap.c | |||
@@ -1,13 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <stdlib.h> | ||
7 | #include <signal.h> | 6 | #include <signal.h> |
8 | #include "kern_util.h" | ||
9 | #include "os.h" | 7 | #include "os.h" |
10 | #include "longjmp.h" | 8 | #include "sysdep/ptrace.h" |
11 | 9 | ||
12 | /* Initialized from linux_main() */ | 10 | /* Initialized from linux_main() */ |
13 | void (*sig_info[NSIG])(int, struct uml_pt_regs *); | 11 | void (*sig_info[NSIG])(int, struct uml_pt_regs *); |
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c index e36541e5ec00..106fa8641553 100644 --- a/arch/um/os-Linux/umid.c +++ b/arch/um/os-Linux/umid.c | |||
@@ -1,14 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
1 | #include <stdio.h> | 6 | #include <stdio.h> |
2 | #include <unistd.h> | ||
3 | #include <stdlib.h> | 7 | #include <stdlib.h> |
4 | #include <string.h> | 8 | #include <dirent.h> |
5 | #include <errno.h> | 9 | #include <errno.h> |
10 | #include <fcntl.h> | ||
6 | #include <signal.h> | 11 | #include <signal.h> |
7 | #include <dirent.h> | 12 | #include <string.h> |
8 | #include <sys/fcntl.h> | 13 | #include <unistd.h> |
9 | #include <sys/stat.h> | 14 | #include <sys/stat.h> |
10 | #include <sys/param.h> | ||
11 | #include "init.h" | 15 | #include "init.h" |
16 | #include "kern_constants.h" | ||
12 | #include "os.h" | 17 | #include "os.h" |
13 | #include "user.h" | 18 | #include "user.h" |
14 | 19 | ||
@@ -27,13 +32,13 @@ static int __init make_uml_dir(void) | |||
27 | char dir[512] = { '\0' }; | 32 | char dir[512] = { '\0' }; |
28 | int len, err; | 33 | int len, err; |
29 | 34 | ||
30 | if(*uml_dir == '~'){ | 35 | if (*uml_dir == '~') { |
31 | char *home = getenv("HOME"); | 36 | char *home = getenv("HOME"); |
32 | 37 | ||
33 | err = -ENOENT; | 38 | err = -ENOENT; |
34 | if(home == NULL){ | 39 | if (home == NULL) { |
35 | printk("make_uml_dir : no value in environment for " | 40 | printk(UM_KERN_ERR "make_uml_dir : no value in " |
36 | "$HOME\n"); | 41 | "environment for $HOME\n"); |
37 | goto err; | 42 | goto err; |
38 | } | 43 | } |
39 | strlcpy(dir, home, sizeof(dir)); | 44 | strlcpy(dir, home, sizeof(dir)); |
@@ -52,7 +57,7 @@ static int __init make_uml_dir(void) | |||
52 | } | 57 | } |
53 | strcpy(uml_dir, dir); | 58 | strcpy(uml_dir, dir); |
54 | 59 | ||
55 | if((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)){ | 60 | if ((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)) { |
56 | printf("Failed to mkdir '%s': %s\n", uml_dir, strerror(errno)); | 61 | printf("Failed to mkdir '%s': %s\n", uml_dir, strerror(errno)); |
57 | err = -errno; | 62 | err = -errno; |
58 | goto err_free; | 63 | goto err_free; |
@@ -69,8 +74,8 @@ err: | |||
69 | /* | 74 | /* |
70 | * Unlinks the files contained in @dir and then removes @dir. | 75 | * Unlinks the files contained in @dir and then removes @dir. |
71 | * Doesn't handle directory trees, so it's not like rm -rf, but almost such. We | 76 | * Doesn't handle directory trees, so it's not like rm -rf, but almost such. We |
72 | * ignore ENOENT errors for anything (they happen, strangely enough - possibly due | 77 | * ignore ENOENT errors for anything (they happen, strangely enough - possibly |
73 | * to races between multiple dying UML threads). | 78 | * due to races between multiple dying UML threads). |
74 | */ | 79 | */ |
75 | static int remove_files_and_dir(char *dir) | 80 | static int remove_files_and_dir(char *dir) |
76 | { | 81 | { |
@@ -115,7 +120,8 @@ out: | |||
115 | return ret; | 120 | return ret; |
116 | } | 121 | } |
117 | 122 | ||
118 | /* This says that there isn't already a user of the specified directory even if | 123 | /* |
124 | * This says that there isn't already a user of the specified directory even if | ||
119 | * there are errors during the checking. This is because if these errors | 125 | * there are errors during the checking. This is because if these errors |
120 | * happen, the directory is unusable by the pre-existing UML, so we might as | 126 | * happen, the directory is unusable by the pre-existing UML, so we might as |
121 | * well take it over. This could happen either by | 127 | * well take it over. This could happen either by |
@@ -133,44 +139,45 @@ static inline int is_umdir_used(char *dir) | |||
133 | int dead, fd, p, n, err; | 139 | int dead, fd, p, n, err; |
134 | 140 | ||
135 | n = snprintf(file, sizeof(file), "%s/pid", dir); | 141 | n = snprintf(file, sizeof(file), "%s/pid", dir); |
136 | if(n >= sizeof(file)){ | 142 | if (n >= sizeof(file)) { |
137 | printk("is_umdir_used - pid filename too long\n"); | 143 | printk(UM_KERN_ERR "is_umdir_used - pid filename too long\n"); |
138 | err = -E2BIG; | 144 | err = -E2BIG; |
139 | goto out; | 145 | goto out; |
140 | } | 146 | } |
141 | 147 | ||
142 | dead = 0; | 148 | dead = 0; |
143 | fd = open(file, O_RDONLY); | 149 | fd = open(file, O_RDONLY); |
144 | if(fd < 0) { | 150 | if (fd < 0) { |
145 | fd = -errno; | 151 | fd = -errno; |
146 | if(fd != -ENOENT){ | 152 | if (fd != -ENOENT) { |
147 | printk("is_umdir_used : couldn't open pid file '%s', " | 153 | printk(UM_KERN_ERR "is_umdir_used : couldn't open pid " |
148 | "err = %d\n", file, -fd); | 154 | "file '%s', err = %d\n", file, -fd); |
149 | } | 155 | } |
150 | goto out; | 156 | goto out; |
151 | } | 157 | } |
152 | 158 | ||
153 | err = 0; | 159 | err = 0; |
154 | n = read(fd, pid, sizeof(pid)); | 160 | n = read(fd, pid, sizeof(pid)); |
155 | if(n < 0){ | 161 | if (n < 0) { |
156 | printk("is_umdir_used : couldn't read pid file '%s', " | 162 | printk(UM_KERN_ERR "is_umdir_used : couldn't read pid file " |
157 | "err = %d\n", file, errno); | 163 | "'%s', err = %d\n", file, errno); |
158 | goto out_close; | 164 | goto out_close; |
159 | } else if(n == 0){ | 165 | } else if (n == 0) { |
160 | printk("is_umdir_used : couldn't read pid file '%s', " | 166 | printk(UM_KERN_ERR "is_umdir_used : couldn't read pid file " |
161 | "0-byte read\n", file); | 167 | "'%s', 0-byte read\n", file); |
162 | goto out_close; | 168 | goto out_close; |
163 | } | 169 | } |
164 | 170 | ||
165 | p = strtoul(pid, &end, 0); | 171 | p = strtoul(pid, &end, 0); |
166 | if(end == pid){ | 172 | if (end == pid) { |
167 | printk("is_umdir_used : couldn't parse pid file '%s', " | 173 | printk(UM_KERN_ERR "is_umdir_used : couldn't parse pid file " |
168 | "errno = %d\n", file, errno); | 174 | "'%s', errno = %d\n", file, errno); |
169 | goto out_close; | 175 | goto out_close; |
170 | } | 176 | } |
171 | 177 | ||
172 | if((kill(p, 0) == 0) || (errno != ESRCH)){ | 178 | if ((kill(p, 0) == 0) || (errno != ESRCH)) { |
173 | printk("umid \"%s\" is already in use by pid %d\n", umid, p); | 179 | printk(UM_KERN_ERR "umid \"%s\" is already in use by pid %d\n", |
180 | umid, p); | ||
174 | return 1; | 181 | return 1; |
175 | } | 182 | } |
176 | 183 | ||
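
is_umdir_used() decides whether a leftover umid directory still belongs to a live UML by reading its pid file and probing the pid with kill(pid, 0), which sends nothing but still reports ESRCH for a dead process. The liveness test on its own:

/* Sketch: test whether a pid still exists without signalling it - the same
 * check is_umdir_used() applies to a stale umid directory. */
#include <errno.h>
#include <signal.h>

static int pid_is_alive(int pid)
{
	if (kill(pid, 0) == 0)
		return 1;		/* exists and we may signal it */
	return errno != ESRCH;		/* EPERM etc. still means it exists */
}
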
@@ -194,8 +201,8 @@ static int umdir_take_if_dead(char *dir) | |||
194 | 201 | ||
195 | ret = remove_files_and_dir(dir); | 202 | ret = remove_files_and_dir(dir); |
196 | if (ret) { | 203 | if (ret) { |
197 | printk("is_umdir_used - remove_files_and_dir failed with " | 204 | printk(UM_KERN_ERR "is_umdir_used - remove_files_and_dir " |
198 | "err = %d\n", ret); | 205 | "failed with err = %d\n", ret); |
199 | } | 206 | } |
200 | return ret; | 207 | return ret; |
201 | } | 208 | } |
@@ -206,27 +213,28 @@ static void __init create_pid_file(void) | |||
206 | char pid[sizeof("nnnnn\0")]; | 213 | char pid[sizeof("nnnnn\0")]; |
207 | int fd, n; | 214 | int fd, n; |
208 | 215 | ||
209 | if(umid_file_name("pid", file, sizeof(file))) | 216 | if (umid_file_name("pid", file, sizeof(file))) |
210 | return; | 217 | return; |
211 | 218 | ||
212 | fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644); | 219 | fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644); |
213 | if(fd < 0){ | 220 | if (fd < 0) { |
214 | printk("Open of machine pid file \"%s\" failed: %s\n", | 221 | printk(UM_KERN_ERR "Open of machine pid file \"%s\" failed: " |
215 | file, strerror(errno)); | 222 | "%s\n", file, strerror(errno)); |
216 | return; | 223 | return; |
217 | } | 224 | } |
218 | 225 | ||
219 | snprintf(pid, sizeof(pid), "%d\n", getpid()); | 226 | snprintf(pid, sizeof(pid), "%d\n", getpid()); |
220 | n = write(fd, pid, strlen(pid)); | 227 | n = write(fd, pid, strlen(pid)); |
221 | if(n != strlen(pid)) | 228 | if (n != strlen(pid)) |
222 | printk("Write of pid file failed - err = %d\n", errno); | 229 | printk(UM_KERN_ERR "Write of pid file failed - err = %d\n", |
230 | errno); | ||
223 | 231 | ||
224 | close(fd); | 232 | close(fd); |
225 | } | 233 | } |
226 | 234 | ||
227 | int __init set_umid(char *name) | 235 | int __init set_umid(char *name) |
228 | { | 236 | { |
229 | if(strlen(name) > UMID_LEN - 1) | 237 | if (strlen(name) > UMID_LEN - 1) |
230 | return -E2BIG; | 238 | return -E2BIG; |
231 | 239 | ||
232 | strlcpy(umid, name, sizeof(umid)); | 240 | strlcpy(umid, name, sizeof(umid)); |
@@ -242,18 +250,18 @@ int __init make_umid(void) | |||
242 | int fd, err; | 250 | int fd, err; |
243 | char tmp[256]; | 251 | char tmp[256]; |
244 | 252 | ||
245 | if(umid_setup) | 253 | if (umid_setup) |
246 | return 0; | 254 | return 0; |
247 | 255 | ||
248 | make_uml_dir(); | 256 | make_uml_dir(); |
249 | 257 | ||
250 | if(*umid == '\0'){ | 258 | if (*umid == '\0') { |
251 | strlcpy(tmp, uml_dir, sizeof(tmp)); | 259 | strlcpy(tmp, uml_dir, sizeof(tmp)); |
252 | strlcat(tmp, "XXXXXX", sizeof(tmp)); | 260 | strlcat(tmp, "XXXXXX", sizeof(tmp)); |
253 | fd = mkstemp(tmp); | 261 | fd = mkstemp(tmp); |
254 | if(fd < 0){ | 262 | if (fd < 0) { |
255 | printk("make_umid - mkstemp(%s) failed: %s\n", | 263 | printk(UM_KERN_ERR "make_umid - mkstemp(%s) failed: " |
256 | tmp, strerror(errno)); | 264 | "%s\n", tmp, strerror(errno)); |
257 | err = -errno; | 265 | err = -errno; |
258 | goto err; | 266 | goto err; |
259 | } | 267 | } |
@@ -262,11 +270,12 @@ int __init make_umid(void) | |||
262 | 270 | ||
263 | set_umid(&tmp[strlen(uml_dir)]); | 271 | set_umid(&tmp[strlen(uml_dir)]); |
264 | 272 | ||
265 | /* There's a nice tiny little race between this unlink and | 273 | /* |
274 | * There's a nice tiny little race between this unlink and | ||
266 | * the mkdir below. It'd be nice if there were a mkstemp | 275 | * the mkdir below. It'd be nice if there were a mkstemp |
267 | * for directories. | 276 | * for directories. |
268 | */ | 277 | */ |
269 | if(unlink(tmp)){ | 278 | if (unlink(tmp)) { |
270 | err = -errno; | 279 | err = -errno; |
271 | goto err; | 280 | goto err; |
272 | } | 281 | } |
@@ -274,9 +283,9 @@ int __init make_umid(void) | |||
274 | 283 | ||
275 | snprintf(tmp, sizeof(tmp), "%s%s", uml_dir, umid); | 284 | snprintf(tmp, sizeof(tmp), "%s%s", uml_dir, umid); |
276 | err = mkdir(tmp, 0777); | 285 | err = mkdir(tmp, 0777); |
277 | if(err < 0){ | 286 | if (err < 0) { |
278 | err = -errno; | 287 | err = -errno; |
279 | if(err != -EEXIST) | 288 | if (err != -EEXIST) |
280 | goto err; | 289 | goto err; |
281 | 290 | ||
282 | if (umdir_take_if_dead(tmp) < 0) | 291 | if (umdir_take_if_dead(tmp) < 0) |
@@ -284,9 +293,10 @@ int __init make_umid(void) | |||
284 | 293 | ||
285 | err = mkdir(tmp, 0777); | 294 | err = mkdir(tmp, 0777); |
286 | } | 295 | } |
287 | if(err){ | 296 | if (err) { |
288 | err = -errno; | 297 | err = -errno; |
289 | printk("Failed to create '%s' - err = %d\n", umid, -errno); | 298 | printk(UM_KERN_ERR "Failed to create '%s' - err = %d\n", umid, |
299 | errno); | ||
290 | goto err; | 300 | goto err; |
291 | } | 301 | } |
292 | 302 | ||
@@ -301,14 +311,15 @@ int __init make_umid(void) | |||
301 | 311 | ||
302 | static int __init make_umid_init(void) | 312 | static int __init make_umid_init(void) |
303 | { | 313 | { |
304 | if(!make_umid()) | 314 | if (!make_umid()) |
305 | return 0; | 315 | return 0; |
306 | 316 | ||
307 | /* If initializing with the given umid failed, then try again with | 317 | /* |
318 | * If initializing with the given umid failed, then try again with | ||
308 | * a random one. | 319 | * a random one. |
309 | */ | 320 | */ |
310 | printk("Failed to initialize umid \"%s\", trying with a random umid\n", | 321 | printk(UM_KERN_ERR "Failed to initialize umid \"%s\", trying with a " |
311 | umid); | 322 | "random umid\n", umid); |
312 | *umid = '\0'; | 323 | *umid = '\0'; |
313 | make_umid(); | 324 | make_umid(); |
314 | 325 | ||
@@ -322,12 +333,12 @@ int __init umid_file_name(char *name, char *buf, int len) | |||
322 | int n, err; | 333 | int n, err; |
323 | 334 | ||
324 | err = make_umid(); | 335 | err = make_umid(); |
325 | if(err) | 336 | if (err) |
326 | return err; | 337 | return err; |
327 | 338 | ||
328 | n = snprintf(buf, len, "%s%s/%s", uml_dir, umid, name); | 339 | n = snprintf(buf, len, "%s%s/%s", uml_dir, umid, name); |
329 | if(n >= len){ | 340 | if (n >= len) { |
330 | printk("umid_file_name : buffer too short\n"); | 341 | printk(UM_KERN_ERR "umid_file_name : buffer too short\n"); |
331 | return -E2BIG; | 342 | return -E2BIG; |
332 | } | 343 | } |
333 | 344 | ||
@@ -341,21 +352,22 @@ char *get_umid(void) | |||
341 | 352 | ||
342 | static int __init set_uml_dir(char *name, int *add) | 353 | static int __init set_uml_dir(char *name, int *add) |
343 | { | 354 | { |
344 | if(*name == '\0'){ | 355 | if (*name == '\0') { |
345 | printf("uml_dir can't be an empty string\n"); | 356 | printf("uml_dir can't be an empty string\n"); |
346 | return 0; | 357 | return 0; |
347 | } | 358 | } |
348 | 359 | ||
349 | if(name[strlen(name) - 1] == '/'){ | 360 | if (name[strlen(name) - 1] == '/') { |
350 | uml_dir = name; | 361 | uml_dir = name; |
351 | return 0; | 362 | return 0; |
352 | } | 363 | } |
353 | 364 | ||
354 | uml_dir = malloc(strlen(name) + 2); | 365 | uml_dir = malloc(strlen(name) + 2); |
355 | if(uml_dir == NULL){ | 366 | if (uml_dir == NULL) { |
356 | printf("Failed to malloc uml_dir - error = %d\n", errno); | 367 | printf("Failed to malloc uml_dir - error = %d\n", errno); |
357 | 368 | ||
358 | /* Return 0 here because do_initcalls doesn't look at | 369 | /* |
370 | * Return 0 here because do_initcalls doesn't look at | ||
359 | * the return value. | 371 | * the return value. |
360 | */ | 372 | */ |
361 | return 0; | 373 | return 0; |
@@ -376,7 +388,7 @@ static void remove_umid_dir(void) | |||
376 | 388 | ||
377 | sprintf(dir, "%s%s", uml_dir, umid); | 389 | sprintf(dir, "%s%s", uml_dir, umid); |
378 | err = remove_files_and_dir(dir); | 390 | err = remove_files_and_dir(dir); |
379 | if(err) | 391 | if (err) |
380 | printf("remove_umid_dir - remove_files_and_dir failed with " | 392 | printf("remove_umid_dir - remove_files_and_dir failed with " |
381 | "err = %d\n", err); | 393 | "err = %d\n", err); |
382 | } | 394 | } |
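The is_umdir_used() hunks above only reflow the existing logic, but the stale-pid-file test they preserve is worth spelling out: kill(p, 0) delivers no signal and fails with ESRCH only when no such process exists. A minimal standalone sketch of that idiom (ordinary userspace C, not part of this patch):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* kill(p, 0) performs only the permission/existence checks */
static int pid_is_alive(pid_t p)
{
	if (kill(p, 0) == 0)
		return 1;		/* process exists and is signallable */
	return errno != ESRCH;		/* EPERM etc. still means it exists */
}

int main(int argc, char **argv)
{
	pid_t p = (argc > 1) ? atoi(argv[1]) : getpid();

	printf("pid %d looks %s\n", (int) p, pid_is_alive(p) ? "alive" : "dead");
	return 0;
}

Only ESRCH proves that the pid recorded in the umid directory is gone, which is why umdir_take_if_dead() may then remove and reclaim the directory.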
diff --git a/arch/um/sys-i386/bugs.c b/arch/um/sys-i386/bugs.c index 25c1165d8093..806895d73bcc 100644 --- a/arch/um/sys-i386/bugs.c +++ b/arch/um/sys-i386/bugs.c | |||
@@ -1,18 +1,15 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <unistd.h> | ||
7 | #include <errno.h> | 6 | #include <errno.h> |
7 | #include <signal.h> | ||
8 | #include <string.h> | 8 | #include <string.h> |
9 | #include <sys/signal.h> | 9 | #include "kern_constants.h" |
10 | #include <asm/ldt.h> | ||
11 | #include "kern_util.h" | ||
12 | #include "user.h" | ||
13 | #include "sysdep/ptrace.h" | ||
14 | #include "task.h" | ||
15 | #include "os.h" | 10 | #include "os.h" |
11 | #include "task.h" | ||
12 | #include "user.h" | ||
16 | 13 | ||
17 | #define MAXTOKEN 64 | 14 | #define MAXTOKEN 64 |
18 | 15 | ||
@@ -30,18 +27,20 @@ static char token(int fd, char *buf, int len, char stop) | |||
30 | do { | 27 | do { |
31 | n = os_read_file(fd, ptr, sizeof(*ptr)); | 28 | n = os_read_file(fd, ptr, sizeof(*ptr)); |
32 | c = *ptr++; | 29 | c = *ptr++; |
33 | if(n != sizeof(*ptr)){ | 30 | if (n != sizeof(*ptr)) { |
34 | if(n == 0) | 31 | if (n == 0) |
35 | return 0; | 32 | return 0; |
36 | printk("Reading /proc/cpuinfo failed, err = %d\n", -n); | 33 | printk(UM_KERN_ERR "Reading /proc/cpuinfo failed, " |
37 | if(n < 0) | 34 | "err = %d\n", -n); |
35 | if (n < 0) | ||
38 | return n; | 36 | return n; |
39 | else return -EIO; | 37 | else return -EIO; |
40 | } | 38 | } |
41 | } while((c != '\n') && (c != stop) && (ptr < end)); | 39 | } while ((c != '\n') && (c != stop) && (ptr < end)); |
42 | 40 | ||
43 | if(ptr == end){ | 41 | if (ptr == end) { |
44 | printk("Failed to find '%c' in /proc/cpuinfo\n", stop); | 42 | printk(UM_KERN_ERR "Failed to find '%c' in /proc/cpuinfo\n", |
43 | stop); | ||
45 | return -1; | 44 | return -1; |
46 | } | 45 | } |
47 | *(ptr - 1) = '\0'; | 46 | *(ptr - 1) = '\0'; |
@@ -54,26 +53,27 @@ static int find_cpuinfo_line(int fd, char *key, char *scratch, int len) | |||
54 | char c; | 53 | char c; |
55 | 54 | ||
56 | scratch[len - 1] = '\0'; | 55 | scratch[len - 1] = '\0'; |
57 | while(1){ | 56 | while (1) { |
58 | c = token(fd, scratch, len - 1, ':'); | 57 | c = token(fd, scratch, len - 1, ':'); |
59 | if(c <= 0) | 58 | if (c <= 0) |
60 | return 0; | 59 | return 0; |
61 | else if(c != ':'){ | 60 | else if (c != ':') { |
62 | printk("Failed to find ':' in /proc/cpuinfo\n"); | 61 | printk(UM_KERN_ERR "Failed to find ':' in " |
62 | "/proc/cpuinfo\n"); | ||
63 | return 0; | 63 | return 0; |
64 | } | 64 | } |
65 | 65 | ||
66 | if(!strncmp(scratch, key, strlen(key))) | 66 | if (!strncmp(scratch, key, strlen(key))) |
67 | return 1; | 67 | return 1; |
68 | 68 | ||
69 | do { | 69 | do { |
70 | n = os_read_file(fd, &c, sizeof(c)); | 70 | n = os_read_file(fd, &c, sizeof(c)); |
71 | if(n != sizeof(c)){ | 71 | if (n != sizeof(c)) { |
72 | printk("Failed to find newline in " | 72 | printk(UM_KERN_ERR "Failed to find newline in " |
73 | "/proc/cpuinfo, err = %d\n", -n); | 73 | "/proc/cpuinfo, err = %d\n", -n); |
74 | return 0; | 74 | return 0; |
75 | } | 75 | } |
76 | } while(c != '\n'); | 76 | } while (c != '\n'); |
77 | } | 77 | } |
78 | return 0; | 78 | return 0; |
79 | } | 79 | } |
@@ -83,46 +83,50 @@ static int check_cpu_flag(char *feature, int *have_it) | |||
83 | char buf[MAXTOKEN], c; | 83 | char buf[MAXTOKEN], c; |
84 | int fd, len = ARRAY_SIZE(buf); | 84 | int fd, len = ARRAY_SIZE(buf); |
85 | 85 | ||
86 | printk("Checking for host processor %s support...", feature); | 86 | printk(UM_KERN_INFO "Checking for host processor %s support...", |
87 | feature); | ||
87 | fd = os_open_file("/proc/cpuinfo", of_read(OPENFLAGS()), 0); | 88 | fd = os_open_file("/proc/cpuinfo", of_read(OPENFLAGS()), 0); |
88 | if(fd < 0){ | 89 | if (fd < 0) { |
89 | printk("Couldn't open /proc/cpuinfo, err = %d\n", -fd); | 90 | printk(UM_KERN_ERR "Couldn't open /proc/cpuinfo, err = %d\n", |
91 | -fd); | ||
90 | return 0; | 92 | return 0; |
91 | } | 93 | } |
92 | 94 | ||
93 | *have_it = 0; | 95 | *have_it = 0; |
94 | if(!find_cpuinfo_line(fd, "flags", buf, ARRAY_SIZE(buf))) | 96 | if (!find_cpuinfo_line(fd, "flags", buf, ARRAY_SIZE(buf))) |
95 | goto out; | 97 | goto out; |
96 | 98 | ||
97 | c = token(fd, buf, len - 1, ' '); | 99 | c = token(fd, buf, len - 1, ' '); |
98 | if(c < 0) | 100 | if (c < 0) |
99 | goto out; | 101 | goto out; |
100 | else if(c != ' '){ | 102 | else if (c != ' ') { |
101 | printk("Failed to find ' ' in /proc/cpuinfo\n"); | 103 | printk(UM_KERN_ERR "Failed to find ' ' in /proc/cpuinfo\n"); |
102 | goto out; | 104 | goto out; |
103 | } | 105 | } |
104 | 106 | ||
105 | while(1){ | 107 | while (1) { |
106 | c = token(fd, buf, len - 1, ' '); | 108 | c = token(fd, buf, len - 1, ' '); |
107 | if(c < 0) | 109 | if (c < 0) |
108 | goto out; | 110 | goto out; |
109 | else if(c == '\n') break; | 111 | else if (c == '\n') |
112 | break; | ||
110 | 113 | ||
111 | if(!strcmp(buf, feature)){ | 114 | if (!strcmp(buf, feature)) { |
112 | *have_it = 1; | 115 | *have_it = 1; |
113 | goto out; | 116 | goto out; |
114 | } | 117 | } |
115 | } | 118 | } |
116 | out: | 119 | out: |
117 | if(*have_it == 0) | 120 | if (*have_it == 0) |
118 | printk("No\n"); | 121 | printk("No\n"); |
119 | else if(*have_it == 1) | 122 | else if (*have_it == 1) |
120 | printk("Yes\n"); | 123 | printk("Yes\n"); |
121 | os_close_file(fd); | 124 | os_close_file(fd); |
122 | return 1; | 125 | return 1; |
123 | } | 126 | } |
124 | 127 | ||
125 | #if 0 /* This doesn't work in tt mode, plus it's causing compilation problems | 128 | #if 0 /* |
129 | * This doesn't work in tt mode, plus it's causing compilation problems | ||
126 | * for some people. | 130 | * for some people. |
127 | */ | 131 | */ |
128 | static void disable_lcall(void) | 132 | static void disable_lcall(void) |
@@ -135,8 +139,9 @@ static void disable_lcall(void) | |||
135 | ldt.base_addr = 0; | 139 | ldt.base_addr = 0; |
136 | ldt.limit = 0; | 140 | ldt.limit = 0; |
137 | err = modify_ldt(1, &ldt, sizeof(ldt)); | 141 | err = modify_ldt(1, &ldt, sizeof(ldt)); |
138 | if(err) | 142 | if (err) |
139 | printk("Failed to disable lcall7 - errno = %d\n", errno); | 143 | printk(UM_KERN_ERR "Failed to disable lcall7 - errno = %d\n", |
144 | errno); | ||
140 | } | 145 | } |
141 | #endif | 146 | #endif |
142 | 147 | ||
@@ -151,14 +156,14 @@ void arch_check_bugs(void) | |||
151 | { | 156 | { |
152 | int have_it; | 157 | int have_it; |
153 | 158 | ||
154 | if(os_access("/proc/cpuinfo", OS_ACC_R_OK) < 0){ | 159 | if (os_access("/proc/cpuinfo", OS_ACC_R_OK) < 0) { |
155 | printk("/proc/cpuinfo not available - skipping CPU capability " | 160 | printk(UM_KERN_ERR "/proc/cpuinfo not available - skipping CPU " |
156 | "checks\n"); | 161 | "capability checks\n"); |
157 | return; | 162 | return; |
158 | } | 163 | } |
159 | if(check_cpu_flag("cmov", &have_it)) | 164 | if (check_cpu_flag("cmov", &have_it)) |
160 | host_has_cmov = have_it; | 165 | host_has_cmov = have_it; |
161 | if(check_cpu_flag("xmm", &have_it)) | 166 | if (check_cpu_flag("xmm", &have_it)) |
162 | host_has_xmm = have_it; | 167 | host_has_xmm = have_it; |
163 | } | 168 | } |
164 | 169 | ||
@@ -166,25 +171,26 @@ int arch_handle_signal(int sig, struct uml_pt_regs *regs) | |||
166 | { | 171 | { |
167 | unsigned char tmp[2]; | 172 | unsigned char tmp[2]; |
168 | 173 | ||
169 | /* This is testing for a cmov (0x0f 0x4x) instruction causing a | 174 | /* |
175 | * This is testing for a cmov (0x0f 0x4x) instruction causing a | ||
170 | * SIGILL in init. | 176 | * SIGILL in init. |
171 | */ | 177 | */ |
172 | if((sig != SIGILL) || (TASK_PID(get_current()) != 1)) | 178 | if ((sig != SIGILL) || (TASK_PID(get_current()) != 1)) |
173 | return 0; | 179 | return 0; |
174 | 180 | ||
175 | if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) | 181 | if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) |
176 | panic("SIGILL in init, could not read instructions!\n"); | 182 | panic("SIGILL in init, could not read instructions!\n"); |
177 | if((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40)) | 183 | if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40)) |
178 | return 0; | 184 | return 0; |
179 | 185 | ||
180 | if(host_has_cmov == 0) | 186 | if (host_has_cmov == 0) |
181 | panic("SIGILL caused by cmov, which this processor doesn't " | 187 | panic("SIGILL caused by cmov, which this processor doesn't " |
182 | "implement, boot a filesystem compiled for older " | 188 | "implement, boot a filesystem compiled for older " |
183 | "processors"); | 189 | "processors"); |
184 | else if(host_has_cmov == 1) | 190 | else if (host_has_cmov == 1) |
185 | panic("SIGILL caused by cmov, which this processor claims to " | 191 | panic("SIGILL caused by cmov, which this processor claims to " |
186 | "implement"); | 192 | "implement"); |
187 | else if(host_has_cmov == -1) | 193 | else if (host_has_cmov == -1) |
188 | panic("SIGILL caused by cmov, couldn't tell if this processor " | 194 | panic("SIGILL caused by cmov, couldn't tell if this processor " |
189 | "implements it, boot a filesystem compiled for older " | 195 | "implements it, boot a filesystem compiled for older " |
190 | "processors"); | 196 | "processors"); |
diff --git a/arch/um/sys-i386/fault.c b/arch/um/sys-i386/fault.c index cc06a5737df0..d670f68532f4 100644 --- a/arch/um/sys-i386/fault.c +++ b/arch/um/sys-i386/fault.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
@@ -20,9 +20,9 @@ int arch_fixup(unsigned long address, struct uml_pt_regs *regs) | |||
20 | const struct exception_table_entry *fixup; | 20 | const struct exception_table_entry *fixup; |
21 | 21 | ||
22 | fixup = search_exception_tables(address); | 22 | fixup = search_exception_tables(address); |
23 | if(fixup != 0){ | 23 | if (fixup != 0) { |
24 | UPT_IP(regs) = fixup->fixup; | 24 | UPT_IP(regs) = fixup->fixup; |
25 | return(1); | 25 | return 1; |
26 | } | 26 | } |
27 | return(0); | 27 | return 0; |
28 | } | 28 | } |
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c index 906c2a4e7279..0bf7572a80a3 100644 --- a/arch/um/sys-i386/ldt.c +++ b/arch/um/sys-i386/ldt.c | |||
@@ -1,34 +1,26 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/sched.h" | 6 | #include "linux/mm.h" |
7 | #include "linux/slab.h" | ||
8 | #include "linux/types.h" | ||
9 | #include "linux/errno.h" | ||
10 | #include "linux/spinlock.h" | ||
11 | #include "asm/uaccess.h" | ||
12 | #include "asm/smp.h" | ||
13 | #include "asm/ldt.h" | ||
14 | #include "asm/unistd.h" | 7 | #include "asm/unistd.h" |
15 | #include "kern.h" | ||
16 | #include "os.h" | 8 | #include "os.h" |
17 | 9 | #include "proc_mm.h" | |
18 | extern int modify_ldt(int func, void *ptr, unsigned long bytecount); | ||
19 | |||
20 | #include "skas.h" | 10 | #include "skas.h" |
21 | #include "skas_ptrace.h" | 11 | #include "skas_ptrace.h" |
22 | #include "asm/mmu_context.h" | 12 | #include "sysdep/tls.h" |
23 | #include "proc_mm.h" | 13 | |
14 | extern int modify_ldt(int func, void *ptr, unsigned long bytecount); | ||
24 | 15 | ||
25 | long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, | 16 | long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, |
26 | void **addr, int done) | 17 | void **addr, int done) |
27 | { | 18 | { |
28 | long res; | 19 | long res; |
29 | 20 | ||
30 | if(proc_mm){ | 21 | if (proc_mm) { |
31 | /* This is a special handling for the case, that the mm to | 22 | /* |
23 | * This is a special handling for the case, that the mm to | ||
32 | * modify isn't current->active_mm. | 24 | * modify isn't current->active_mm. |
33 | * If this is called directly by modify_ldt, | 25 | * If this is called directly by modify_ldt, |
34 | * (current->active_mm->context.skas.u == mm_idp) | 26 | * (current->active_mm->context.skas.u == mm_idp) |
@@ -40,12 +32,12 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, | |||
40 | * | 32 | * |
41 | * Note: I'm unsure: should interrupts be disabled here? | 33 | * Note: I'm unsure: should interrupts be disabled here? |
42 | */ | 34 | */ |
43 | if(!current->active_mm || current->active_mm == &init_mm || | 35 | if (!current->active_mm || current->active_mm == &init_mm || |
44 | mm_idp != &current->active_mm->context.skas.id) | 36 | mm_idp != &current->active_mm->context.skas.id) |
45 | __switch_mm(mm_idp); | 37 | __switch_mm(mm_idp); |
46 | } | 38 | } |
47 | 39 | ||
48 | if(ptrace_ldt) { | 40 | if (ptrace_ldt) { |
49 | struct ptrace_ldt ldt_op = (struct ptrace_ldt) { | 41 | struct ptrace_ldt ldt_op = (struct ptrace_ldt) { |
50 | .func = func, | 42 | .func = func, |
51 | .ptr = desc, | 43 | .ptr = desc, |
@@ -53,7 +45,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, | |||
53 | u32 cpu; | 45 | u32 cpu; |
54 | int pid; | 46 | int pid; |
55 | 47 | ||
56 | if(!proc_mm) | 48 | if (!proc_mm) |
57 | pid = mm_idp->u.pid; | 49 | pid = mm_idp->u.pid; |
58 | else { | 50 | else { |
59 | cpu = get_cpu(); | 51 | cpu = get_cpu(); |
@@ -62,7 +54,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, | |||
62 | 54 | ||
63 | res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op); | 55 | res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op); |
64 | 56 | ||
65 | if(proc_mm) | 57 | if (proc_mm) |
66 | put_cpu(); | 58 | put_cpu(); |
67 | } | 59 | } |
68 | else { | 60 | else { |
@@ -71,7 +63,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, | |||
71 | (sizeof(*desc) + sizeof(long) - 1) & | 63 | (sizeof(*desc) + sizeof(long) - 1) & |
72 | ~(sizeof(long) - 1), | 64 | ~(sizeof(long) - 1), |
73 | addr, &stub_addr); | 65 | addr, &stub_addr); |
74 | if(!res){ | 66 | if (!res) { |
75 | unsigned long args[] = { func, | 67 | unsigned long args[] = { func, |
76 | (unsigned long)stub_addr, | 68 | (unsigned long)stub_addr, |
77 | sizeof(*desc), | 69 | sizeof(*desc), |
@@ -81,12 +73,13 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, | |||
81 | } | 73 | } |
82 | } | 74 | } |
83 | 75 | ||
84 | if(proc_mm){ | 76 | if (proc_mm) { |
85 | /* This is the second part of special handling, that makes | 77 | /* |
78 | * This is the second part of special handling, that makes | ||
86 | * PTRACE_LDT possible to implement. | 79 | * PTRACE_LDT possible to implement. |
87 | */ | 80 | */ |
88 | if(current->active_mm && current->active_mm != &init_mm && | 81 | if (current->active_mm && current->active_mm != &init_mm && |
89 | mm_idp != &current->active_mm->context.skas.id) | 82 | mm_idp != &current->active_mm->context.skas.id) |
90 | __switch_mm(&current->active_mm->context.skas.id); | 83 | __switch_mm(&current->active_mm->context.skas.id); |
91 | } | 84 | } |
92 | 85 | ||
@@ -102,21 +95,22 @@ static long read_ldt_from_host(void __user * ptr, unsigned long bytecount) | |||
102 | .ptr = kmalloc(bytecount, GFP_KERNEL)}; | 95 | .ptr = kmalloc(bytecount, GFP_KERNEL)}; |
103 | u32 cpu; | 96 | u32 cpu; |
104 | 97 | ||
105 | if(ptrace_ldt.ptr == NULL) | 98 | if (ptrace_ldt.ptr == NULL) |
106 | return -ENOMEM; | 99 | return -ENOMEM; |
107 | 100 | ||
108 | /* This is called from sys_modify_ldt only, so userspace_pid gives | 101 | /* |
102 | * This is called from sys_modify_ldt only, so userspace_pid gives | ||
109 | * us the right number | 103 | * us the right number |
110 | */ | 104 | */ |
111 | 105 | ||
112 | cpu = get_cpu(); | 106 | cpu = get_cpu(); |
113 | res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt); | 107 | res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt); |
114 | put_cpu(); | 108 | put_cpu(); |
115 | if(res < 0) | 109 | if (res < 0) |
116 | goto out; | 110 | goto out; |
117 | 111 | ||
118 | n = copy_to_user(ptr, ptrace_ldt.ptr, res); | 112 | n = copy_to_user(ptr, ptrace_ldt.ptr, res); |
119 | if(n != 0) | 113 | if (n != 0) |
120 | res = -EFAULT; | 114 | res = -EFAULT; |
121 | 115 | ||
122 | out: | 116 | out: |
@@ -143,33 +137,32 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) | |||
143 | unsigned long size; | 137 | unsigned long size; |
144 | uml_ldt_t * ldt = &current->mm->context.skas.ldt; | 138 | uml_ldt_t * ldt = &current->mm->context.skas.ldt; |
145 | 139 | ||
146 | if(!ldt->entry_count) | 140 | if (!ldt->entry_count) |
147 | goto out; | 141 | goto out; |
148 | if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) | 142 | if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) |
149 | bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; | 143 | bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; |
150 | err = bytecount; | 144 | err = bytecount; |
151 | 145 | ||
152 | if(ptrace_ldt){ | 146 | if (ptrace_ldt) |
153 | return read_ldt_from_host(ptr, bytecount); | 147 | return read_ldt_from_host(ptr, bytecount); |
154 | } | ||
155 | 148 | ||
156 | down(&ldt->semaphore); | 149 | down(&ldt->semaphore); |
157 | if(ldt->entry_count <= LDT_DIRECT_ENTRIES){ | 150 | if (ldt->entry_count <= LDT_DIRECT_ENTRIES) { |
158 | size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES; | 151 | size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES; |
159 | if(size > bytecount) | 152 | if (size > bytecount) |
160 | size = bytecount; | 153 | size = bytecount; |
161 | if(copy_to_user(ptr, ldt->u.entries, size)) | 154 | if (copy_to_user(ptr, ldt->u.entries, size)) |
162 | err = -EFAULT; | 155 | err = -EFAULT; |
163 | bytecount -= size; | 156 | bytecount -= size; |
164 | ptr += size; | 157 | ptr += size; |
165 | } | 158 | } |
166 | else { | 159 | else { |
167 | for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount; | 160 | for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount; |
168 | i++){ | 161 | i++) { |
169 | size = PAGE_SIZE; | 162 | size = PAGE_SIZE; |
170 | if(size > bytecount) | 163 | if (size > bytecount) |
171 | size = bytecount; | 164 | size = bytecount; |
172 | if(copy_to_user(ptr, ldt->u.pages[i], size)){ | 165 | if (copy_to_user(ptr, ldt->u.pages[i], size)) { |
173 | err = -EFAULT; | 166 | err = -EFAULT; |
174 | break; | 167 | break; |
175 | } | 168 | } |
@@ -179,10 +172,10 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) | |||
179 | } | 172 | } |
180 | up(&ldt->semaphore); | 173 | up(&ldt->semaphore); |
181 | 174 | ||
182 | if(bytecount == 0 || err == -EFAULT) | 175 | if (bytecount == 0 || err == -EFAULT) |
183 | goto out; | 176 | goto out; |
184 | 177 | ||
185 | if(clear_user(ptr, bytecount)) | 178 | if (clear_user(ptr, bytecount)) |
186 | err = -EFAULT; | 179 | err = -EFAULT; |
187 | 180 | ||
188 | out: | 181 | out: |
@@ -193,15 +186,16 @@ static int read_default_ldt(void __user * ptr, unsigned long bytecount) | |||
193 | { | 186 | { |
194 | int err; | 187 | int err; |
195 | 188 | ||
196 | if(bytecount > 5*LDT_ENTRY_SIZE) | 189 | if (bytecount > 5*LDT_ENTRY_SIZE) |
197 | bytecount = 5*LDT_ENTRY_SIZE; | 190 | bytecount = 5*LDT_ENTRY_SIZE; |
198 | 191 | ||
199 | err = bytecount; | 192 | err = bytecount; |
200 | /* UML doesn't support lcall7 and lcall27. | 193 | /* |
194 | * UML doesn't support lcall7 and lcall27. | ||
201 | * So, we don't really have a default ldt, but emulate | 195 | * So, we don't really have a default ldt, but emulate |
202 | * an empty ldt of common host default ldt size. | 196 | * an empty ldt of common host default ldt size. |
203 | */ | 197 | */ |
204 | if(clear_user(ptr, bytecount)) | 198 | if (clear_user(ptr, bytecount)) |
205 | err = -EFAULT; | 199 | err = -EFAULT; |
206 | 200 | ||
207 | return err; | 201 | return err; |
@@ -217,52 +211,52 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int func) | |||
217 | void *addr = NULL; | 211 | void *addr = NULL; |
218 | 212 | ||
219 | err = -EINVAL; | 213 | err = -EINVAL; |
220 | if(bytecount != sizeof(ldt_info)) | 214 | if (bytecount != sizeof(ldt_info)) |
221 | goto out; | 215 | goto out; |
222 | err = -EFAULT; | 216 | err = -EFAULT; |
223 | if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) | 217 | if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) |
224 | goto out; | 218 | goto out; |
225 | 219 | ||
226 | err = -EINVAL; | 220 | err = -EINVAL; |
227 | if(ldt_info.entry_number >= LDT_ENTRIES) | 221 | if (ldt_info.entry_number >= LDT_ENTRIES) |
228 | goto out; | 222 | goto out; |
229 | if(ldt_info.contents == 3){ | 223 | if (ldt_info.contents == 3) { |
230 | if (func == 1) | 224 | if (func == 1) |
231 | goto out; | 225 | goto out; |
232 | if (ldt_info.seg_not_present == 0) | 226 | if (ldt_info.seg_not_present == 0) |
233 | goto out; | 227 | goto out; |
234 | } | 228 | } |
235 | 229 | ||
236 | if(!ptrace_ldt) | 230 | if (!ptrace_ldt) |
237 | down(&ldt->semaphore); | 231 | down(&ldt->semaphore); |
238 | 232 | ||
239 | err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1); | 233 | err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1); |
240 | if(err) | 234 | if (err) |
241 | goto out_unlock; | 235 | goto out_unlock; |
242 | else if(ptrace_ldt) { | 236 | else if (ptrace_ldt) { |
243 | /* With PTRACE_LDT available, this is used as a flag only */ | 237 | /* With PTRACE_LDT available, this is used as a flag only */ |
244 | ldt->entry_count = 1; | 238 | ldt->entry_count = 1; |
245 | goto out; | 239 | goto out; |
246 | } | 240 | } |
247 | 241 | ||
248 | if(ldt_info.entry_number >= ldt->entry_count && | 242 | if (ldt_info.entry_number >= ldt->entry_count && |
249 | ldt_info.entry_number >= LDT_DIRECT_ENTRIES){ | 243 | ldt_info.entry_number >= LDT_DIRECT_ENTRIES) { |
250 | for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE; | 244 | for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE; |
251 | i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number; | 245 | i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number; |
252 | i++){ | 246 | i++) { |
253 | if(i == 0) | 247 | if (i == 0) |
254 | memcpy(&entry0, ldt->u.entries, | 248 | memcpy(&entry0, ldt->u.entries, |
255 | sizeof(entry0)); | 249 | sizeof(entry0)); |
256 | ldt->u.pages[i] = (struct ldt_entry *) | 250 | ldt->u.pages[i] = (struct ldt_entry *) |
257 | __get_free_page(GFP_KERNEL|__GFP_ZERO); | 251 | __get_free_page(GFP_KERNEL|__GFP_ZERO); |
258 | if(!ldt->u.pages[i]){ | 252 | if (!ldt->u.pages[i]) { |
259 | err = -ENOMEM; | 253 | err = -ENOMEM; |
260 | /* Undo the change in host */ | 254 | /* Undo the change in host */ |
261 | memset(&ldt_info, 0, sizeof(ldt_info)); | 255 | memset(&ldt_info, 0, sizeof(ldt_info)); |
262 | write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1); | 256 | write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1); |
263 | goto out_unlock; | 257 | goto out_unlock; |
264 | } | 258 | } |
265 | if(i == 0) { | 259 | if (i == 0) { |
266 | memcpy(ldt->u.pages[0], &entry0, | 260 | memcpy(ldt->u.pages[0], &entry0, |
267 | sizeof(entry0)); | 261 | sizeof(entry0)); |
268 | memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, | 262 | memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, |
@@ -271,17 +265,17 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int func) | |||
271 | ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE; | 265 | ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE; |
272 | } | 266 | } |
273 | } | 267 | } |
274 | if(ldt->entry_count <= ldt_info.entry_number) | 268 | if (ldt->entry_count <= ldt_info.entry_number) |
275 | ldt->entry_count = ldt_info.entry_number + 1; | 269 | ldt->entry_count = ldt_info.entry_number + 1; |
276 | 270 | ||
277 | if(ldt->entry_count <= LDT_DIRECT_ENTRIES) | 271 | if (ldt->entry_count <= LDT_DIRECT_ENTRIES) |
278 | ldt_p = ldt->u.entries + ldt_info.entry_number; | 272 | ldt_p = ldt->u.entries + ldt_info.entry_number; |
279 | else | 273 | else |
280 | ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] + | 274 | ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] + |
281 | ldt_info.entry_number%LDT_ENTRIES_PER_PAGE; | 275 | ldt_info.entry_number%LDT_ENTRIES_PER_PAGE; |
282 | 276 | ||
283 | if(ldt_info.base_addr == 0 && ldt_info.limit == 0 && | 277 | if (ldt_info.base_addr == 0 && ldt_info.limit == 0 && |
284 | (func == 1 || LDT_empty(&ldt_info))){ | 278 | (func == 1 || LDT_empty(&ldt_info))) { |
285 | ldt_p->a = 0; | 279 | ldt_p->a = 0; |
286 | ldt_p->b = 0; | 280 | ldt_p->b = 0; |
287 | } | 281 | } |
@@ -332,7 +326,7 @@ static void ldt_get_host_info(void) | |||
332 | 326 | ||
333 | spin_lock(&host_ldt_lock); | 327 | spin_lock(&host_ldt_lock); |
334 | 328 | ||
335 | if(host_ldt_entries != NULL){ | 329 | if (host_ldt_entries != NULL) { |
336 | spin_unlock(&host_ldt_lock); | 330 | spin_unlock(&host_ldt_lock); |
337 | return; | 331 | return; |
338 | } | 332 | } |
@@ -340,49 +334,49 @@ static void ldt_get_host_info(void) | |||
340 | 334 | ||
341 | spin_unlock(&host_ldt_lock); | 335 | spin_unlock(&host_ldt_lock); |
342 | 336 | ||
343 | for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++); | 337 | for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++) |
338 | ; | ||
344 | 339 | ||
345 | ldt = (struct ldt_entry *) | 340 | ldt = (struct ldt_entry *) |
346 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); | 341 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); |
347 | if(ldt == NULL) { | 342 | if (ldt == NULL) { |
348 | printk("ldt_get_host_info: couldn't allocate buffer for host " | 343 | printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer " |
349 | "ldt\n"); | 344 | "for host ldt\n"); |
350 | return; | 345 | return; |
351 | } | 346 | } |
352 | 347 | ||
353 | ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE); | 348 | ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE); |
354 | if(ret < 0) { | 349 | if (ret < 0) { |
355 | printk("ldt_get_host_info: couldn't read host ldt\n"); | 350 | printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n"); |
356 | goto out_free; | 351 | goto out_free; |
357 | } | 352 | } |
358 | if(ret == 0) { | 353 | if (ret == 0) { |
359 | /* default_ldt is active, simply write an empty entry 0 */ | 354 | /* default_ldt is active, simply write an empty entry 0 */ |
360 | host_ldt_entries = dummy_list; | 355 | host_ldt_entries = dummy_list; |
361 | goto out_free; | 356 | goto out_free; |
362 | } | 357 | } |
363 | 358 | ||
364 | for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){ | 359 | for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) { |
365 | if(ldt[i].a != 0 || ldt[i].b != 0) | 360 | if (ldt[i].a != 0 || ldt[i].b != 0) |
366 | size++; | 361 | size++; |
367 | } | 362 | } |
368 | 363 | ||
369 | if(size < ARRAY_SIZE(dummy_list)) | 364 | if (size < ARRAY_SIZE(dummy_list)) |
370 | host_ldt_entries = dummy_list; | 365 | host_ldt_entries = dummy_list; |
371 | else { | 366 | else { |
372 | size = (size + 1) * sizeof(dummy_list[0]); | 367 | size = (size + 1) * sizeof(dummy_list[0]); |
373 | tmp = kmalloc(size, GFP_KERNEL); | 368 | tmp = kmalloc(size, GFP_KERNEL); |
374 | if(tmp == NULL) { | 369 | if (tmp == NULL) { |
375 | printk("ldt_get_host_info: couldn't allocate host ldt " | 370 | printk(KERN_ERR "ldt_get_host_info: couldn't allocate " |
376 | "list\n"); | 371 | "host ldt list\n"); |
377 | goto out_free; | 372 | goto out_free; |
378 | } | 373 | } |
379 | host_ldt_entries = tmp; | 374 | host_ldt_entries = tmp; |
380 | } | 375 | } |
381 | 376 | ||
382 | for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){ | 377 | for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) { |
383 | if(ldt[i].a != 0 || ldt[i].b != 0) { | 378 | if (ldt[i].a != 0 || ldt[i].b != 0) |
384 | host_ldt_entries[k++] = i; | 379 | host_ldt_entries[k++] = i; |
385 | } | ||
386 | } | 380 | } |
387 | host_ldt_entries[k] = -1; | 381 | host_ldt_entries[k] = -1; |
388 | 382 | ||
@@ -401,15 +395,15 @@ long init_new_ldt(struct mmu_context_skas * new_mm, | |||
401 | struct proc_mm_op copy; | 395 | struct proc_mm_op copy; |
402 | 396 | ||
403 | 397 | ||
404 | if(!ptrace_ldt) | 398 | if (!ptrace_ldt) |
405 | init_MUTEX(&new_mm->ldt.semaphore); | 399 | init_MUTEX(&new_mm->ldt.semaphore); |
406 | 400 | ||
407 | if(!from_mm){ | 401 | if (!from_mm) { |
408 | memset(&desc, 0, sizeof(desc)); | 402 | memset(&desc, 0, sizeof(desc)); |
409 | /* | 403 | /* |
410 | * We have to initialize a clean ldt. | 404 | * We have to initialize a clean ldt. |
411 | */ | 405 | */ |
412 | if(proc_mm) { | 406 | if (proc_mm) { |
413 | /* | 407 | /* |
414 | * If the new mm was created using proc_mm, host's | 408 | * If the new mm was created using proc_mm, host's |
415 | * default-ldt currently is assigned, which normally | 409 | * default-ldt currently is assigned, which normally |
@@ -417,8 +411,7 @@ long init_new_ldt(struct mmu_context_skas * new_mm, | |||
417 | * To remove these gates, we simply write an empty | 411 | * To remove these gates, we simply write an empty |
418 | * entry as number 0 to the host. | 412 | * entry as number 0 to the host. |
419 | */ | 413 | */ |
420 | err = write_ldt_entry(&new_mm->id, 1, &desc, | 414 | err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1); |
421 | &addr, 1); | ||
422 | } | 415 | } |
423 | else{ | 416 | else{ |
424 | /* | 417 | /* |
@@ -427,11 +420,11 @@ long init_new_ldt(struct mmu_context_skas * new_mm, | |||
427 | * will be reset in the following loop | 420 | * will be reset in the following loop |
428 | */ | 421 | */ |
429 | ldt_get_host_info(); | 422 | ldt_get_host_info(); |
430 | for(num_p=host_ldt_entries; *num_p != -1; num_p++){ | 423 | for (num_p=host_ldt_entries; *num_p != -1; num_p++) { |
431 | desc.entry_number = *num_p; | 424 | desc.entry_number = *num_p; |
432 | err = write_ldt_entry(&new_mm->id, 1, &desc, | 425 | err = write_ldt_entry(&new_mm->id, 1, &desc, |
433 | &addr, *(num_p + 1) == -1); | 426 | &addr, *(num_p + 1) == -1); |
434 | if(err) | 427 | if (err) |
435 | break; | 428 | break; |
436 | } | 429 | } |
437 | } | 430 | } |
@@ -440,8 +433,9 @@ long init_new_ldt(struct mmu_context_skas * new_mm, | |||
440 | goto out; | 433 | goto out; |
441 | } | 434 | } |
442 | 435 | ||
443 | if(proc_mm){ | 436 | if (proc_mm) { |
444 | /* We have a valid from_mm, so we now have to copy the LDT of | 437 | /* |
438 | * We have a valid from_mm, so we now have to copy the LDT of | ||
445 | * from_mm to new_mm, because using proc_mm an new mm with | 439 | * from_mm to new_mm, because using proc_mm an new mm with |
446 | * an empty/default LDT was created in new_mm() | 440 | * an empty/default LDT was created in new_mm() |
447 | */ | 441 | */ |
@@ -450,27 +444,27 @@ long init_new_ldt(struct mmu_context_skas * new_mm, | |||
450 | { .copy_segments = | 444 | { .copy_segments = |
451 | from_mm->id.u.mm_fd } } ); | 445 | from_mm->id.u.mm_fd } } ); |
452 | i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy)); | 446 | i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy)); |
453 | if(i != sizeof(copy)) | 447 | if (i != sizeof(copy)) |
454 | printk("new_mm : /proc/mm copy_segments failed, " | 448 | printk(KERN_ERR "new_mm : /proc/mm copy_segments " |
455 | "err = %d\n", -i); | 449 | "failed, err = %d\n", -i); |
456 | } | 450 | } |
457 | 451 | ||
458 | if(!ptrace_ldt) { | 452 | if (!ptrace_ldt) { |
459 | /* Our local LDT is used to supply the data for | 453 | /* |
454 | * Our local LDT is used to supply the data for | ||
460 | * modify_ldt(READLDT), if PTRACE_LDT isn't available, | 455 | * modify_ldt(READLDT), if PTRACE_LDT isn't available, |
461 | * i.e., we have to use the stub for modify_ldt, which | 456 | * i.e., we have to use the stub for modify_ldt, which |
462 | * can't handle the big read buffer of up to 64kB. | 457 | * can't handle the big read buffer of up to 64kB. |
463 | */ | 458 | */ |
464 | down(&from_mm->ldt.semaphore); | 459 | down(&from_mm->ldt.semaphore); |
465 | if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){ | 460 | if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES) |
466 | memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries, | 461 | memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries, |
467 | sizeof(new_mm->ldt.u.entries)); | 462 | sizeof(new_mm->ldt.u.entries)); |
468 | } | 463 | else { |
469 | else{ | ||
470 | i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE; | 464 | i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE; |
471 | while(i-->0){ | 465 | while (i-->0) { |
472 | page = __get_free_page(GFP_KERNEL|__GFP_ZERO); | 466 | page = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
473 | if (!page){ | 467 | if (!page) { |
474 | err = -ENOMEM; | 468 | err = -ENOMEM; |
475 | break; | 469 | break; |
476 | } | 470 | } |
@@ -493,11 +487,10 @@ void free_ldt(struct mmu_context_skas * mm) | |||
493 | { | 487 | { |
494 | int i; | 488 | int i; |
495 | 489 | ||
496 | if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){ | 490 | if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) { |
497 | i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE; | 491 | i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE; |
498 | while(i-- > 0){ | 492 | while (i-- > 0) |
499 | free_page((long )mm->ldt.u.pages[i]); | 493 | free_page((long) mm->ldt.u.pages[i]); |
500 | } | ||
501 | } | 494 | } |
502 | mm->ldt.entry_count = 0; | 495 | mm->ldt.entry_count = 0; |
503 | } | 496 | } |
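On the host side, ldt_get_host_info() relies on modify_ldt(0, buf, len) returning the calling process's raw 8-byte LDT descriptors (0 if only the default LDT is active). A hedged, x86-only userspace sketch of that read; glibc has no wrapper, so it goes through syscall(2):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#define LDT_ENTRY_SIZE	8
#define LDT_ENTRIES	8192

int main(void)
{
	static uint8_t buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
	long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
	long i, used = 0;

	if (n < 0) {
		perror("modify_ldt");
		return 1;
	}
	for (i = 0; i < n / LDT_ENTRY_SIZE; i++) {
		uint64_t entry;

		memcpy(&entry, buf + i * LDT_ENTRY_SIZE, sizeof(entry));
		if (entry != 0)		/* same non-empty test as the a/b check above */
			used++;
	}
	printf("host LDT: %ld bytes read, %ld non-empty entries\n", n, used);
	return 0;
}

The counting loop corresponds to the size/host_ldt_entries bookkeeping in the hunk above; the kernel code additionally records the non-empty entry numbers so init_new_ldt() can reset the inherited entries.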
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c index dcf0c6b310c8..178f894384f4 100644 --- a/arch/um/sys-i386/ptrace.c +++ b/arch/um/sys-i386/ptrace.c | |||
@@ -1,18 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/compiler.h> | ||
7 | #include "linux/sched.h" | ||
8 | #include "linux/mm.h" | 6 | #include "linux/mm.h" |
9 | #include "asm/elf.h" | 7 | #include "linux/sched.h" |
10 | #include "asm/ptrace.h" | ||
11 | #include "asm/uaccess.h" | 8 | #include "asm/uaccess.h" |
12 | #include "asm/unistd.h" | ||
13 | #include "sysdep/ptrace.h" | ||
14 | #include "sysdep/sigcontext.h" | ||
15 | #include "sysdep/sc.h" | ||
16 | 9 | ||
17 | extern int arch_switch_tls(struct task_struct *from, struct task_struct *to); | 10 | extern int arch_switch_tls(struct task_struct *from, struct task_struct *to); |
18 | 11 | ||
@@ -23,7 +16,8 @@ void arch_switch_to(struct task_struct *from, struct task_struct *to) | |||
23 | return; | 16 | return; |
24 | 17 | ||
25 | if (err != -EINVAL) | 18 | if (err != -EINVAL) |
26 | printk(KERN_WARNING "arch_switch_tls failed, errno %d, not EINVAL\n", -err); | 19 | printk(KERN_WARNING "arch_switch_tls failed, errno %d, " |
20 | "not EINVAL\n", -err); | ||
27 | else | 21 | else |
28 | printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n"); | 22 | printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n"); |
29 | } | 23 | } |
@@ -34,21 +28,21 @@ int is_syscall(unsigned long addr) | |||
34 | int n; | 28 | int n; |
35 | 29 | ||
36 | n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); | 30 | n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); |
37 | if(n){ | 31 | if (n) { |
38 | /* access_process_vm() grants access to vsyscall and stub, | 32 | /* access_process_vm() grants access to vsyscall and stub, |
39 | * while copy_from_user doesn't. Maybe access_process_vm is | 33 | * while copy_from_user doesn't. Maybe access_process_vm is |
40 | * slow, but that doesn't matter, since it will be called only | 34 | * slow, but that doesn't matter, since it will be called only |
41 | * in case of singlestepping, if copy_from_user failed. | 35 | * in case of singlestepping, if copy_from_user failed. |
42 | */ | 36 | */ |
43 | n = access_process_vm(current, addr, &instr, sizeof(instr), 0); | 37 | n = access_process_vm(current, addr, &instr, sizeof(instr), 0); |
44 | if(n != sizeof(instr)) { | 38 | if (n != sizeof(instr)) { |
45 | printk("is_syscall : failed to read instruction from " | 39 | printk(KERN_ERR "is_syscall : failed to read " |
46 | "0x%lx\n", addr); | 40 | "instruction from 0x%lx\n", addr); |
47 | return(1); | 41 | return 1; |
48 | } | 42 | } |
49 | } | 43 | } |
50 | /* int 0x80 or sysenter */ | 44 | /* int 0x80 or sysenter */ |
51 | return((instr == 0x80cd) || (instr == 0x340f)); | 45 | return (instr == 0x80cd) || (instr == 0x340f); |
52 | } | 46 | } |
53 | 47 | ||
54 | /* determines which flags the user has access to. */ | 48 | /* determines which flags the user has access to. */ |
@@ -92,21 +86,21 @@ int putreg(struct task_struct *child, int regno, unsigned long value) | |||
92 | 86 | ||
93 | int poke_user(struct task_struct *child, long addr, long data) | 87 | int poke_user(struct task_struct *child, long addr, long data) |
94 | { | 88 | { |
95 | if ((addr & 3) || addr < 0) | 89 | if ((addr & 3) || addr < 0) |
96 | return -EIO; | 90 | return -EIO; |
97 | 91 | ||
98 | if (addr < MAX_REG_OFFSET) | 92 | if (addr < MAX_REG_OFFSET) |
99 | return putreg(child, addr, data); | 93 | return putreg(child, addr, data); |
100 | 94 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && | |
101 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | 95 | (addr <= offsetof(struct user, u_debugreg[7]))) { |
102 | (addr <= offsetof(struct user, u_debugreg[7]))){ | 96 | addr -= offsetof(struct user, u_debugreg[0]); |
103 | addr -= offsetof(struct user, u_debugreg[0]); | 97 | addr = addr >> 2; |
104 | addr = addr >> 2; | 98 | if ((addr == 4) || (addr == 5)) |
105 | if((addr == 4) || (addr == 5)) return -EIO; | 99 | return -EIO; |
106 | child->thread.arch.debugregs[addr] = data; | 100 | child->thread.arch.debugregs[addr] = data; |
107 | return 0; | 101 | return 0; |
108 | } | 102 | } |
109 | return -EIO; | 103 | return -EIO; |
110 | } | 104 | } |
111 | 105 | ||
112 | unsigned long getreg(struct task_struct *child, int regno) | 106 | unsigned long getreg(struct task_struct *child, int regno) |
@@ -129,20 +123,20 @@ unsigned long getreg(struct task_struct *child, int regno) | |||
129 | return retval; | 123 | return retval; |
130 | } | 124 | } |
131 | 125 | ||
126 | /* read the word at location addr in the USER area. */ | ||
132 | int peek_user(struct task_struct *child, long addr, long data) | 127 | int peek_user(struct task_struct *child, long addr, long data) |
133 | { | 128 | { |
134 | /* read the word at location addr in the USER area. */ | ||
135 | unsigned long tmp; | 129 | unsigned long tmp; |
136 | 130 | ||
137 | if ((addr & 3) || addr < 0) | 131 | if ((addr & 3) || addr < 0) |
138 | return -EIO; | 132 | return -EIO; |
139 | 133 | ||
140 | tmp = 0; /* Default return condition */ | 134 | tmp = 0; /* Default return condition */ |
141 | if(addr < MAX_REG_OFFSET){ | 135 | if (addr < MAX_REG_OFFSET) { |
142 | tmp = getreg(child, addr); | 136 | tmp = getreg(child, addr); |
143 | } | 137 | } |
144 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | 138 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && |
145 | (addr <= offsetof(struct user, u_debugreg[7]))){ | 139 | (addr <= offsetof(struct user, u_debugreg[7]))) { |
146 | addr -= offsetof(struct user, u_debugreg[0]); | 140 | addr -= offsetof(struct user, u_debugreg[0]); |
147 | addr = addr >> 2; | 141 | addr = addr >> 2; |
148 | tmp = child->thread.arch.debugregs[addr]; | 142 | tmp = child->thread.arch.debugregs[addr]; |
@@ -173,15 +167,15 @@ struct i387_fxsave_struct { | |||
173 | static inline unsigned short twd_i387_to_fxsr( unsigned short twd ) | 167 | static inline unsigned short twd_i387_to_fxsr( unsigned short twd ) |
174 | { | 168 | { |
175 | unsigned int tmp; /* to avoid 16 bit prefixes in the code */ | 169 | unsigned int tmp; /* to avoid 16 bit prefixes in the code */ |
176 | 170 | ||
177 | /* Transform each pair of bits into 01 (valid) or 00 (empty) */ | 171 | /* Transform each pair of bits into 01 (valid) or 00 (empty) */ |
178 | tmp = ~twd; | 172 | tmp = ~twd; |
179 | tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ | 173 | tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ |
180 | /* and move the valid bits to the lower byte. */ | 174 | /* and move the valid bits to the lower byte. */ |
181 | tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ | 175 | tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ |
182 | tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ | 176 | tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ |
183 | tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ | 177 | tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ |
184 | return tmp; | 178 | return tmp; |
185 | } | 179 | } |
186 | 180 | ||
187 | static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave ) | 181 | static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave ) |
@@ -235,7 +229,7 @@ static inline int convert_fxsr_to_user(struct _fpstate __user *buf, | |||
235 | return 0; | 229 | return 0; |
236 | } | 230 | } |
237 | 231 | ||
238 | static inline int convert_fxsr_from_user(struct pt_regs *regs, | 232 | static inline int convert_fxsr_from_user(struct pt_regs *regs, |
239 | struct _fpstate __user *buf) | 233 | struct _fpstate __user *buf) |
240 | { | 234 | { |
241 | return 0; | 235 | return 0; |
@@ -247,18 +241,20 @@ int get_fpregs(unsigned long buf, struct task_struct *child) | |||
247 | 241 | ||
248 | err = convert_fxsr_to_user((struct _fpstate __user *) buf, | 242 | err = convert_fxsr_to_user((struct _fpstate __user *) buf, |
249 | &child->thread.regs); | 243 | &child->thread.regs); |
250 | if(err) return(-EFAULT); | 244 | if (err) |
251 | else return(0); | 245 | return -EFAULT; |
246 | return 0; | ||
252 | } | 247 | } |
253 | 248 | ||
254 | int set_fpregs(unsigned long buf, struct task_struct *child) | 249 | int set_fpregs(unsigned long buf, struct task_struct *child) |
255 | { | 250 | { |
256 | int err; | 251 | int err; |
257 | 252 | ||
258 | err = convert_fxsr_from_user(&child->thread.regs, | 253 | err = convert_fxsr_from_user(&child->thread.regs, |
259 | (struct _fpstate __user *) buf); | 254 | (struct _fpstate __user *) buf); |
260 | if(err) return(-EFAULT); | 255 | if (err) |
261 | else return(0); | 256 | return -EFAULT; |
257 | return 0; | ||
262 | } | 258 | } |
263 | 259 | ||
264 | int get_fpxregs(unsigned long buf, struct task_struct *tsk) | 260 | int get_fpxregs(unsigned long buf, struct task_struct *tsk) |
@@ -284,7 +280,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
284 | fpu->fos = 0; | 280 | fpu->fos = 0; |
285 | memcpy(fpu->st_space, (void *) SC_FP_ST(PT_REGS_SC(regs)), | 281 | memcpy(fpu->st_space, (void *) SC_FP_ST(PT_REGS_SC(regs)), |
286 | sizeof(fpu->st_space)); | 282 | sizeof(fpu->st_space)); |
287 | return(1); | 283 | return 1; |
288 | } | 284 | } |
289 | #endif | 285 | #endif |
290 | 286 | ||
@@ -292,14 +288,3 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu ) | |||
292 | { | 288 | { |
293 | return 1; | 289 | return 1; |
294 | } | 290 | } |
295 | |||
296 | /* | ||
297 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
298 | * Emacs will notice this stuff at the end of the file and automatically | ||
299 | * adjust the settings for this buffer only. This must remain at the end | ||
300 | * of the file. | ||
301 | * --------------------------------------------------------------------------- | ||
302 | * Local variables: | ||
303 | * c-file-style: "linux" | ||
304 | * End: | ||
305 | */ | ||
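The 0x80cd and 0x340f constants in is_syscall() are just the two i386 kernel-entry instructions read as a little-endian 16-bit word. A hypothetical helper making the encoding explicit:

#include <stdint.h>

static int is_syscall_opcode(const unsigned char *insn)
{
	uint16_t w = insn[0] | (insn[1] << 8);	/* little-endian 16-bit load */

	return (w == 0x80cd) ||	/* bytes cd 80: int $0x80 */
	       (w == 0x340f);	/* bytes 0f 34: sysenter  */
}

is_syscall() reads that word with copy_from_user(), falling back to access_process_vm() for the vsyscall page and the stub, exactly as the comment in the hunk explains.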
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c index c64d48734e3a..c82e5f562ec6 100644 --- a/arch/um/sys-i386/signal.c +++ b/arch/um/sys-i386/signal.c | |||
@@ -1,17 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) | 2 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/signal.h" | ||
7 | #include "linux/ptrace.h" | 6 | #include "linux/ptrace.h" |
8 | #include "asm/current.h" | ||
9 | #include "asm/ucontext.h" | ||
10 | #include "asm/uaccess.h" | ||
11 | #include "asm/unistd.h" | 7 | #include "asm/unistd.h" |
8 | #include "asm/uaccess.h" | ||
9 | #include "asm/ucontext.h" | ||
12 | #include "frame_kern.h" | 10 | #include "frame_kern.h" |
13 | #include "sigcontext.h" | ||
14 | #include "registers.h" | ||
15 | #include "skas.h" | 11 | #include "skas.h" |
16 | 12 | ||
17 | void copy_sc(struct uml_pt_regs *regs, void *from) | 13 | void copy_sc(struct uml_pt_regs *regs, void *from) |
@@ -39,21 +35,21 @@ void copy_sc(struct uml_pt_regs *regs, void *from) | |||
39 | static int copy_sc_from_user(struct pt_regs *regs, | 35 | static int copy_sc_from_user(struct pt_regs *regs, |
40 | struct sigcontext __user *from) | 36 | struct sigcontext __user *from) |
41 | { | 37 | { |
42 | struct sigcontext sc; | 38 | struct sigcontext sc; |
43 | unsigned long fpregs[HOST_FP_SIZE]; | 39 | unsigned long fpregs[HOST_FP_SIZE]; |
44 | int err; | 40 | int err; |
45 | 41 | ||
46 | err = copy_from_user(&sc, from, sizeof(sc)); | 42 | err = copy_from_user(&sc, from, sizeof(sc)); |
47 | err |= copy_from_user(fpregs, sc.fpstate, sizeof(fpregs)); | 43 | err |= copy_from_user(fpregs, sc.fpstate, sizeof(fpregs)); |
48 | if(err) | 44 | if (err) |
49 | return err; | 45 | return err; |
50 | 46 | ||
51 | copy_sc(&regs->regs, &sc); | 47 | copy_sc(&regs->regs, &sc); |
52 | 48 | ||
53 | err = restore_fp_registers(userspace_pid[0], fpregs); | 49 | err = restore_fp_registers(userspace_pid[0], fpregs); |
54 | if(err < 0) { | 50 | if (err < 0) { |
55 | printk("copy_sc_from_user_skas - PTRACE_SETFPREGS failed, " | 51 | printk(KERN_ERR "copy_sc_from_user_skas - PTRACE_SETFPREGS " |
56 | "errno = %d\n", -err); | 52 | "failed, errno = %d\n", -err); |
57 | return err; | 53 | return err; |
58 | } | 54 | } |
59 | 55 | ||
@@ -64,7 +60,7 @@ static int copy_sc_to_user(struct sigcontext __user *to, | |||
64 | struct _fpstate __user *to_fp, struct pt_regs *regs, | 60 | struct _fpstate __user *to_fp, struct pt_regs *regs, |
65 | unsigned long sp) | 61 | unsigned long sp) |
66 | { | 62 | { |
67 | struct sigcontext sc; | 63 | struct sigcontext sc; |
68 | unsigned long fpregs[HOST_FP_SIZE]; | 64 | unsigned long fpregs[HOST_FP_SIZE]; |
69 | struct faultinfo * fi = ¤t->thread.arch.faultinfo; | 65 | struct faultinfo * fi = ¤t->thread.arch.faultinfo; |
70 | int err; | 66 | int err; |
@@ -86,28 +82,29 @@ static int copy_sc_to_user(struct sigcontext __user *to, | |||
86 | sc.eflags = REGS_EFLAGS(regs->regs.regs); | 82 | sc.eflags = REGS_EFLAGS(regs->regs.regs); |
87 | sc.esp_at_signal = regs->regs.regs[UESP]; | 83 | sc.esp_at_signal = regs->regs.regs[UESP]; |
88 | sc.ss = regs->regs.regs[SS]; | 84 | sc.ss = regs->regs.regs[SS]; |
89 | sc.cr2 = fi->cr2; | 85 | sc.cr2 = fi->cr2; |
90 | sc.err = fi->error_code; | 86 | sc.err = fi->error_code; |
91 | sc.trapno = fi->trap_no; | 87 | sc.trapno = fi->trap_no; |
92 | 88 | ||
93 | err = save_fp_registers(userspace_pid[0], fpregs); | 89 | err = save_fp_registers(userspace_pid[0], fpregs); |
94 | if(err < 0){ | 90 | if (err < 0) { |
95 | printk("copy_sc_to_user_skas - PTRACE_GETFPREGS failed, " | 91 | printk(KERN_ERR "copy_sc_to_user_skas - PTRACE_GETFPREGS " |
96 | "errno = %d\n", err); | 92 | "failed, errno = %d\n", err); |
97 | return 1; | 93 | return 1; |
98 | } | 94 | } |
99 | to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1)); | 95 | to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1)); |
100 | sc.fpstate = to_fp; | 96 | sc.fpstate = to_fp; |
101 | 97 | ||
102 | if(err) | 98 | if (err) |
103 | return err; | 99 | return err; |
104 | 100 | ||
105 | return copy_to_user(to, &sc, sizeof(sc)) || | 101 | return copy_to_user(to, &sc, sizeof(sc)) || |
106 | copy_to_user(to_fp, fpregs, sizeof(fpregs)); | 102 | copy_to_user(to_fp, fpregs, sizeof(fpregs)); |
107 | } | 103 | } |
108 | 104 | ||
109 | static int copy_ucontext_to_user(struct ucontext __user *uc, struct _fpstate __user *fp, | 105 | static int copy_ucontext_to_user(struct ucontext __user *uc, |
110 | sigset_t *set, unsigned long sp) | 106 | struct _fpstate __user *fp, sigset_t *set, |
107 | unsigned long sp) | ||
111 | { | 108 | { |
112 | int err = 0; | 109 | int err = 0; |
113 | 110 | ||
@@ -157,7 +154,7 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, | |||
157 | return 1; | 154 | return 1; |
158 | 155 | ||
159 | restorer = frame->retcode; | 156 | restorer = frame->retcode; |
160 | if(ka->sa.sa_flags & SA_RESTORER) | 157 | if (ka->sa.sa_flags & SA_RESTORER) |
161 | restorer = ka->sa.sa_restorer; | 158 | restorer = ka->sa.sa_restorer; |
162 | 159 | ||
163 | /* Update SP now because the page fault handler refuses to extend | 160 | /* Update SP now because the page fault handler refuses to extend |
@@ -189,7 +186,7 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, | |||
189 | err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2)); | 186 | err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2)); |
190 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); | 187 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); |
191 | 188 | ||
192 | if(err) | 189 | if (err) |
193 | goto err; | 190 | goto err; |
194 | 191 | ||
195 | PT_REGS_SP(regs) = (unsigned long) frame; | 192 | PT_REGS_SP(regs) = (unsigned long) frame; |
@@ -222,7 +219,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, | |||
222 | return 1; | 219 | return 1; |
223 | 220 | ||
224 | restorer = frame->retcode; | 221 | restorer = frame->retcode; |
225 | if(ka->sa.sa_flags & SA_RESTORER) | 222 | if (ka->sa.sa_flags & SA_RESTORER) |
226 | restorer = ka->sa.sa_restorer; | 223 | restorer = ka->sa.sa_restorer; |
227 | 224 | ||
228 | /* See comment above about why this is here */ | 225 | /* See comment above about why this is here */ |
@@ -247,7 +244,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, | |||
247 | err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1)); | 244 | err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1)); |
248 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); | 245 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); |
249 | 246 | ||
250 | if(err) | 247 | if (err) |
251 | goto err; | 248 | goto err; |
252 | 249 | ||
253 | PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; | 250 | PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; |
@@ -274,8 +271,8 @@ long sys_sigreturn(struct pt_regs regs) | |||
274 | unsigned long __user *extramask = frame->extramask; | 271 | unsigned long __user *extramask = frame->extramask; |
275 | int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); | 272 | int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); |
276 | 273 | ||
277 | if(copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) || | 274 | if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) || |
278 | copy_from_user(&set.sig[1], extramask, sig_size)) | 275 | copy_from_user(&set.sig[1], extramask, sig_size)) |
279 | goto segfault; | 276 | goto segfault; |
280 | 277 | ||
281 | sigdelsetmask(&set, ~_BLOCKABLE); | 278 | sigdelsetmask(&set, ~_BLOCKABLE); |
@@ -285,7 +282,7 @@ long sys_sigreturn(struct pt_regs regs) | |||
285 | recalc_sigpending(); | 282 | recalc_sigpending(); |
286 | spin_unlock_irq(&current->sighand->siglock); | 283 | spin_unlock_irq(&current->sighand->siglock); |
287 | 284 | ||
288 | if(copy_sc_from_user(&current->thread.regs, sc)) | 285 | if (copy_sc_from_user(&current->thread.regs, sc)) |
289 | goto segfault; | 286 | goto segfault; |
290 | 287 | ||
291 | /* Avoid ERESTART handling */ | 288 | /* Avoid ERESTART handling */ |
@@ -300,12 +297,13 @@ long sys_sigreturn(struct pt_regs regs) | |||
300 | long sys_rt_sigreturn(struct pt_regs regs) | 297 | long sys_rt_sigreturn(struct pt_regs regs) |
301 | { | 298 | { |
302 | unsigned long sp = PT_REGS_SP(&current->thread.regs); | 299 | unsigned long sp = PT_REGS_SP(&current->thread.regs); |
303 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (sp - 4); | 300 | struct rt_sigframe __user *frame = |
301 | (struct rt_sigframe __user *) (sp - 4); | ||
304 | sigset_t set; | 302 | sigset_t set; |
305 | struct ucontext __user *uc = &frame->uc; | 303 | struct ucontext __user *uc = &frame->uc; |
306 | int sig_size = _NSIG_WORDS * sizeof(unsigned long); | 304 | int sig_size = _NSIG_WORDS * sizeof(unsigned long); |
307 | 305 | ||
308 | if(copy_from_user(&set, &uc->uc_sigmask, sig_size)) | 306 | if (copy_from_user(&set, &uc->uc_sigmask, sig_size)) |
309 | goto segfault; | 307 | goto segfault; |
310 | 308 | ||
311 | sigdelsetmask(&set, ~_BLOCKABLE); | 309 | sigdelsetmask(&set, ~_BLOCKABLE); |
@@ -315,7 +313,7 @@ long sys_rt_sigreturn(struct pt_regs regs) | |||
315 | recalc_sigpending(); | 313 | recalc_sigpending(); |
316 | spin_unlock_irq(&current->sighand->siglock); | 314 | spin_unlock_irq(&current->sighand->siglock); |
317 | 315 | ||
318 | if(copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext)) | 316 | if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext)) |
319 | goto segfault; | 317 | goto segfault; |
320 | 318 | ||
321 | /* Avoid ERESTART handling */ | 319 | /* Avoid ERESTART handling */ |
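The retcode writes in the setup_signal_stack_sc() hunk above assemble the i386 sigreturn trampoline on the user signal frame: the syscall number goes in as a 32-bit immediate at retcode+2 and 0x80cd at retcode+6 is int $0x80 stored as a little-endian short (the rt frame at retcode+1/+5 follows the same pattern without the leading popl). Below is a minimal userspace sketch of the resulting byte layout; the leading popl %eax / movl-opcode bytes at retcode+0 and the value 119 for __NR_sigreturn are assumptions, since neither appears in this excerpt.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char retcode[8];
	unsigned short insn0 = 0xb858;	/* 0x58 popl %eax, 0xb8 movl $imm32,%eax (assumed) */
	unsigned int nr = 119;		/* __NR_sigreturn on i386 (assumed value) */
	unsigned short insn1 = 0x80cd;	/* 0xcd 0x80 = int $0x80, written at retcode+6 above */
	int i;

	memcpy(retcode + 0, &insn0, 2);
	memcpy(retcode + 2, &nr, 4);
	memcpy(retcode + 6, &insn1, 2);

	for (i = 0; i < 8; i++)
		printf("%02x ", retcode[i]);
	printf("\n");	/* 58 b8 77 00 00 00 cd 80 on a little-endian host */
	return 0;
}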
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c index 6cb7cbd137a0..b02266ab5c55 100644 --- a/arch/um/sys-i386/tls.c +++ b/arch/um/sys-i386/tls.c | |||
@@ -3,19 +3,12 @@ | |||
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/kernel.h" | 6 | #include "linux/percpu.h" |
7 | #include "linux/sched.h" | 7 | #include "linux/sched.h" |
8 | #include "linux/slab.h" | ||
9 | #include "linux/types.h" | ||
10 | #include "asm/uaccess.h" | 8 | #include "asm/uaccess.h" |
11 | #include "asm/ptrace.h" | ||
12 | #include "asm/segment.h" | ||
13 | #include "asm/smp.h" | ||
14 | #include "asm/desc.h" | ||
15 | #include "kern.h" | ||
16 | #include "kern_util.h" | ||
17 | #include "os.h" | 9 | #include "os.h" |
18 | #include "skas.h" | 10 | #include "skas.h" |
11 | #include "sysdep/tls.h" | ||
19 | 12 | ||
20 | /* | 13 | /* |
21 | * If needed we can detect when it's uninitialized. | 14 | * If needed we can detect when it's uninitialized. |
@@ -74,7 +67,8 @@ static inline void clear_user_desc(struct user_desc* info) | |||
74 | /* Postcondition: LDT_empty(info) returns true. */ | 67 | /* Postcondition: LDT_empty(info) returns true. */ |
75 | memset(info, 0, sizeof(*info)); | 68 | memset(info, 0, sizeof(*info)); |
76 | 69 | ||
77 | /* Check the LDT_empty or the i386 sys_get_thread_area code - we obtain | 70 | /* |
71 | * Check the LDT_empty or the i386 sys_get_thread_area code - we obtain | ||
78 | * indeed an empty user_desc. | 72 | * indeed an empty user_desc. |
79 | */ | 73 | */ |
80 | info->read_exec_only = 1; | 74 | info->read_exec_only = 1; |
@@ -89,10 +83,13 @@ static int load_TLS(int flags, struct task_struct *to) | |||
89 | int idx; | 83 | int idx; |
90 | 84 | ||
91 | for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) { | 85 | for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) { |
92 | struct uml_tls_struct* curr = &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN]; | 86 | struct uml_tls_struct* curr = |
87 | &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN]; | ||
93 | 88 | ||
94 | /* Actually, now if it wasn't flushed it gets cleared and | 89 | /* |
95 | * flushed to the host, which will clear it.*/ | 90 | * Actually, now if it wasn't flushed it gets cleared and |
91 | * flushed to the host, which will clear it. | ||
92 | */ | ||
96 | if (!curr->present) { | 93 | if (!curr->present) { |
97 | if (!curr->flushed) { | 94 | if (!curr->flushed) { |
98 | clear_user_desc(&curr->tls); | 95 | clear_user_desc(&curr->tls); |
@@ -116,7 +113,8 @@ out: | |||
116 | return ret; | 113 | return ret; |
117 | } | 114 | } |
118 | 115 | ||
119 | /* Verify if we need to do a flush for the new process, i.e. if there are any | 116 | /* |
117 | * Verify if we need to do a flush for the new process, i.e. if there are any | ||
120 | * present desc's, only if they haven't been flushed. | 118 | * present desc's, only if they haven't been flushed. |
121 | */ | 119 | */ |
122 | static inline int needs_TLS_update(struct task_struct *task) | 120 | static inline int needs_TLS_update(struct task_struct *task) |
@@ -125,10 +123,13 @@ static inline int needs_TLS_update(struct task_struct *task) | |||
125 | int ret = 0; | 123 | int ret = 0; |
126 | 124 | ||
127 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { | 125 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { |
128 | struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | 126 | struct uml_tls_struct* curr = |
127 | &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | ||
129 | 128 | ||
130 | /* Can't test curr->present, we may need to clear a descriptor | 129 | /* |
131 | * which had a value. */ | 130 | * Can't test curr->present, we may need to clear a descriptor |
131 | * which had a value. | ||
132 | */ | ||
132 | if (curr->flushed) | 133 | if (curr->flushed) |
133 | continue; | 134 | continue; |
134 | ret = 1; | 135 | ret = 1; |
@@ -137,7 +138,8 @@ static inline int needs_TLS_update(struct task_struct *task) | |||
137 | return ret; | 138 | return ret; |
138 | } | 139 | } |
139 | 140 | ||
140 | /* On a newly forked process, the TLS descriptors haven't yet been flushed. So | 141 | /* |
142 | * On a newly forked process, the TLS descriptors haven't yet been flushed. So | ||
141 | * we mark them as such and the first switch_to will do the job. | 143 | * we mark them as such and the first switch_to will do the job. |
142 | */ | 144 | */ |
143 | void clear_flushed_tls(struct task_struct *task) | 145 | void clear_flushed_tls(struct task_struct *task) |
@@ -145,10 +147,13 @@ void clear_flushed_tls(struct task_struct *task) | |||
145 | int i; | 147 | int i; |
146 | 148 | ||
147 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { | 149 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { |
148 | struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | 150 | struct uml_tls_struct* curr = |
151 | &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | ||
149 | 152 | ||
150 | /* Still correct to do this, if it wasn't present on the host it | 153 | /* |
151 | * will remain as flushed as it was. */ | 154 | * Still correct to do this, if it wasn't present on the host it |
155 | * will remain as flushed as it was. | ||
156 | */ | ||
152 | if (!curr->present) | 157 | if (!curr->present) |
153 | continue; | 158 | continue; |
154 | 159 | ||
@@ -156,23 +161,27 @@ void clear_flushed_tls(struct task_struct *task) | |||
156 | } | 161 | } |
157 | } | 162 | } |
158 | 163 | ||
159 | /* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a | 164 | /* |
165 | * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a | ||
160 | * common host process. So this is needed in SKAS0 too. | 166 | * common host process. So this is needed in SKAS0 too. |
161 | * | 167 | * |
162 | * However, if each thread had a different host process (and this was discussed | 168 | * However, if each thread had a different host process (and this was discussed |
163 | * for SMP support) this won't be needed. | 169 | * for SMP support) this won't be needed. |
164 | * | 170 | * |
165 | * And this will not need be used when (and if) we'll add support to the host | 171 | * And this will not need be used when (and if) we'll add support to the host |
166 | * SKAS patch. */ | 172 | * SKAS patch. |
173 | */ | ||
167 | 174 | ||
168 | int arch_switch_tls(struct task_struct *from, struct task_struct *to) | 175 | int arch_switch_tls(struct task_struct *from, struct task_struct *to) |
169 | { | 176 | { |
170 | if (!host_supports_tls) | 177 | if (!host_supports_tls) |
171 | return 0; | 178 | return 0; |
172 | 179 | ||
173 | /* We have no need whatsoever to switch TLS for kernel threads; beyond | 180 | /* |
181 | * We have no need whatsoever to switch TLS for kernel threads; beyond | ||
174 | * that, that would also result in us calling os_set_thread_area with | 182 | * that, that would also result in us calling os_set_thread_area with |
175 | * userspace_pid[cpu] == 0, which gives an error. */ | 183 | * userspace_pid[cpu] == 0, which gives an error. |
184 | */ | ||
176 | if (likely(to->mm)) | 185 | if (likely(to->mm)) |
177 | return load_TLS(O_FORCE, to); | 186 | return load_TLS(O_FORCE, to); |
178 | 187 | ||
@@ -232,17 +241,20 @@ static int get_tls_entry(struct task_struct* task, struct user_desc *info, int i | |||
232 | *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls; | 241 | *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls; |
233 | 242 | ||
234 | out: | 243 | out: |
235 | /* Temporary debugging check, to make sure that things have been | 244 | /* |
245 | * Temporary debugging check, to make sure that things have been | ||
236 | * flushed. This could be triggered if load_TLS() failed. | 246 | * flushed. This could be triggered if load_TLS() failed. |
237 | */ | 247 | */ |
238 | if (unlikely(task == current && !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) { | 248 | if (unlikely(task == current && |
249 | !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) { | ||
239 | printk(KERN_ERR "get_tls_entry: task with pid %d got here " | 250 | printk(KERN_ERR "get_tls_entry: task with pid %d got here " |
240 | "without flushed TLS.", current->pid); | 251 | "without flushed TLS.", current->pid); |
241 | } | 252 | } |
242 | 253 | ||
243 | return 0; | 254 | return 0; |
244 | clear: | 255 | clear: |
245 | /* When the TLS entry has not been set, the values read to user in the | 256 | /* |
257 | * When the TLS entry has not been set, the values read to user in the | ||
246 | * tls_array are 0 (because it's cleared at boot, see | 258 | * tls_array are 0 (because it's cleared at boot, see |
247 | * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that. | 259 | * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that. |
248 | */ | 260 | */ |
@@ -344,8 +356,10 @@ out: | |||
344 | } | 356 | } |
345 | 357 | ||
346 | 358 | ||
347 | /* XXX: This part is probably common to i386 and x86-64. Don't create a common | 359 | /* |
348 | * file for now, do that when implementing x86-64 support.*/ | 360 | * XXX: This part is probably common to i386 and x86-64. Don't create a common |
361 | * file for now, do that when implementing x86-64 support. | ||
362 | */ | ||
349 | static int __init __setup_host_supports_tls(void) | 363 | static int __init __setup_host_supports_tls(void) |
350 | { | 364 | { |
351 | check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min); | 365 | check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min); |
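Throughout the tls.c changes above, each tls_array slot carries two flags: present (the guest has installed the descriptor) and flushed (the host-side copy matches). load_TLS() pushes stale entries to the host, needs_TLS_update() checks for any unflushed slot, and clear_flushed_tls() marks present entries stale after fork so the first switch_to() flushes them. Below is a minimal sketch of that bookkeeping, with hypothetical names rather than the kernel structures themselves.

#include <stdio.h>

struct tls_entry {
	int present;	/* guest has installed this descriptor */
	int flushed;	/* host-side copy matches the guest view */
};

/* An entry needs to reach the host only while it is stale. */
static int entry_needs_flush(const struct tls_entry *e)
{
	return !e->flushed;
}

/* After fork, any present entry is stale until the first switch_to(). */
static void mark_stale_after_fork(struct tls_entry *e)
{
	if (e->present)
		e->flushed = 0;
}

int main(void)
{
	struct tls_entry e = { .present = 1, .flushed = 1 };

	mark_stale_after_fork(&e);
	printf("needs flush: %d\n", entry_needs_flush(&e));	/* prints 1 */
	return 0;
}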
diff --git a/arch/um/sys-x86_64/fault.c b/arch/um/sys-x86_64/fault.c index 79f37ef3dceb..ce85117fc64e 100644 --- a/arch/um/sys-x86_64/fault.c +++ b/arch/um/sys-x86_64/fault.c | |||
@@ -14,14 +14,15 @@ struct exception_table_entry | |||
14 | }; | 14 | }; |
15 | 15 | ||
16 | const struct exception_table_entry *search_exception_tables(unsigned long add); | 16 | const struct exception_table_entry *search_exception_tables(unsigned long add); |
17 | |||
17 | int arch_fixup(unsigned long address, struct uml_pt_regs *regs) | 18 | int arch_fixup(unsigned long address, struct uml_pt_regs *regs) |
18 | { | 19 | { |
19 | const struct exception_table_entry *fixup; | 20 | const struct exception_table_entry *fixup; |
20 | 21 | ||
21 | fixup = search_exception_tables(address); | 22 | fixup = search_exception_tables(address); |
22 | if(fixup != 0){ | 23 | if (fixup != 0) { |
23 | UPT_IP(regs) = fixup->fixup; | 24 | UPT_IP(regs) = fixup->fixup; |
24 | return(1); | 25 | return 1; |
25 | } | 26 | } |
26 | return(0); | 27 | return 0; |
27 | } | 28 | } |
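arch_fixup() above only rewrites UPT_IP(regs) when search_exception_tables() finds an entry for the faulting instruction; otherwise the fault is unrecoverable. Below is a minimal sketch of what such a lookup conceptually does; the table contents and the linear search are illustrative stand-ins, the real kernel binary-searches a linker-built __ex_table section.

#include <stdio.h>

struct exception_table_entry { unsigned long insn, fixup; };

/* Hypothetical table: faulting instruction -> recovery address. */
static const struct exception_table_entry table[] = {
	{ 0x1000, 0x2000 },
	{ 0x1040, 0x2040 },
};

static const struct exception_table_entry *lookup(unsigned long addr)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].insn == addr)
			return &table[i];
	return NULL;	/* no fixup: the fault cannot be recovered */
}

int main(void)
{
	const struct exception_table_entry *fixup = lookup(0x1040);

	if (fixup)
		printf("resume at %#lx\n", fixup->fixup);	/* resume at 0x2040 */
	return 0;
}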
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c index a06d66d0c409..9001d17fc3d8 100644 --- a/arch/um/sys-x86_64/signal.c +++ b/arch/um/sys-x86_64/signal.c | |||
@@ -3,16 +3,11 @@ | |||
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/stddef.h" | ||
7 | #include "linux/errno.h" | ||
8 | #include "linux/personality.h" | 6 | #include "linux/personality.h" |
9 | #include "linux/ptrace.h" | 7 | #include "linux/ptrace.h" |
10 | #include "asm/current.h" | 8 | #include "asm/unistd.h" |
11 | #include "asm/uaccess.h" | 9 | #include "asm/uaccess.h" |
12 | #include "asm/sigcontext.h" | 10 | #include "asm/ucontext.h" |
13 | #include "asm/ptrace.h" | ||
14 | #include "asm/arch/ucontext.h" | ||
15 | #include "sysdep/ptrace.h" | ||
16 | #include "frame_kern.h" | 11 | #include "frame_kern.h" |
17 | #include "skas.h" | 12 | #include "skas.h" |
18 | 13 | ||
@@ -20,28 +15,28 @@ void copy_sc(struct uml_pt_regs *regs, void *from) | |||
20 | { | 15 | { |
21 | struct sigcontext *sc = from; | 16 | struct sigcontext *sc = from; |
22 | 17 | ||
23 | #define GETREG(regs, regno, sc, regname) \ | 18 | #define GETREG(regs, regno, sc, regname) \ |
24 | (regs)->regs[(regno) / sizeof(unsigned long)] = (sc)->regname | 19 | (regs)->regs[(regno) / sizeof(unsigned long)] = (sc)->regname |
25 | 20 | ||
26 | GETREG(regs, R8, sc, r8); | 21 | GETREG(regs, R8, sc, r8); |
27 | GETREG(regs, R9, sc, r9); | 22 | GETREG(regs, R9, sc, r9); |
28 | GETREG(regs, R10, sc, r10); | 23 | GETREG(regs, R10, sc, r10); |
29 | GETREG(regs, R11, sc, r11); | 24 | GETREG(regs, R11, sc, r11); |
30 | GETREG(regs, R12, sc, r12); | 25 | GETREG(regs, R12, sc, r12); |
31 | GETREG(regs, R13, sc, r13); | 26 | GETREG(regs, R13, sc, r13); |
32 | GETREG(regs, R14, sc, r14); | 27 | GETREG(regs, R14, sc, r14); |
33 | GETREG(regs, R15, sc, r15); | 28 | GETREG(regs, R15, sc, r15); |
34 | GETREG(regs, RDI, sc, rdi); | 29 | GETREG(regs, RDI, sc, rdi); |
35 | GETREG(regs, RSI, sc, rsi); | 30 | GETREG(regs, RSI, sc, rsi); |
36 | GETREG(regs, RBP, sc, rbp); | 31 | GETREG(regs, RBP, sc, rbp); |
37 | GETREG(regs, RBX, sc, rbx); | 32 | GETREG(regs, RBX, sc, rbx); |
38 | GETREG(regs, RDX, sc, rdx); | 33 | GETREG(regs, RDX, sc, rdx); |
39 | GETREG(regs, RAX, sc, rax); | 34 | GETREG(regs, RAX, sc, rax); |
40 | GETREG(regs, RCX, sc, rcx); | 35 | GETREG(regs, RCX, sc, rcx); |
41 | GETREG(regs, RSP, sc, rsp); | 36 | GETREG(regs, RSP, sc, rsp); |
42 | GETREG(regs, RIP, sc, rip); | 37 | GETREG(regs, RIP, sc, rip); |
43 | GETREG(regs, EFLAGS, sc, eflags); | 38 | GETREG(regs, EFLAGS, sc, eflags); |
44 | GETREG(regs, CS, sc, cs); | 39 | GETREG(regs, CS, sc, cs); |
45 | 40 | ||
46 | #undef GETREG | 41 | #undef GETREG |
47 | } | 42 | } |
@@ -49,58 +44,58 @@ void copy_sc(struct uml_pt_regs *regs, void *from) | |||
49 | static int copy_sc_from_user(struct pt_regs *regs, | 44 | static int copy_sc_from_user(struct pt_regs *regs, |
50 | struct sigcontext __user *from) | 45 | struct sigcontext __user *from) |
51 | { | 46 | { |
52 | int err = 0; | 47 | int err = 0; |
53 | 48 | ||
54 | #define GETREG(regs, regno, sc, regname) \ | 49 | #define GETREG(regs, regno, sc, regname) \ |
55 | __get_user((regs)->regs.regs[(regno) / sizeof(unsigned long)], \ | 50 | __get_user((regs)->regs.regs[(regno) / sizeof(unsigned long)], \ |
56 | &(sc)->regname) | 51 | &(sc)->regname) |
57 | 52 | ||
58 | err |= GETREG(regs, R8, from, r8); | 53 | err |= GETREG(regs, R8, from, r8); |
59 | err |= GETREG(regs, R9, from, r9); | 54 | err |= GETREG(regs, R9, from, r9); |
60 | err |= GETREG(regs, R10, from, r10); | 55 | err |= GETREG(regs, R10, from, r10); |
61 | err |= GETREG(regs, R11, from, r11); | 56 | err |= GETREG(regs, R11, from, r11); |
62 | err |= GETREG(regs, R12, from, r12); | 57 | err |= GETREG(regs, R12, from, r12); |
63 | err |= GETREG(regs, R13, from, r13); | 58 | err |= GETREG(regs, R13, from, r13); |
64 | err |= GETREG(regs, R14, from, r14); | 59 | err |= GETREG(regs, R14, from, r14); |
65 | err |= GETREG(regs, R15, from, r15); | 60 | err |= GETREG(regs, R15, from, r15); |
66 | err |= GETREG(regs, RDI, from, rdi); | 61 | err |= GETREG(regs, RDI, from, rdi); |
67 | err |= GETREG(regs, RSI, from, rsi); | 62 | err |= GETREG(regs, RSI, from, rsi); |
68 | err |= GETREG(regs, RBP, from, rbp); | 63 | err |= GETREG(regs, RBP, from, rbp); |
69 | err |= GETREG(regs, RBX, from, rbx); | 64 | err |= GETREG(regs, RBX, from, rbx); |
70 | err |= GETREG(regs, RDX, from, rdx); | 65 | err |= GETREG(regs, RDX, from, rdx); |
71 | err |= GETREG(regs, RAX, from, rax); | 66 | err |= GETREG(regs, RAX, from, rax); |
72 | err |= GETREG(regs, RCX, from, rcx); | 67 | err |= GETREG(regs, RCX, from, rcx); |
73 | err |= GETREG(regs, RSP, from, rsp); | 68 | err |= GETREG(regs, RSP, from, rsp); |
74 | err |= GETREG(regs, RIP, from, rip); | 69 | err |= GETREG(regs, RIP, from, rip); |
75 | err |= GETREG(regs, EFLAGS, from, eflags); | 70 | err |= GETREG(regs, EFLAGS, from, eflags); |
76 | err |= GETREG(regs, CS, from, cs); | 71 | err |= GETREG(regs, CS, from, cs); |
77 | 72 | ||
78 | #undef GETREG | 73 | #undef GETREG |
79 | 74 | ||
80 | return err; | 75 | return err; |
81 | } | 76 | } |
82 | 77 | ||
83 | static int copy_sc_to_user(struct sigcontext __user *to, | 78 | static int copy_sc_to_user(struct sigcontext __user *to, |
84 | struct _fpstate __user *to_fp, struct pt_regs *regs, | 79 | struct _fpstate __user *to_fp, struct pt_regs *regs, |
85 | unsigned long mask, unsigned long sp) | 80 | unsigned long mask, unsigned long sp) |
86 | { | 81 | { |
87 | struct faultinfo * fi = &current->thread.arch.faultinfo; | 82 | struct faultinfo * fi = &current->thread.arch.faultinfo; |
88 | int err = 0; | 83 | int err = 0; |
89 | 84 | ||
90 | err |= __put_user(0, &to->gs); | 85 | err |= __put_user(0, &to->gs); |
91 | err |= __put_user(0, &to->fs); | 86 | err |= __put_user(0, &to->fs); |
92 | 87 | ||
93 | #define PUTREG(regs, regno, sc, regname) \ | 88 | #define PUTREG(regs, regno, sc, regname) \ |
94 | __put_user((regs)->regs.regs[(regno) / sizeof(unsigned long)], \ | 89 | __put_user((regs)->regs.regs[(regno) / sizeof(unsigned long)], \ |
95 | &(sc)->regname) | 90 | &(sc)->regname) |
96 | 91 | ||
97 | err |= PUTREG(regs, RDI, to, rdi); | 92 | err |= PUTREG(regs, RDI, to, rdi); |
98 | err |= PUTREG(regs, RSI, to, rsi); | 93 | err |= PUTREG(regs, RSI, to, rsi); |
99 | err |= PUTREG(regs, RBP, to, rbp); | 94 | err |= PUTREG(regs, RBP, to, rbp); |
100 | /* Must use original RSP, which is passed in, rather than what's in | 95 | /* Must use original RSP, which is passed in, rather than what's in |
101 | * the pt_regs, because that's already been updated to point at the | 96 | * the pt_regs, because that's already been updated to point at the |
102 | * signal frame. | 97 | * signal frame. |
103 | */ | 98 | */ |
104 | err |= __put_user(sp, &to->rsp); | 99 | err |= __put_user(sp, &to->rsp); |
105 | err |= PUTREG(regs, RBX, to, rbx); | 100 | err |= PUTREG(regs, RBX, to, rbx); |
106 | err |= PUTREG(regs, RDX, to, rdx); | 101 | err |= PUTREG(regs, RDX, to, rdx); |
@@ -116,9 +111,9 @@ static int copy_sc_to_user(struct sigcontext __user *to, | |||
116 | err |= PUTREG(regs, R15, to, r15); | 111 | err |= PUTREG(regs, R15, to, r15); |
117 | err |= PUTREG(regs, CS, to, cs); /* XXX x86_64 doesn't do this */ | 112 | err |= PUTREG(regs, CS, to, cs); /* XXX x86_64 doesn't do this */ |
118 | 113 | ||
119 | err |= __put_user(fi->cr2, &to->cr2); | 114 | err |= __put_user(fi->cr2, &to->cr2); |
120 | err |= __put_user(fi->error_code, &to->err); | 115 | err |= __put_user(fi->error_code, &to->err); |
121 | err |= __put_user(fi->trap_no, &to->trapno); | 116 | err |= __put_user(fi->trap_no, &to->trapno); |
122 | 117 | ||
123 | err |= PUTREG(regs, RIP, to, rip); | 118 | err |= PUTREG(regs, RIP, to, rip); |
124 | err |= PUTREG(regs, EFLAGS, to, eflags); | 119 | err |= PUTREG(regs, EFLAGS, to, eflags); |
@@ -131,9 +126,9 @@ static int copy_sc_to_user(struct sigcontext __user *to, | |||
131 | 126 | ||
132 | struct rt_sigframe | 127 | struct rt_sigframe |
133 | { | 128 | { |
134 | char __user *pretcode; | 129 | char __user *pretcode; |
135 | struct ucontext uc; | 130 | struct ucontext uc; |
136 | struct siginfo info; | 131 | struct siginfo info; |
137 | }; | 132 | }; |
138 | 133 | ||
139 | #define round_down(m, n) (((m) / (n)) * (n)) | 134 | #define round_down(m, n) (((m) / (n)) * (n)) |
@@ -151,7 +146,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, | |||
151 | frame = (struct rt_sigframe __user *) | 146 | frame = (struct rt_sigframe __user *) |
152 | round_down(stack_top - sizeof(struct rt_sigframe), 16); | 147 | round_down(stack_top - sizeof(struct rt_sigframe), 16); |
153 | /* Subtract 128 for a red zone and 8 for proper alignment */ | 148 | /* Subtract 128 for a red zone and 8 for proper alignment */ |
154 | frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8); | 149 | frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8); |
155 | 150 | ||
156 | if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) | 151 | if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) |
157 | goto out; | 152 | goto out; |
@@ -241,7 +236,7 @@ long sys_rt_sigreturn(struct pt_regs *regs) | |||
241 | struct ucontext __user *uc = &frame->uc; | 236 | struct ucontext __user *uc = &frame->uc; |
242 | sigset_t set; | 237 | sigset_t set; |
243 | 238 | ||
244 | if(copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) | 239 | if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) |
245 | goto segfault; | 240 | goto segfault; |
246 | 241 | ||
247 | sigdelsetmask(&set, ~_BLOCKABLE); | 242 | sigdelsetmask(&set, ~_BLOCKABLE); |
@@ -251,7 +246,7 @@ long sys_rt_sigreturn(struct pt_regs *regs) | |||
251 | recalc_sigpending(); | 246 | recalc_sigpending(); |
252 | spin_unlock_irq(&current->sighand->siglock); | 247 | spin_unlock_irq(&current->sighand->siglock); |
253 | 248 | ||
254 | if(copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext)) | 249 | if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext)) |
255 | goto segfault; | 250 | goto segfault; |
256 | 251 | ||
257 | /* Avoid ERESTART handling */ | 252 | /* Avoid ERESTART handling */ |
@@ -262,13 +257,3 @@ long sys_rt_sigreturn(struct pt_regs *regs) | |||
262 | force_sig(SIGSEGV, current); | 257 | force_sig(SIGSEGV, current); |
263 | return 0; | 258 | return 0; |
264 | } | 259 | } |
265 | /* | ||
266 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
267 | * Emacs will notice this stuff at the end of the file and automatically | ||
268 | * adjust the settings for this buffer only. This must remain at the end | ||
269 | * of the file. | ||
270 | * --------------------------------------------------------------------------- | ||
271 | * Local variables: | ||
272 | * c-file-style: "linux" | ||
273 | * End: | ||
274 | */ | ||
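The GETREG/PUTREG macros in the signal.c hunks above convert a register's byte offset (R8, RIP, ...) into an index into the flat register array by dividing by sizeof(unsigned long). Below is a minimal sketch of that indexing, with a plain array and made-up offsets standing in for uml_pt_regs and the host ptrace layout.

#include <stdio.h>

#define R8  72		/* hypothetical byte offsets into the saved-register block */
#define RIP 128

struct sc_sketch { unsigned long r8, rip; };

/* Same shape as the GETREG macro above, minus the pt_regs wrapping. */
#define GETREG(regs, regno, sc, regname) \
	((regs)[(regno) / sizeof(unsigned long)] = (sc)->regname)

int main(void)
{
	unsigned long regs[32] = { 0 };
	struct sc_sketch sc = { .r8 = 0xdead, .rip = 0x400000 };

	GETREG(regs, R8, &sc, r8);
	GETREG(regs, RIP, &sc, rip);

	printf("RIP lands in slot %zu: %#lx\n",
	       RIP / sizeof(unsigned long), regs[RIP / sizeof(unsigned long)]);
	return 0;
}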
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c index bbcab773b23d..58ae06562b4a 100644 --- a/arch/um/sys-x86_64/syscalls.c +++ b/arch/um/sys-x86_64/syscalls.c | |||
@@ -1,36 +1,34 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
2 | * Copyright 2003 PathScale, Inc. | 3 | * Copyright 2003 PathScale, Inc. |
3 | * | 4 | * |
4 | * Licensed under the GPL | 5 | * Licensed under the GPL |
5 | */ | 6 | */ |
6 | 7 | ||
7 | #include "linux/linkage.h" | 8 | #include "linux/linkage.h" |
8 | #include "linux/slab.h" | ||
9 | #include "linux/shm.h" | ||
10 | #include "linux/utsname.h" | ||
11 | #include "linux/personality.h" | 9 | #include "linux/personality.h" |
12 | #include "asm/uaccess.h" | 10 | #include "linux/utsname.h" |
13 | #define __FRAME_OFFSETS | ||
14 | #include "asm/ptrace.h" | ||
15 | #include "asm/unistd.h" | ||
16 | #include "asm/prctl.h" /* XXX This should get the constants from libc */ | 11 | #include "asm/prctl.h" /* XXX This should get the constants from libc */ |
17 | #include "kern.h" | 12 | #include "asm/uaccess.h" |
18 | #include "os.h" | 13 | #include "os.h" |
19 | 14 | ||
20 | asmlinkage long sys_uname64(struct new_utsname __user * name) | 15 | asmlinkage long sys_uname64(struct new_utsname __user * name) |
21 | { | 16 | { |
22 | int err; | 17 | int err; |
18 | |||
23 | down_read(&uts_sem); | 19 | down_read(&uts_sem); |
24 | err = copy_to_user(name, utsname(), sizeof (*name)); | 20 | err = copy_to_user(name, utsname(), sizeof (*name)); |
25 | up_read(&uts_sem); | 21 | up_read(&uts_sem); |
22 | |||
26 | if (personality(current->personality) == PER_LINUX32) | 23 | if (personality(current->personality) == PER_LINUX32) |
27 | err |= copy_to_user(&name->machine, "i686", 5); | 24 | err |= copy_to_user(&name->machine, "i686", 5); |
25 | |||
28 | return err ? -EFAULT : 0; | 26 | return err ? -EFAULT : 0; |
29 | } | 27 | } |
30 | 28 | ||
31 | long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) | 29 | long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) |
32 | { | 30 | { |
33 | unsigned long *ptr = addr, tmp; | 31 | unsigned long *ptr = addr, tmp; |
34 | long ret; | 32 | long ret; |
35 | int pid = task->mm->context.skas.id.u.pid; | 33 | int pid = task->mm->context.skas.id.u.pid; |
36 | 34 | ||
@@ -47,42 +45,42 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) | |||
47 | * arch_prctl is run on the host, then the registers are read | 45 | * arch_prctl is run on the host, then the registers are read |
48 | * back. | 46 | * back. |
49 | */ | 47 | */ |
50 | switch(code){ | 48 | switch (code) { |
51 | case ARCH_SET_FS: | 49 | case ARCH_SET_FS: |
52 | case ARCH_SET_GS: | 50 | case ARCH_SET_GS: |
53 | restore_registers(pid, &current->thread.regs.regs); | 51 | restore_registers(pid, &current->thread.regs.regs); |
54 | break; | 52 | break; |
55 | case ARCH_GET_FS: | 53 | case ARCH_GET_FS: |
56 | case ARCH_GET_GS: | 54 | case ARCH_GET_GS: |
57 | /* | 55 | /* |
58 | * With these two, we read to a local pointer and | 56 | * With these two, we read to a local pointer and |
59 | * put_user it to the userspace pointer that we were | 57 | * put_user it to the userspace pointer that we were |
60 | * given. If addr isn't valid (because it hasn't been | 58 | * given. If addr isn't valid (because it hasn't been |
61 | * faulted in or is just bogus), we want put_user to | 59 | * faulted in or is just bogus), we want put_user to |
62 | * fault it in (or return -EFAULT) instead of having | 60 | * fault it in (or return -EFAULT) instead of having |
63 | * the host return -EFAULT. | 61 | * the host return -EFAULT. |
64 | */ | 62 | */ |
65 | ptr = &tmp; | 63 | ptr = &tmp; |
66 | } | 64 | } |
67 | 65 | ||
68 | ret = os_arch_prctl(pid, code, ptr); | 66 | ret = os_arch_prctl(pid, code, ptr); |
69 | if(ret) | 67 | if (ret) |
70 | return ret; | 68 | return ret; |
71 | 69 | ||
72 | switch(code){ | 70 | switch (code) { |
73 | case ARCH_SET_FS: | 71 | case ARCH_SET_FS: |
74 | current->thread.arch.fs = (unsigned long) ptr; | 72 | current->thread.arch.fs = (unsigned long) ptr; |
75 | save_registers(pid, &current->thread.regs.regs); | 73 | save_registers(pid, &current->thread.regs.regs); |
76 | break; | 74 | break; |
77 | case ARCH_SET_GS: | 75 | case ARCH_SET_GS: |
78 | save_registers(pid, &current->thread.regs.regs); | 76 | save_registers(pid, &current->thread.regs.regs); |
79 | break; | 77 | break; |
80 | case ARCH_GET_FS: | 78 | case ARCH_GET_FS: |
81 | ret = put_user(tmp, addr); | 79 | ret = put_user(tmp, addr); |
82 | break; | 80 | break; |
83 | case ARCH_GET_GS: | 81 | case ARCH_GET_GS: |
84 | ret = put_user(tmp, addr); | 82 | ret = put_user(tmp, addr); |
85 | break; | 83 | break; |
86 | } | 84 | } |
87 | 85 | ||
88 | return ret; | 86 | return ret; |
@@ -109,8 +107,8 @@ long sys_clone(unsigned long clone_flags, unsigned long newsp, | |||
109 | 107 | ||
110 | void arch_switch_to(struct task_struct *from, struct task_struct *to) | 108 | void arch_switch_to(struct task_struct *from, struct task_struct *to) |
111 | { | 109 | { |
112 | if((to->thread.arch.fs == 0) || (to->mm == NULL)) | 110 | if ((to->thread.arch.fs == 0) || (to->mm == NULL)) |
113 | return; | 111 | return; |
114 | 112 | ||
115 | arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs); | 113 | arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs); |
116 | } | 114 | } |
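For ARCH_GET_FS/ARCH_GET_GS, arch_prctl() above redirects ptr to a kernel-local temporary before calling os_arch_prctl(), then put_user()s the result, so a bad user pointer is faulted in (or rejected) by the guest's copy-out instead of making the host return -EFAULT. Below is a minimal sketch of that read-then-copy-out pattern, with put_user modeled by a hypothetical helper.

#include <errno.h>
#include <stdio.h>

/* Stand-in for put_user(): treat NULL as an unmapped user pointer. */
static int put_user_sketch(unsigned long val, unsigned long *uaddr)
{
	if (!uaddr)
		return -EFAULT;
	*uaddr = val;
	return 0;
}

/* Read into a local first; let the copy-out decide if the pointer is usable. */
static long get_reg_sketch(unsigned long host_val, unsigned long *uaddr)
{
	unsigned long tmp = host_val;	/* what the host call filled in */

	return put_user_sketch(tmp, uaddr);
}

int main(void)
{
	unsigned long out;

	printf("%ld\n", get_reg_sketch(0x7f00, &out));	/* 0, and out == 0x7f00 */
	printf("%ld\n", get_reg_sketch(0x7f00, NULL));	/* -14, i.e. -EFAULT */
	return 0;
}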
diff --git a/arch/um/sys-x86_64/tls.c b/arch/um/sys-x86_64/tls.c index fcd5217c26a5..3bd19a51ad6b 100644 --- a/arch/um/sys-x86_64/tls.c +++ b/arch/um/sys-x86_64/tls.c | |||
@@ -13,5 +13,5 @@ int arch_copy_tls(struct task_struct *t) | |||
13 | */ | 13 | */ |
14 | t->thread.arch.fs = t->thread.regs.regs.regs[R8 / sizeof(long)]; | 14 | t->thread.arch.fs = t->thread.regs.regs.regs[R8 / sizeof(long)]; |
15 | 15 | ||
16 | return 0; | 16 | return 0; |
17 | } | 17 | } |
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h index 0c4a375000a9..035fd1c363ea 100644 --- a/include/asm-um/mmu_context.h +++ b/include/asm-um/mmu_context.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
@@ -55,14 +55,3 @@ extern int init_new_context(struct task_struct *task, struct mm_struct *mm); | |||
55 | extern void destroy_context(struct mm_struct *mm); | 55 | extern void destroy_context(struct mm_struct *mm); |
56 | 56 | ||
57 | #endif | 57 | #endif |
58 | |||
59 | /* | ||
60 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
61 | * Emacs will notice this stuff at the end of the file and automatically | ||
62 | * adjust the settings for this buffer only. This must remain at the end | ||
63 | * of the file. | ||
64 | * --------------------------------------------------------------------------- | ||
65 | * Local variables: | ||
66 | * c-file-style: "linux" | ||
67 | * End: | ||
68 | */ | ||
diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h index 126df73f5401..d40eae9ec9a2 100644 --- a/include/asm-um/processor-generic.h +++ b/include/asm-um/processor-generic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
@@ -17,11 +17,14 @@ struct task_struct; | |||
17 | struct mm_struct; | 17 | struct mm_struct; |
18 | 18 | ||
19 | struct thread_struct { | 19 | struct thread_struct { |
20 | /* This flag is set to 1 before calling do_fork (and analyzed in | 20 | struct task_struct *saved_task; |
21 | /* | ||
22 | * This flag is set to 1 before calling do_fork (and analyzed in | ||
21 | * copy_thread) to mark that we are being called from userspace (fork / | 23 | * copy_thread) to mark that we are being called from userspace (fork / |
22 | * vfork / clone), and reset to 0 after. It is left to 0 when called | 24 | * vfork / clone), and reset to 0 after. It is left to 0 when called |
23 | * from kernelspace (i.e. kernel_thread() or fork_idle(), as of 2.6.11). */ | 25 | * from kernelspace (i.e. kernel_thread() or fork_idle(), |
24 | struct task_struct *saved_task; | 26 | * as of 2.6.11). |
27 | */ | ||
25 | int forking; | 28 | int forking; |
26 | int nsyscalls; | 29 | int nsyscalls; |
27 | struct pt_regs regs; | 30 | struct pt_regs regs; |
@@ -56,7 +59,7 @@ struct thread_struct { | |||
56 | { \ | 59 | { \ |
57 | .forking = 0, \ | 60 | .forking = 0, \ |
58 | .nsyscalls = 0, \ | 61 | .nsyscalls = 0, \ |
59 | .regs = EMPTY_REGS, \ | 62 | .regs = EMPTY_REGS, \ |
60 | .fault_addr = NULL, \ | 63 | .fault_addr = NULL, \ |
61 | .prev_sched = NULL, \ | 64 | .prev_sched = NULL, \ |
62 | .temp_stack = 0, \ | 65 | .temp_stack = 0, \ |
diff --git a/include/asm-um/ptrace-generic.h b/include/asm-um/ptrace-generic.h index c8b3e6bf6a43..919581d713bd 100644 --- a/include/asm-um/ptrace-generic.h +++ b/include/asm-um/ptrace-generic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
diff --git a/include/asm-um/ptrace-i386.h b/include/asm-um/ptrace-i386.h index b733fa32b618..65102c883440 100644 --- a/include/asm-um/ptrace-i386.h +++ b/include/asm-um/ptrace-i386.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
@@ -9,9 +9,8 @@ | |||
9 | #define HOST_AUDIT_ARCH AUDIT_ARCH_I386 | 9 | #define HOST_AUDIT_ARCH AUDIT_ARCH_I386 |
10 | 10 | ||
11 | #include "linux/compiler.h" | 11 | #include "linux/compiler.h" |
12 | #include "sysdep/ptrace.h" | ||
13 | #include "asm/ptrace-generic.h" | 12 | #include "asm/ptrace-generic.h" |
14 | #include "asm/host_ldt.h" | 13 | #include "sysdep/ptrace.h" |
15 | 14 | ||
16 | #define PT_REGS_EAX(r) UPT_EAX(&(r)->regs) | 15 | #define PT_REGS_EAX(r) UPT_EAX(&(r)->regs) |
17 | #define PT_REGS_EBX(r) UPT_EBX(&(r)->regs) | 16 | #define PT_REGS_EBX(r) UPT_EBX(&(r)->regs) |
@@ -40,6 +39,12 @@ | |||
40 | 39 | ||
41 | #define user_mode(r) UPT_IS_USER(&(r)->regs) | 40 | #define user_mode(r) UPT_IS_USER(&(r)->regs) |
42 | 41 | ||
42 | /* | ||
43 | * Forward declaration to avoid including sysdep/tls.h, which causes a | ||
44 | * circular include, and compilation failures. | ||
45 | */ | ||
46 | struct user_desc; | ||
47 | |||
43 | extern int ptrace_get_thread_area(struct task_struct *child, int idx, | 48 | extern int ptrace_get_thread_area(struct task_struct *child, int idx, |
44 | struct user_desc __user *user_desc); | 49 | struct user_desc __user *user_desc); |
45 | 50 | ||