author     Dmitry Torokhov <dtor_core@ameritech.net>   2006-04-02 00:08:05 -0500
committer  Dmitry Torokhov <dtor_core@ameritech.net>   2006-04-02 00:08:05 -0500
commit     95d465fd750897ab32462a6702fbfe1b122cbbc0 (patch)
tree       65c38b2f11c51bb6932e44dd6c92f15b0091abfe /arch/um/kernel
parent     642fde17dceceb56c7ba2762733ac688666ae657 (diff)
parent     683aa4012f53b2ada0f430487e05d37b0d94e90a (diff)
Manual merge with Linus.

Conflicts:
	arch/powerpc/kernel/setup-common.c
	drivers/input/keyboard/hil_kbd.c
	drivers/input/mouse/hil_ptr.c
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/Makefile               9
-rw-r--r--  arch/um/kernel/exec_kern.c           18
-rw-r--r--  arch/um/kernel/irq.c                294
-rw-r--r--  arch/um/kernel/irq_user.c           412
-rw-r--r--  arch/um/kernel/mem.c                  6
-rw-r--r--  arch/um/kernel/physmem.c              5
-rw-r--r--  arch/um/kernel/process_kern.c        26
-rw-r--r--  arch/um/kernel/ptrace.c              44
-rw-r--r--  arch/um/kernel/sigio_kern.c          10
-rw-r--r--  arch/um/kernel/sigio_user.c         466
-rw-r--r--  arch/um/kernel/skas/process_kern.c   11
-rw-r--r--  arch/um/kernel/smp.c                 15
-rw-r--r--  arch/um/kernel/syscall_kern.c         4
-rw-r--r--  arch/um/kernel/trap_kern.c            8
-rw-r--r--  arch/um/kernel/tt/process_kern.c     10
-rw-r--r--  arch/um/kernel/tty_log.c            230
-rw-r--r--  arch/um/kernel/um_arch.c             24
17 files changed, 406 insertions, 1186 deletions
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 693018ba80f1..fe08971b64cf 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -7,23 +7,20 @@ extra-y := vmlinux.lds
 clean-files :=
 
 obj-y = config.o exec_kern.o exitcode.o \
-	init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \
-	process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \
+	init_task.o irq.o ksyms.o mem.o physmem.o \
+	process_kern.o ptrace.o reboot.o resource.o sigio_kern.o \
 	signal_kern.o smp.o syscall_kern.o sysrq.o \
 	time_kern.o tlb.o trap_kern.o uaccess.o um_arch.o umid.o
 
 obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
 obj-$(CONFIG_GPROF) += gprof_syms.o
 obj-$(CONFIG_GCOV) += gmon_syms.o
-obj-$(CONFIG_TTY_LOG) += tty_log.o
 obj-$(CONFIG_SYSCALL_DEBUG) += syscall.o
 
 obj-$(CONFIG_MODE_TT) += tt/
 obj-$(CONFIG_MODE_SKAS) += skas/
 
-user-objs-$(CONFIG_TTY_LOG) += tty_log.o
-
-USER_OBJS := $(user-objs-y) config.o tty_log.o
+USER_OBJS := config.o
 
 include arch/um/scripts/Makefile.rules
 
diff --git a/arch/um/kernel/exec_kern.c b/arch/um/kernel/exec_kern.c
index c264e1c05ab3..c0cb627bf594 100644
--- a/arch/um/kernel/exec_kern.c
+++ b/arch/um/kernel/exec_kern.c
@@ -22,6 +22,7 @@
 
 void flush_thread(void)
 {
+	arch_flush_thread(&current->thread.arch);
 	CHOOSE_MODE(flush_thread_tt(), flush_thread_skas());
 }
 
@@ -30,8 +31,6 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
 	CHOOSE_MODE_PROC(start_thread_tt, start_thread_skas, regs, eip, esp);
 }
 
-extern void log_exec(char **argv, void *tty);
-
 static long execve1(char *file, char __user * __user *argv,
 		    char __user *__user *env)
 {
@@ -60,14 +59,14 @@ long um_execve(char *file, char __user *__user *argv, char __user *__user *env)
 	return(err);
 }
 
-long sys_execve(char *file, char __user *__user *argv,
+long sys_execve(char __user *file, char __user *__user *argv,
 		char __user *__user *env)
 {
 	long error;
 	char *filename;
 
 	lock_kernel();
-	filename = getname((char __user *) file);
+	filename = getname(file);
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename)) goto out;
 	error = execve1(filename, argv, env);
@@ -76,14 +75,3 @@ long sys_execve(char *file, char __user *__user *argv,
 	unlock_kernel();
 	return(error);
 }
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index bbf94bf2921e..c39ea3abeda4 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -31,6 +31,8 @@
 #include "irq_user.h"
 #include "irq_kern.h"
 #include "os.h"
+#include "sigio.h"
+#include "misc_constants.h"
 
 /*
  * Generic, controller-independent functions:
@@ -77,6 +79,298 @@ skip:
77 return 0; 79 return 0;
78} 80}
79 81
82struct irq_fd *active_fds = NULL;
83static struct irq_fd **last_irq_ptr = &active_fds;
84
85extern void free_irqs(void);
86
87void sigio_handler(int sig, union uml_pt_regs *regs)
88{
89 struct irq_fd *irq_fd;
90 int n;
91
92 if(smp_sigio_handler()) return;
93 while(1){
94 n = os_waiting_for_events(active_fds);
95 if (n <= 0) {
96 if(n == -EINTR) continue;
97 else break;
98 }
99
100 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
101 if(irq_fd->current_events != 0){
102 irq_fd->current_events = 0;
103 do_IRQ(irq_fd->irq, regs);
104 }
105 }
106 }
107
108 free_irqs();
109}
110
111static void maybe_sigio_broken(int fd, int type)
112{
113 if(os_isatty(fd)){
114 if((type == IRQ_WRITE) && !pty_output_sigio){
115 write_sigio_workaround();
116 add_sigio_fd(fd, 0);
117 }
118 else if((type == IRQ_READ) && !pty_close_sigio){
119 write_sigio_workaround();
120 add_sigio_fd(fd, 1);
121 }
122 }
123}
124
125
126int activate_fd(int irq, int fd, int type, void *dev_id)
127{
128 struct pollfd *tmp_pfd;
129 struct irq_fd *new_fd, *irq_fd;
130 unsigned long flags;
131 int pid, events, err, n;
132
133 pid = os_getpid();
134 err = os_set_fd_async(fd, pid);
135 if(err < 0)
136 goto out;
137
138 new_fd = um_kmalloc(sizeof(*new_fd));
139 err = -ENOMEM;
140 if(new_fd == NULL)
141 goto out;
142
143 if(type == IRQ_READ) events = UM_POLLIN | UM_POLLPRI;
144 else events = UM_POLLOUT;
145 *new_fd = ((struct irq_fd) { .next = NULL,
146 .id = dev_id,
147 .fd = fd,
148 .type = type,
149 .irq = irq,
150 .pid = pid,
151 .events = events,
152 .current_events = 0 } );
153
154 /* Critical section - locked by a spinlock because this stuff can
155 * be changed from interrupt handlers. The stuff above is done
156 * outside the lock because it allocates memory.
157 */
158
159 /* Actually, it only looks like it can be called from interrupt
160 * context. The culprit is reactivate_fd, which calls
161 * maybe_sigio_broken, which calls write_sigio_workaround,
162 * which calls activate_fd. However, write_sigio_workaround should
163 * only be called once, at boot time. That would make it clear that
164 * this is called only from process context, and can be locked with
165 * a semaphore.
166 */
167 flags = irq_lock();
168 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
169 if((irq_fd->fd == fd) && (irq_fd->type == type)){
170 printk("Registering fd %d twice\n", fd);
171 printk("Irqs : %d, %d\n", irq_fd->irq, irq);
172 printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id);
173 goto out_unlock;
174 }
175 }
176
177 /*-------------*/
178 if(type == IRQ_WRITE)
179 fd = -1;
180
181 tmp_pfd = NULL;
182 n = 0;
183
184 while(1){
185 n = os_create_pollfd(fd, events, tmp_pfd, n);
186 if (n == 0)
187 break;
188
189 /* n > 0
190 * It means we couldn't put new pollfd to current pollfds
191 * and tmp_fds is NULL or too small for new pollfds array.
192 * Needed size is equal to n as minimum.
193 *
194 * Here we have to drop the lock in order to call
195 * kmalloc, which might sleep.
196 * If something else came in and changed the pollfds array
197 * so we will not be able to put new pollfd struct to pollfds
198 * then we free the buffer tmp_fds and try again.
199 */
200 irq_unlock(flags);
201 if (tmp_pfd != NULL) {
202 kfree(tmp_pfd);
203 tmp_pfd = NULL;
204 }
205
206 tmp_pfd = um_kmalloc(n);
207 if (tmp_pfd == NULL)
208 goto out_kfree;
209
210 flags = irq_lock();
211 }
212 /*-------------*/
213
214 *last_irq_ptr = new_fd;
215 last_irq_ptr = &new_fd->next;
216
217 irq_unlock(flags);
218
219 /* This calls activate_fd, so it has to be outside the critical
220 * section.
221 */
222 maybe_sigio_broken(fd, type);
223
224 return(0);
225
226 out_unlock:
227 irq_unlock(flags);
228 out_kfree:
229 kfree(new_fd);
230 out:
231 return(err);
232}
233
234static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
235{
236 unsigned long flags;
237
238 flags = irq_lock();
239 os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
240 irq_unlock(flags);
241}
242
243struct irq_and_dev {
244 int irq;
245 void *dev;
246};
247
248static int same_irq_and_dev(struct irq_fd *irq, void *d)
249{
250 struct irq_and_dev *data = d;
251
252 return((irq->irq == data->irq) && (irq->id == data->dev));
253}
254
255void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
256{
257 struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
258 .dev = dev });
259
260 free_irq_by_cb(same_irq_and_dev, &data);
261}
262
263static int same_fd(struct irq_fd *irq, void *fd)
264{
265 return(irq->fd == *((int *) fd));
266}
267
268void free_irq_by_fd(int fd)
269{
270 free_irq_by_cb(same_fd, &fd);
271}
272
273static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
274{
275 struct irq_fd *irq;
276 int i = 0;
277 int fdi;
278
279 for(irq=active_fds; irq != NULL; irq = irq->next){
280 if((irq->fd == fd) && (irq->irq == irqnum)) break;
281 i++;
282 }
283 if(irq == NULL){
284 printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
285 goto out;
286 }
287 fdi = os_get_pollfd(i);
288 if((fdi != -1) && (fdi != fd)){
289 printk("find_irq_by_fd - mismatch between active_fds and "
290 "pollfds, fd %d vs %d, need %d\n", irq->fd,
291 fdi, fd);
292 irq = NULL;
293 goto out;
294 }
295 *index_out = i;
296 out:
297 return(irq);
298}
299
300void reactivate_fd(int fd, int irqnum)
301{
302 struct irq_fd *irq;
303 unsigned long flags;
304 int i;
305
306 flags = irq_lock();
307 irq = find_irq_by_fd(fd, irqnum, &i);
308 if(irq == NULL){
309 irq_unlock(flags);
310 return;
311 }
312 os_set_pollfd(i, irq->fd);
313 irq_unlock(flags);
314
315 /* This calls activate_fd, so it has to be outside the critical
316 * section.
317 */
318 maybe_sigio_broken(fd, irq->type);
319}
320
321void deactivate_fd(int fd, int irqnum)
322{
323 struct irq_fd *irq;
324 unsigned long flags;
325 int i;
326
327 flags = irq_lock();
328 irq = find_irq_by_fd(fd, irqnum, &i);
329 if(irq == NULL)
330 goto out;
331 os_set_pollfd(i, -1);
332 out:
333 irq_unlock(flags);
334}
335
336int deactivate_all_fds(void)
337{
338 struct irq_fd *irq;
339 int err;
340
341 for(irq=active_fds;irq != NULL;irq = irq->next){
342 err = os_clear_fd_async(irq->fd);
343 if(err)
344 return(err);
345 }
346 /* If there is a signal already queued, after unblocking ignore it */
347 os_set_ioignore();
348
349 return(0);
350}
351
352void forward_interrupts(int pid)
353{
354 struct irq_fd *irq;
355 unsigned long flags;
356 int err;
357
358 flags = irq_lock();
359 for(irq=active_fds;irq != NULL;irq = irq->next){
360 err = os_set_owner(irq->fd, pid);
361 if(err < 0){
362 /* XXX Just remove the irq rather than
363 * print out an infinite stream of these
364 */
365 printk("Failed to forward %d to pid %d, err = %d\n",
366 irq->fd, pid, -err);
367 }
368
369 irq->pid = pid;
370 }
371 irq_unlock(flags);
372}
373
80/* 374/*
81 * do_IRQ handles all normal device IRQ's (the special 375 * do_IRQ handles all normal device IRQ's (the special
82 * SMP cross-CPU interrupts have their own specific 376 * SMP cross-CPU interrupts have their own specific
diff --git a/arch/um/kernel/irq_user.c b/arch/um/kernel/irq_user.c
deleted file mode 100644
index 0e32f5f4a887..000000000000
--- a/arch/um/kernel/irq_user.c
+++ /dev/null
@@ -1,412 +0,0 @@
1/*
2 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#include <stdlib.h>
7#include <unistd.h>
8#include <errno.h>
9#include <signal.h>
10#include <string.h>
11#include <sys/poll.h>
12#include <sys/types.h>
13#include <sys/time.h>
14#include "user_util.h"
15#include "kern_util.h"
16#include "user.h"
17#include "process.h"
18#include "sigio.h"
19#include "irq_user.h"
20#include "os.h"
21
22struct irq_fd {
23 struct irq_fd *next;
24 void *id;
25 int fd;
26 int type;
27 int irq;
28 int pid;
29 int events;
30 int current_events;
31};
32
33static struct irq_fd *active_fds = NULL;
34static struct irq_fd **last_irq_ptr = &active_fds;
35
36static struct pollfd *pollfds = NULL;
37static int pollfds_num = 0;
38static int pollfds_size = 0;
39
40extern int io_count, intr_count;
41
42extern void free_irqs(void);
43
44void sigio_handler(int sig, union uml_pt_regs *regs)
45{
46 struct irq_fd *irq_fd;
47 int i, n;
48
49 if(smp_sigio_handler()) return;
50 while(1){
51 n = poll(pollfds, pollfds_num, 0);
52 if(n < 0){
53 if(errno == EINTR) continue;
54 printk("sigio_handler : poll returned %d, "
55 "errno = %d\n", n, errno);
56 break;
57 }
58 if(n == 0) break;
59
60 irq_fd = active_fds;
61 for(i = 0; i < pollfds_num; i++){
62 if(pollfds[i].revents != 0){
63 irq_fd->current_events = pollfds[i].revents;
64 pollfds[i].fd = -1;
65 }
66 irq_fd = irq_fd->next;
67 }
68
69 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
70 if(irq_fd->current_events != 0){
71 irq_fd->current_events = 0;
72 do_IRQ(irq_fd->irq, regs);
73 }
74 }
75 }
76
77 free_irqs();
78}
79
80int activate_ipi(int fd, int pid)
81{
82 return(os_set_fd_async(fd, pid));
83}
84
85static void maybe_sigio_broken(int fd, int type)
86{
87 if(isatty(fd)){
88 if((type == IRQ_WRITE) && !pty_output_sigio){
89 write_sigio_workaround();
90 add_sigio_fd(fd, 0);
91 }
92 else if((type == IRQ_READ) && !pty_close_sigio){
93 write_sigio_workaround();
94 add_sigio_fd(fd, 1);
95 }
96 }
97}
98
99int activate_fd(int irq, int fd, int type, void *dev_id)
100{
101 struct pollfd *tmp_pfd;
102 struct irq_fd *new_fd, *irq_fd;
103 unsigned long flags;
104 int pid, events, err, n, size;
105
106 pid = os_getpid();
107 err = os_set_fd_async(fd, pid);
108 if(err < 0)
109 goto out;
110
111 new_fd = um_kmalloc(sizeof(*new_fd));
112 err = -ENOMEM;
113 if(new_fd == NULL)
114 goto out;
115
116 if(type == IRQ_READ) events = POLLIN | POLLPRI;
117 else events = POLLOUT;
118 *new_fd = ((struct irq_fd) { .next = NULL,
119 .id = dev_id,
120 .fd = fd,
121 .type = type,
122 .irq = irq,
123 .pid = pid,
124 .events = events,
125 .current_events = 0 } );
126
127 /* Critical section - locked by a spinlock because this stuff can
128 * be changed from interrupt handlers. The stuff above is done
129 * outside the lock because it allocates memory.
130 */
131
132 /* Actually, it only looks like it can be called from interrupt
133 * context. The culprit is reactivate_fd, which calls
134 * maybe_sigio_broken, which calls write_sigio_workaround,
135 * which calls activate_fd. However, write_sigio_workaround should
136 * only be called once, at boot time. That would make it clear that
137 * this is called only from process context, and can be locked with
138 * a semaphore.
139 */
140 flags = irq_lock();
141 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
142 if((irq_fd->fd == fd) && (irq_fd->type == type)){
143 printk("Registering fd %d twice\n", fd);
144 printk("Irqs : %d, %d\n", irq_fd->irq, irq);
145 printk("Ids : 0x%x, 0x%x\n", irq_fd->id, dev_id);
146 goto out_unlock;
147 }
148 }
149
150 n = pollfds_num;
151 if(n == pollfds_size){
152 while(1){
153 /* Here we have to drop the lock in order to call
154 * kmalloc, which might sleep. If something else
155 * came in and changed the pollfds array, we free
156 * the buffer and try again.
157 */
158 irq_unlock(flags);
159 size = (pollfds_num + 1) * sizeof(pollfds[0]);
160 tmp_pfd = um_kmalloc(size);
161 flags = irq_lock();
162 if(tmp_pfd == NULL)
163 goto out_unlock;
164 if(n == pollfds_size)
165 break;
166 kfree(tmp_pfd);
167 }
168 if(pollfds != NULL){
169 memcpy(tmp_pfd, pollfds,
170 sizeof(pollfds[0]) * pollfds_size);
171 kfree(pollfds);
172 }
173 pollfds = tmp_pfd;
174 pollfds_size++;
175 }
176
177 if(type == IRQ_WRITE)
178 fd = -1;
179
180 pollfds[pollfds_num] = ((struct pollfd) { .fd = fd,
181 .events = events,
182 .revents = 0 });
183 pollfds_num++;
184
185 *last_irq_ptr = new_fd;
186 last_irq_ptr = &new_fd->next;
187
188 irq_unlock(flags);
189
190 /* This calls activate_fd, so it has to be outside the critical
191 * section.
192 */
193 maybe_sigio_broken(fd, type);
194
195 return(0);
196
197 out_unlock:
198 irq_unlock(flags);
199 kfree(new_fd);
200 out:
201 return(err);
202}
203
204static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
205{
206 struct irq_fd **prev;
207 unsigned long flags;
208 int i = 0;
209
210 flags = irq_lock();
211 prev = &active_fds;
212 while(*prev != NULL){
213 if((*test)(*prev, arg)){
214 struct irq_fd *old_fd = *prev;
215 if((pollfds[i].fd != -1) &&
216 (pollfds[i].fd != (*prev)->fd)){
217 printk("free_irq_by_cb - mismatch between "
218 "active_fds and pollfds, fd %d vs %d\n",
219 (*prev)->fd, pollfds[i].fd);
220 goto out;
221 }
222
223 pollfds_num--;
224
225 /* This moves the *whole* array after pollfds[i] (though
226 * it doesn't spot as such)! */
227
228 memmove(&pollfds[i], &pollfds[i + 1],
229 (pollfds_num - i) * sizeof(pollfds[0]));
230
231 if(last_irq_ptr == &old_fd->next)
232 last_irq_ptr = prev;
233 *prev = (*prev)->next;
234 if(old_fd->type == IRQ_WRITE)
235 ignore_sigio_fd(old_fd->fd);
236 kfree(old_fd);
237 continue;
238 }
239 prev = &(*prev)->next;
240 i++;
241 }
242 out:
243 irq_unlock(flags);
244}
245
246struct irq_and_dev {
247 int irq;
248 void *dev;
249};
250
251static int same_irq_and_dev(struct irq_fd *irq, void *d)
252{
253 struct irq_and_dev *data = d;
254
255 return((irq->irq == data->irq) && (irq->id == data->dev));
256}
257
258void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
259{
260 struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
261 .dev = dev });
262
263 free_irq_by_cb(same_irq_and_dev, &data);
264}
265
266static int same_fd(struct irq_fd *irq, void *fd)
267{
268 return(irq->fd == *((int *) fd));
269}
270
271void free_irq_by_fd(int fd)
272{
273 free_irq_by_cb(same_fd, &fd);
274}
275
276static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
277{
278 struct irq_fd *irq;
279 int i = 0;
280
281 for(irq=active_fds; irq != NULL; irq = irq->next){
282 if((irq->fd == fd) && (irq->irq == irqnum)) break;
283 i++;
284 }
285 if(irq == NULL){
286 printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
287 goto out;
288 }
289 if((pollfds[i].fd != -1) && (pollfds[i].fd != fd)){
290 printk("find_irq_by_fd - mismatch between active_fds and "
291 "pollfds, fd %d vs %d, need %d\n", irq->fd,
292 pollfds[i].fd, fd);
293 irq = NULL;
294 goto out;
295 }
296 *index_out = i;
297 out:
298 return(irq);
299}
300
301void reactivate_fd(int fd, int irqnum)
302{
303 struct irq_fd *irq;
304 unsigned long flags;
305 int i;
306
307 flags = irq_lock();
308 irq = find_irq_by_fd(fd, irqnum, &i);
309 if(irq == NULL){
310 irq_unlock(flags);
311 return;
312 }
313
314 pollfds[i].fd = irq->fd;
315
316 irq_unlock(flags);
317
318 /* This calls activate_fd, so it has to be outside the critical
319 * section.
320 */
321 maybe_sigio_broken(fd, irq->type);
322}
323
324void deactivate_fd(int fd, int irqnum)
325{
326 struct irq_fd *irq;
327 unsigned long flags;
328 int i;
329
330 flags = irq_lock();
331 irq = find_irq_by_fd(fd, irqnum, &i);
332 if(irq == NULL)
333 goto out;
334 pollfds[i].fd = -1;
335 out:
336 irq_unlock(flags);
337}
338
339int deactivate_all_fds(void)
340{
341 struct irq_fd *irq;
342 int err;
343
344 for(irq=active_fds;irq != NULL;irq = irq->next){
345 err = os_clear_fd_async(irq->fd);
346 if(err)
347 return(err);
348 }
349 /* If there is a signal already queued, after unblocking ignore it */
350 set_handler(SIGIO, SIG_IGN, 0, -1);
351
352 return(0);
353}
354
355void forward_ipi(int fd, int pid)
356{
357 int err;
358
359 err = os_set_owner(fd, pid);
360 if(err < 0)
361 printk("forward_ipi: set_owner failed, fd = %d, me = %d, "
362 "target = %d, err = %d\n", fd, os_getpid(), pid, -err);
363}
364
365void forward_interrupts(int pid)
366{
367 struct irq_fd *irq;
368 unsigned long flags;
369 int err;
370
371 flags = irq_lock();
372 for(irq=active_fds;irq != NULL;irq = irq->next){
373 err = os_set_owner(irq->fd, pid);
374 if(err < 0){
375 /* XXX Just remove the irq rather than
376 * print out an infinite stream of these
377 */
378 printk("Failed to forward %d to pid %d, err = %d\n",
379 irq->fd, pid, -err);
380 }
381
382 irq->pid = pid;
383 }
384 irq_unlock(flags);
385}
386
387void init_irq_signals(int on_sigstack)
388{
389 __sighandler_t h;
390 int flags;
391
392 flags = on_sigstack ? SA_ONSTACK : 0;
393 if(timer_irq_inited) h = (__sighandler_t) alarm_handler;
394 else h = boot_timer_handler;
395
396 set_handler(SIGVTALRM, h, flags | SA_RESTART,
397 SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1);
398 set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART,
399 SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
400 signal(SIGWINCH, SIG_IGN);
401}
402
403/*
404 * Overrides for Emacs so that we follow Linus's tabbing style.
405 * Emacs will notice this stuff at the end of the file and automatically
406 * adjust the settings for this buffer only. This must remain at the end
407 * of the file.
408 * ---------------------------------------------------------------------------
409 * Local variables:
410 * c-file-style: "linux"
411 * End:
412 */
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index fa4f915be5c5..44e41a35f000 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -30,7 +30,7 @@ extern char __binary_start;
 unsigned long *empty_zero_page = NULL;
 unsigned long *empty_bad_page = NULL;
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
-unsigned long highmem;
+unsigned long long highmem;
 int kmalloc_ok = 0;
 
 static unsigned long brk_end;
@@ -57,7 +57,7 @@ static void setup_highmem(unsigned long highmem_start,
 	for(i = 0; i < highmem_len >> PAGE_SHIFT; i++){
 		page = &mem_map[highmem_pfn + i];
 		ClearPageReserved(page);
-		set_page_count(page, 1);
+		init_page_count(page);
 		__free_page(page);
 	}
 }
@@ -296,7 +296,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	       (end - start) >> 10);
 	for (; start < end; start += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(start));
-		set_page_count(virt_to_page(start), 1);
+		init_page_count(virt_to_page(start));
 		free_page(start);
 		totalram_pages++;
 	}
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 544665e04513..0500800df1c1 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -9,6 +9,7 @@
 #include "linux/vmalloc.h"
 #include "linux/bootmem.h"
 #include "linux/module.h"
+#include "linux/pfn.h"
 #include "asm/types.h"
 #include "asm/pgtable.h"
 #include "kern_util.h"
@@ -279,7 +280,7 @@ int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
 
 	for(i = 0; i < total_pages; i++){
 		p = &map[i];
-		set_page_count(p, 0);
+		memset(p, 0, sizeof(struct page));
 		SetPageReserved(p);
 		INIT_LIST_HEAD(&p->lru);
 	}
@@ -316,8 +317,6 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
 	}
 }
 
-#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-
 extern int __syscall_stub_start, __binary_start;
 
 void setup_physmem(unsigned long start, unsigned long reserve_end,
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 3113cab8675e..f6a5a502120b 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -156,9 +156,25 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		 unsigned long stack_top, struct task_struct * p,
 		 struct pt_regs *regs)
 {
+	int ret;
+
 	p->thread = (struct thread_struct) INIT_THREAD;
-	return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
-				clone_flags, sp, stack_top, p, regs));
+	ret = CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
+			       clone_flags, sp, stack_top, p, regs);
+
+	if (ret || !current->thread.forking)
+		goto out;
+
+	clear_flushed_tls(p);
+
+	/*
+	 * Set a new TLS for the child thread?
+	 */
+	if (clone_flags & CLONE_SETTLS)
+		ret = arch_copy_tls(p);
+
+out:
+	return ret;
 }
 
 void initial_thread_cb(void (*proc)(void *), void *arg)
@@ -185,10 +201,6 @@ void default_idle(void)
 {
 	CHOOSE_MODE(uml_idle_timer(), (void) 0);
 
-	atomic_inc(&init_mm.mm_count);
-	current->mm = &init_mm;
-	current->active_mm = &init_mm;
-
 	while(1){
 		/* endless idle loop with no priority at all */
 
@@ -407,7 +419,7 @@ static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int
 	return strlen(buf);
 }
 
-static int proc_write_sysemu(struct file *file,const char *buf, unsigned long count,void *data)
+static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
 {
 	char tmp[2];
 
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 98e09395c093..60d2eda995c1 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -46,6 +46,7 @@ extern int poke_user(struct task_struct * child, long addr, long data);
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	int i, ret;
+	unsigned long __user *p = (void __user *)(unsigned long)data;
 
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
@@ -58,7 +59,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
 		if (copied != sizeof(tmp))
 			break;
-		ret = put_user(tmp, (unsigned long __user *) data);
+		ret = put_user(tmp, p);
 		break;
 	}
 
@@ -136,15 +137,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 #ifdef PTRACE_GETREGS
 	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
-		if (!access_ok(VERIFY_WRITE, (unsigned long *)data,
-			       MAX_REG_OFFSET)) {
+		if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) {
 			ret = -EIO;
 			break;
 		}
 		for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
-			__put_user(getreg(child, i),
-				   (unsigned long __user *) data);
-			data += sizeof(long);
+			__put_user(getreg(child, i), p);
+			p++;
 		}
 		ret = 0;
 		break;
@@ -153,15 +152,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #ifdef PTRACE_SETREGS
 	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
 		unsigned long tmp = 0;
-		if (!access_ok(VERIFY_READ, (unsigned *)data,
-			       MAX_REG_OFFSET)) {
+		if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) {
 			ret = -EIO;
 			break;
 		}
 		for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
-			__get_user(tmp, (unsigned long __user *) data);
+			__get_user(tmp, p);
 			putreg(child, i, tmp);
-			data += sizeof(long);
+			p++;
 		}
 		ret = 0;
 		break;
@@ -187,14 +185,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		ret = set_fpxregs(data, child);
 		break;
 #endif
+	case PTRACE_GET_THREAD_AREA:
+		ret = ptrace_get_thread_area(child, addr,
+					     (struct user_desc __user *) data);
+		break;
+
+	case PTRACE_SET_THREAD_AREA:
+		ret = ptrace_set_thread_area(child, addr,
+					     (struct user_desc __user *) data);
+		break;
+
 	case PTRACE_FAULTINFO: {
 		/* Take the info from thread->arch->faultinfo,
 		 * but transfer max. sizeof(struct ptrace_faultinfo).
 		 * On i386, ptrace_faultinfo is smaller!
 		 */
-		ret = copy_to_user((unsigned long __user *) data,
-				   &child->thread.arch.faultinfo,
-				   sizeof(struct ptrace_faultinfo));
+		ret = copy_to_user(p, &child->thread.arch.faultinfo,
+				   sizeof(struct ptrace_faultinfo));
 		if(ret)
 			break;
 		break;
@@ -204,8 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case PTRACE_LDT: {
 		struct ptrace_ldt ldt;
 
-		if(copy_from_user(&ldt, (unsigned long __user *) data,
-				  sizeof(ldt))){
+		if(copy_from_user(&ldt, p, sizeof(ldt))){
 			ret = -EIO;
 			break;
 		}
diff --git a/arch/um/kernel/sigio_kern.c b/arch/um/kernel/sigio_kern.c
index 229988463c4c..1c1300fb1e95 100644
--- a/arch/um/kernel/sigio_kern.c
+++ b/arch/um/kernel/sigio_kern.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2002 - 2003 Jeff Dike (jdike@addtoit.com) 2 * Copyright (C) 2002 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -12,13 +12,16 @@
 #include "sigio.h"
 #include "irq_user.h"
 #include "irq_kern.h"
+#include "os.h"
 
 /* Protected by sigio_lock() called from write_sigio_workaround */
 static int sigio_irq_fd = -1;
 
 static irqreturn_t sigio_interrupt(int irq, void *data, struct pt_regs *unused)
 {
-	read_sigio_fd(sigio_irq_fd);
+	char c;
+
+	os_read_file(sigio_irq_fd, &c, sizeof(c));
 	reactivate_fd(sigio_irq_fd, SIGIO_WRITE_IRQ);
 	return(IRQ_HANDLED);
 }
@@ -51,6 +54,9 @@ void sigio_unlock(void)
 	spin_unlock(&sigio_spinlock);
 }
 
+extern void sigio_cleanup(void);
+__uml_exitcall(sigio_cleanup);
+
 /*
  * Overrides for Emacs so that we follow Linus's tabbing style.
  * Emacs will notice this stuff at the end of the file and automatically
diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c
deleted file mode 100644
index f7b18e157d35..000000000000
--- a/arch/um/kernel/sigio_user.c
+++ /dev/null
@@ -1,466 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#include <unistd.h>
7#include <stdlib.h>
8#include <termios.h>
9#include <pty.h>
10#include <signal.h>
11#include <errno.h>
12#include <string.h>
13#include <sched.h>
14#include <sys/socket.h>
15#include <sys/poll.h>
16#include "init.h"
17#include "user.h"
18#include "kern_util.h"
19#include "user_util.h"
20#include "sigio.h"
21#include "os.h"
22
23/* Changed during early boot */
24int pty_output_sigio = 0;
25int pty_close_sigio = 0;
26
27/* Used as a flag during SIGIO testing early in boot */
28static volatile int got_sigio = 0;
29
30void __init handler(int sig)
31{
32 got_sigio = 1;
33}
34
35struct openpty_arg {
36 int master;
37 int slave;
38 int err;
39};
40
41static void openpty_cb(void *arg)
42{
43 struct openpty_arg *info = arg;
44
45 info->err = 0;
46 if(openpty(&info->master, &info->slave, NULL, NULL, NULL))
47 info->err = -errno;
48}
49
50void __init check_one_sigio(void (*proc)(int, int))
51{
52 struct sigaction old, new;
53 struct openpty_arg pty = { .master = -1, .slave = -1 };
54 int master, slave, err;
55
56 initial_thread_cb(openpty_cb, &pty);
57 if(pty.err){
58 printk("openpty failed, errno = %d\n", -pty.err);
59 return;
60 }
61
62 master = pty.master;
63 slave = pty.slave;
64
65 if((master == -1) || (slave == -1)){
66 printk("openpty failed to allocate a pty\n");
67 return;
68 }
69
70 /* Not now, but complain so we now where we failed. */
71 err = raw(master);
72 if (err < 0)
73 panic("check_sigio : __raw failed, errno = %d\n", -err);
74
75 err = os_sigio_async(master, slave);
76 if(err < 0)
77 panic("tty_fds : sigio_async failed, err = %d\n", -err);
78
79 if(sigaction(SIGIO, NULL, &old) < 0)
80 panic("check_sigio : sigaction 1 failed, errno = %d\n", errno);
81 new = old;
82 new.sa_handler = handler;
83 if(sigaction(SIGIO, &new, NULL) < 0)
84 panic("check_sigio : sigaction 2 failed, errno = %d\n", errno);
85
86 got_sigio = 0;
87 (*proc)(master, slave);
88
89 os_close_file(master);
90 os_close_file(slave);
91
92 if(sigaction(SIGIO, &old, NULL) < 0)
93 panic("check_sigio : sigaction 3 failed, errno = %d\n", errno);
94}
95
96static void tty_output(int master, int slave)
97{
98 int n;
99 char buf[512];
100
101 printk("Checking that host ptys support output SIGIO...");
102
103 memset(buf, 0, sizeof(buf));
104
105 while(os_write_file(master, buf, sizeof(buf)) > 0) ;
106 if(errno != EAGAIN)
107 panic("check_sigio : write failed, errno = %d\n", errno);
108 while(((n = os_read_file(slave, buf, sizeof(buf))) > 0) && !got_sigio) ;
109
110 if (got_sigio) {
111 printk("Yes\n");
112 pty_output_sigio = 1;
113 } else if (n == -EAGAIN) {
114 printk("No, enabling workaround\n");
115 } else {
116 panic("check_sigio : read failed, err = %d\n", n);
117 }
118}
119
120static void tty_close(int master, int slave)
121{
122 printk("Checking that host ptys support SIGIO on close...");
123
124 os_close_file(slave);
125 if(got_sigio){
126 printk("Yes\n");
127 pty_close_sigio = 1;
128 }
129 else printk("No, enabling workaround\n");
130}
131
132void __init check_sigio(void)
133{
134 if((os_access("/dev/ptmx", OS_ACC_R_OK) < 0) &&
135 (os_access("/dev/ptyp0", OS_ACC_R_OK) < 0)){
136 printk("No pseudo-terminals available - skipping pty SIGIO "
137 "check\n");
138 return;
139 }
140 check_one_sigio(tty_output);
141 check_one_sigio(tty_close);
142}
143
144/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
145 * exitcall.
146 */
147static int write_sigio_pid = -1;
148
149/* These arrays are initialized before the sigio thread is started, and
150 * the descriptors closed after it is killed. So, it can't see them change.
151 * On the UML side, they are changed under the sigio_lock.
152 */
153static int write_sigio_fds[2] = { -1, -1 };
154static int sigio_private[2] = { -1, -1 };
155
156struct pollfds {
157 struct pollfd *poll;
158 int size;
159 int used;
160};
161
162/* Protected by sigio_lock(). Used by the sigio thread, but the UML thread
163 * synchronizes with it.
164 */
165struct pollfds current_poll = {
166 .poll = NULL,
167 .size = 0,
168 .used = 0
169};
170
171struct pollfds next_poll = {
172 .poll = NULL,
173 .size = 0,
174 .used = 0
175};
176
177static int write_sigio_thread(void *unused)
178{
179 struct pollfds *fds, tmp;
180 struct pollfd *p;
181 int i, n, respond_fd;
182 char c;
183
184 signal(SIGWINCH, SIG_IGN);
185 fds = &current_poll;
186 while(1){
187 n = poll(fds->poll, fds->used, -1);
188 if(n < 0){
189 if(errno == EINTR) continue;
190 printk("write_sigio_thread : poll returned %d, "
191 "errno = %d\n", n, errno);
192 }
193 for(i = 0; i < fds->used; i++){
194 p = &fds->poll[i];
195 if(p->revents == 0) continue;
196 if(p->fd == sigio_private[1]){
197 n = os_read_file(sigio_private[1], &c, sizeof(c));
198 if(n != sizeof(c))
199 printk("write_sigio_thread : "
200 "read failed, err = %d\n", -n);
201 tmp = current_poll;
202 current_poll = next_poll;
203 next_poll = tmp;
204 respond_fd = sigio_private[1];
205 }
206 else {
207 respond_fd = write_sigio_fds[1];
208 fds->used--;
209 memmove(&fds->poll[i], &fds->poll[i + 1],
210 (fds->used - i) * sizeof(*fds->poll));
211 }
212
213 n = os_write_file(respond_fd, &c, sizeof(c));
214 if(n != sizeof(c))
215 printk("write_sigio_thread : write failed, "
216 "err = %d\n", -n);
217 }
218 }
219
220 return 0;
221}
222
223static int need_poll(int n)
224{
225 if(n <= next_poll.size){
226 next_poll.used = n;
227 return(0);
228 }
229 kfree(next_poll.poll);
230 next_poll.poll = um_kmalloc_atomic(n * sizeof(struct pollfd));
231 if(next_poll.poll == NULL){
232 printk("need_poll : failed to allocate new pollfds\n");
233 next_poll.size = 0;
234 next_poll.used = 0;
235 return(-1);
236 }
237 next_poll.size = n;
238 next_poll.used = n;
239 return(0);
240}
241
242/* Must be called with sigio_lock held, because it's needed by the marked
243 * critical section. */
244static void update_thread(void)
245{
246 unsigned long flags;
247 int n;
248 char c;
249
250 flags = set_signals(0);
251 n = os_write_file(sigio_private[0], &c, sizeof(c));
252 if(n != sizeof(c)){
253 printk("update_thread : write failed, err = %d\n", -n);
254 goto fail;
255 }
256
257 n = os_read_file(sigio_private[0], &c, sizeof(c));
258 if(n != sizeof(c)){
259 printk("update_thread : read failed, err = %d\n", -n);
260 goto fail;
261 }
262
263 set_signals(flags);
264 return;
265 fail:
266 /* Critical section start */
267 if(write_sigio_pid != -1)
268 os_kill_process(write_sigio_pid, 1);
269 write_sigio_pid = -1;
270 os_close_file(sigio_private[0]);
271 os_close_file(sigio_private[1]);
272 os_close_file(write_sigio_fds[0]);
273 os_close_file(write_sigio_fds[1]);
274 /* Critical section end */
275 set_signals(flags);
276}
277
278int add_sigio_fd(int fd, int read)
279{
280 int err = 0, i, n, events;
281
282 sigio_lock();
283 for(i = 0; i < current_poll.used; i++){
284 if(current_poll.poll[i].fd == fd)
285 goto out;
286 }
287
288 n = current_poll.used + 1;
289 err = need_poll(n);
290 if(err)
291 goto out;
292
293 for(i = 0; i < current_poll.used; i++)
294 next_poll.poll[i] = current_poll.poll[i];
295
296 if(read) events = POLLIN;
297 else events = POLLOUT;
298
299 next_poll.poll[n - 1] = ((struct pollfd) { .fd = fd,
300 .events = events,
301 .revents = 0 });
302 update_thread();
303 out:
304 sigio_unlock();
305 return(err);
306}
307
308int ignore_sigio_fd(int fd)
309{
310 struct pollfd *p;
311 int err = 0, i, n = 0;
312
313 sigio_lock();
314 for(i = 0; i < current_poll.used; i++){
315 if(current_poll.poll[i].fd == fd) break;
316 }
317 if(i == current_poll.used)
318 goto out;
319
320 err = need_poll(current_poll.used - 1);
321 if(err)
322 goto out;
323
324 for(i = 0; i < current_poll.used; i++){
325 p = &current_poll.poll[i];
326 if(p->fd != fd) next_poll.poll[n++] = current_poll.poll[i];
327 }
328 if(n == i){
329 printk("ignore_sigio_fd : fd %d not found\n", fd);
330 err = -1;
331 goto out;
332 }
333
334 update_thread();
335 out:
336 sigio_unlock();
337 return(err);
338}
339
340static struct pollfd* setup_initial_poll(int fd)
341{
342 struct pollfd *p;
343
344 p = um_kmalloc(sizeof(struct pollfd));
345 if (p == NULL) {
346 printk("setup_initial_poll : failed to allocate poll\n");
347 return NULL;
348 }
349 *p = ((struct pollfd) { .fd = fd,
350 .events = POLLIN,
351 .revents = 0 });
352 return p;
353}
354
355void write_sigio_workaround(void)
356{
357 unsigned long stack;
358 struct pollfd *p;
359 int err;
360 int l_write_sigio_fds[2];
361 int l_sigio_private[2];
362 int l_write_sigio_pid;
363
364 /* We call this *tons* of times - and most ones we must just fail. */
365 sigio_lock();
366 l_write_sigio_pid = write_sigio_pid;
367 sigio_unlock();
368
369 if (l_write_sigio_pid != -1)
370 return;
371
372 err = os_pipe(l_write_sigio_fds, 1, 1);
373 if(err < 0){
374 printk("write_sigio_workaround - os_pipe 1 failed, "
375 "err = %d\n", -err);
376 return;
377 }
378 err = os_pipe(l_sigio_private, 1, 1);
379 if(err < 0){
380 printk("write_sigio_workaround - os_pipe 1 failed, "
381 "err = %d\n", -err);
382 goto out_close1;
383 }
384
385 p = setup_initial_poll(l_sigio_private[1]);
386 if(!p)
387 goto out_close2;
388
389 sigio_lock();
390
391 /* Did we race? Don't try to optimize this, please, it's not so likely
392 * to happen, and no more than once at the boot. */
393 if(write_sigio_pid != -1)
394 goto out_unlock;
395
396 write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
397 CLONE_FILES | CLONE_VM, &stack, 0);
398
399 if (write_sigio_pid < 0)
400 goto out_clear;
401
402 if (write_sigio_irq(l_write_sigio_fds[0]))
403 goto out_kill;
404
405 /* Success, finally. */
406 memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
407 memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));
408
409 current_poll = ((struct pollfds) { .poll = p,
410 .used = 1,
411 .size = 1 });
412
413 sigio_unlock();
414 return;
415
416 out_kill:
417 l_write_sigio_pid = write_sigio_pid;
418 write_sigio_pid = -1;
419 sigio_unlock();
420 /* Going to call waitpid, avoid holding the lock. */
421 os_kill_process(l_write_sigio_pid, 1);
422 goto out_free;
423
424 out_clear:
425 write_sigio_pid = -1;
426 out_unlock:
427 sigio_unlock();
428 out_free:
429 kfree(p);
430 out_close2:
431 os_close_file(l_sigio_private[0]);
432 os_close_file(l_sigio_private[1]);
433 out_close1:
434 os_close_file(l_write_sigio_fds[0]);
435 os_close_file(l_write_sigio_fds[1]);
436 return;
437}
438
439int read_sigio_fd(int fd)
440{
441 int n;
442 char c;
443
444 n = os_read_file(fd, &c, sizeof(c));
445 if(n != sizeof(c)){
446 if(n < 0) {
447 printk("read_sigio_fd - read failed, err = %d\n", -n);
448 return(n);
449 }
450 else {
451 printk("read_sigio_fd - short read, bytes = %d\n", n);
452 return(-EIO);
453 }
454 }
455 return(n);
456}
457
458static void sigio_cleanup(void)
459{
460 if (write_sigio_pid != -1) {
461 os_kill_process(write_sigio_pid, 1);
462 write_sigio_pid = -1;
463 }
464}
465
466__uml_exitcall(sigio_cleanup);
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 3f70a2e12f06..2135eaf98a93 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -35,6 +35,8 @@ void switch_to_skas(void *prev, void *next)
 	switch_threads(&from->thread.mode.skas.switch_buf,
 		       to->thread.mode.skas.switch_buf);
 
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
 	if(current->pid == 0)
 		switch_timers(1);
 }
@@ -89,10 +91,17 @@ void fork_handler(int sig)
 		panic("blech");
 
 	schedule_tail(current->thread.prev_sched);
+
+	/* XXX: if interrupt_end() calls schedule, this call to
+	 * arch_switch_to_skas isn't needed. We could want to apply this to
+	 * improve performance. -bb */
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
 	current->thread.prev_sched = NULL;
 
 /* Handle any immediate reschedules or signals */
 	interrupt_end();
+
 	userspace(&current->thread.regs.regs);
 }
 
@@ -109,6 +118,8 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
 		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
 
 		handler = fork_handler;
+
+		arch_copy_thread(&current->thread.arch, &p->thread.arch);
 	}
 	else {
 		init_thread_registers(&p->thread.regs.regs);
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 72113b0a96e7..511116aebaf7 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) 2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -77,9 +77,9 @@ static int idle_proc(void *cpup)
 	if(err < 0)
 		panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);
 
-	activate_ipi(cpu_data[cpu].ipi_pipe[0],
-		     current->thread.mode.tt.extern_pid);
+	os_set_fd_async(cpu_data[cpu].ipi_pipe[0],
+		     current->thread.mode.tt.extern_pid);
 
 	wmb();
 	if (cpu_test_and_set(cpu, cpu_callin_map)) {
 		printk("huh, CPU#%d already present??\n", cpu);
@@ -106,7 +106,7 @@ static struct task_struct *idle_thread(int cpu)
 		panic("copy_process failed in idle_thread, error = %ld",
 		      PTR_ERR(new_task));
 
-	cpu_tasks[cpu] = ((struct cpu_task) 
+	cpu_tasks[cpu] = ((struct cpu_task)
 		{ .pid = 	new_task->thread.mode.tt.extern_pid,
 		  .task = 	new_task } );
 	idle_threads[cpu] = new_task;
@@ -134,16 +134,15 @@ void smp_prepare_cpus(unsigned int maxcpus)
 	if(err < 0)
 		panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
 
-	activate_ipi(cpu_data[me].ipi_pipe[0],
-		     current->thread.mode.tt.extern_pid);
+	os_set_fd_async(cpu_data[me].ipi_pipe[0],
+		     current->thread.mode.tt.extern_pid);
 
 	for(cpu = 1; cpu < ncpus; cpu++){
 		printk("Booting processor %d...\n", cpu);
 
 		idle = idle_thread(cpu);
 
 		init_idle(idle, cpu);
-		unhash_process(idle);
 
 		waittime = 200000000;
 		while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
@@ -223,7 +222,7 @@ void smp_call_function_slave(int cpu)
 	atomic_inc(&scf_finished);
 }
 
-int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, 
+int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
 		       int wait)
 {
 	int cpus = num_online_cpus() - 1;
diff --git a/arch/um/kernel/syscall_kern.c b/arch/um/kernel/syscall_kern.c
index 8e1a3501ff46..37d3978337d8 100644
--- a/arch/um/kernel/syscall_kern.c
+++ b/arch/um/kernel/syscall_kern.c
@@ -104,7 +104,7 @@ long sys_pipe(unsigned long __user * fildes)
 }
 
 
-long sys_uname(struct old_utsname * name)
+long sys_uname(struct old_utsname __user * name)
 {
 	long err;
 	if (!name)
@@ -115,7 +115,7 @@ long sys_uname(struct old_utsname * name)
 	return err?-EFAULT:0;
 }
 
-long sys_olduname(struct oldold_utsname * name)
+long sys_olduname(struct oldold_utsname __user * name)
 {
 	long error;
 
diff --git a/arch/um/kernel/trap_kern.c b/arch/um/kernel/trap_kern.c
index d56046c2aba2..02f6d4d8dc3a 100644
--- a/arch/um/kernel/trap_kern.c
+++ b/arch/um/kernel/trap_kern.c
@@ -198,7 +198,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
 		si.si_signo = SIGBUS;
 		si.si_errno = 0;
 		si.si_code = BUS_ADRERR;
-		si.si_addr = (void *)address;
+		si.si_addr = (void __user *)address;
 		current->thread.arch.faultinfo = fi;
 		force_sig_info(SIGBUS, &si, current);
 	} else if (err == -ENOMEM) {
@@ -207,7 +207,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
 	} else {
 		BUG_ON(err != -EFAULT);
 		si.si_signo = SIGSEGV;
-		si.si_addr = (void *) address;
+		si.si_addr = (void __user *) address;
 		current->thread.arch.faultinfo = fi;
 		force_sig_info(SIGSEGV, &si, current);
 	}
@@ -220,8 +220,8 @@ void bad_segv(struct faultinfo fi, unsigned long ip)
 
 	si.si_signo = SIGSEGV;
 	si.si_code = SEGV_ACCERR;
-	si.si_addr = (void *) FAULT_ADDRESS(fi);
+	si.si_addr = (void __user *) FAULT_ADDRESS(fi);
 	current->thread.arch.faultinfo = fi;
 	force_sig_info(SIGSEGV, &si, current);
 }
 
diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c
index 295c1ac817b3..a9c1443fc548 100644
--- a/arch/um/kernel/tt/process_kern.c
+++ b/arch/um/kernel/tt/process_kern.c
@@ -51,6 +51,13 @@ void switch_to_tt(void *prev, void *next)
 
 	c = 0;
 
+	/* Notice that here we "up" the semaphore on which "to" is waiting, and
+	 * below (the read) we wait on this semaphore (which is implemented by
+	 * switch_pipe) and go sleeping. Thus, after that, we have resumed in
+	 * "to", and can't use any more the value of "from" (which is outdated),
+	 * nor the value in "to" (since it was the task which stole us the CPU,
+	 * which we don't care about). */
+
 	err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
 	if(err != sizeof(c))
 		panic("write of switch_pipe failed, err = %d", -err);
@@ -77,7 +84,7 @@ void switch_to_tt(void *prev, void *next)
 	change_sig(SIGALRM, alrm);
 	change_sig(SIGPROF, prof);
 
-	arch_switch();
+	arch_switch_to_tt(prev_sched, current);
 
 	flush_tlb_all();
 	local_irq_restore(flags);
@@ -141,7 +148,6 @@ static void new_thread_handler(int sig)
 	set_cmdline("(kernel thread)");
 
 	change_sig(SIGUSR1, 1);
-	change_sig(SIGVTALRM, 1);
 	change_sig(SIGPROF, 1);
 	local_irq_enable();
 	if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
diff --git a/arch/um/kernel/tty_log.c b/arch/um/kernel/tty_log.c
deleted file mode 100644
index 9ada656f68ce..000000000000
--- a/arch/um/kernel/tty_log.c
+++ /dev/null
@@ -1,230 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) and
3 * geoffrey hing <ghing@net.ohio-state.edu>
4 * Licensed under the GPL
5 */
6
7#include <errno.h>
8#include <string.h>
9#include <stdio.h>
10#include <stdlib.h>
11#include <unistd.h>
12#include <sys/time.h>
13#include "init.h"
14#include "user.h"
15#include "kern_util.h"
16#include "os.h"
17
18#define TTY_LOG_DIR "./"
19
20/* Set early in boot and then unchanged */
21static char *tty_log_dir = TTY_LOG_DIR;
22static int tty_log_fd = -1;
23
24#define TTY_LOG_OPEN 1
25#define TTY_LOG_CLOSE 2
26#define TTY_LOG_WRITE 3
27#define TTY_LOG_EXEC 4
28
29#define TTY_READ 1
30#define TTY_WRITE 2
31
32struct tty_log_buf {
33 int what;
34 unsigned long tty;
35 int len;
36 int direction;
37 unsigned long sec;
38 unsigned long usec;
39};
40
41int open_tty_log(void *tty, void *current_tty)
42{
43 struct timeval tv;
44 struct tty_log_buf data;
45 char buf[strlen(tty_log_dir) + sizeof("01234567890-01234567\0")];
46 int fd;
47
48 gettimeofday(&tv, NULL);
49 if(tty_log_fd != -1){
50 data = ((struct tty_log_buf) { .what = TTY_LOG_OPEN,
51 .tty = (unsigned long) tty,
52 .len = sizeof(current_tty),
53 .direction = 0,
54 .sec = tv.tv_sec,
55 .usec = tv.tv_usec } );
56 os_write_file(tty_log_fd, &data, sizeof(data));
57 os_write_file(tty_log_fd, &current_tty, data.len);
58 return(tty_log_fd);
59 }
60
61 sprintf(buf, "%s/%0u-%0u", tty_log_dir, (unsigned int) tv.tv_sec,
62 (unsigned int) tv.tv_usec);
63
64 fd = os_open_file(buf, of_append(of_create(of_rdwr(OPENFLAGS()))),
65 0644);
66 if(fd < 0){
67 printk("open_tty_log : couldn't open '%s', errno = %d\n",
68 buf, -fd);
69 }
70 return(fd);
71}
72
73void close_tty_log(int fd, void *tty)
74{
75 struct tty_log_buf data;
76 struct timeval tv;
77
78 if(tty_log_fd != -1){
79 gettimeofday(&tv, NULL);
80 data = ((struct tty_log_buf) { .what = TTY_LOG_CLOSE,
81 .tty = (unsigned long) tty,
82 .len = 0,
83 .direction = 0,
84 .sec = tv.tv_sec,
85 .usec = tv.tv_usec } );
86 os_write_file(tty_log_fd, &data, sizeof(data));
87 return;
88 }
89 os_close_file(fd);
90}
91
92static int log_chunk(int fd, const char *buf, int len)
93{
94 int total = 0, try, missed, n;
95 char chunk[64];
96
97 while(len > 0){
98 try = (len > sizeof(chunk)) ? sizeof(chunk) : len;
99 missed = copy_from_user_proc(chunk, (char *) buf, try);
100 try -= missed;
101 n = os_write_file(fd, chunk, try);
102 if(n != try) {
103 if(n < 0)
104 return(n);
105 return(-EIO);
106 }
107 if(missed != 0)
108 return(-EFAULT);
109
110 len -= try;
111 total += try;
112 buf += try;
113 }
114
115 return(total);
116}
117
118int write_tty_log(int fd, const char *buf, int len, void *tty, int is_read)
119{
120 struct timeval tv;
121 struct tty_log_buf data;
122 int direction;
123
124 if(fd == tty_log_fd){
125 gettimeofday(&tv, NULL);
126 direction = is_read ? TTY_READ : TTY_WRITE;
127 data = ((struct tty_log_buf) { .what = TTY_LOG_WRITE,
128 .tty = (unsigned long) tty,
129 .len = len,
130 .direction = direction,
131 .sec = tv.tv_sec,
132 .usec = tv.tv_usec } );
133 os_write_file(tty_log_fd, &data, sizeof(data));
134 }
135
136 return(log_chunk(fd, buf, len));
137}
138
139void log_exec(char **argv, void *tty)
140{
141 struct timeval tv;
142 struct tty_log_buf data;
143 char **ptr,*arg;
144 int len;
145
146 if(tty_log_fd == -1) return;
147
148 gettimeofday(&tv, NULL);
149
150 len = 0;
151 for(ptr = argv; ; ptr++){
152 if(copy_from_user_proc(&arg, ptr, sizeof(arg)))
153 return;
154 if(arg == NULL) break;
155 len += strlen_user_proc(arg);
156 }
157
158 data = ((struct tty_log_buf) { .what = TTY_LOG_EXEC,
159 .tty = (unsigned long) tty,
160 .len = len,
161 .direction = 0,
162 .sec = tv.tv_sec,
163 .usec = tv.tv_usec } );
164 os_write_file(tty_log_fd, &data, sizeof(data));
165
166 for(ptr = argv; ; ptr++){
167 if(copy_from_user_proc(&arg, ptr, sizeof(arg)))
168 return;
169 if(arg == NULL) break;
170 log_chunk(tty_log_fd, arg, strlen_user_proc(arg));
171 }
172}
173
174extern void register_tty_logger(int (*opener)(void *, void *),
175 int (*writer)(int, const char *, int,
176 void *, int),
177 void (*closer)(int, void *));
178
179static int register_logger(void)
180{
181 register_tty_logger(open_tty_log, write_tty_log, close_tty_log);
182 return(0);
183}
184
185__uml_initcall(register_logger);
186
187static int __init set_tty_log_dir(char *name, int *add)
188{
189 tty_log_dir = name;
190 return 0;
191}
192
193__uml_setup("tty_log_dir=", set_tty_log_dir,
194"tty_log_dir=<directory>\n"
195" This is used to specify the directory where the logs of all pty\n"
196" data from this UML machine will be written.\n\n"
197);
198
199static int __init set_tty_log_fd(char *name, int *add)
200{
201 char *end;
202
203 tty_log_fd = strtoul(name, &end, 0);
204 if((*end != '\0') || (end == name)){
205 printf("set_tty_log_fd - strtoul failed on '%s'\n", name);
206 tty_log_fd = -1;
207 }
208
209 *add = 0;
210 return 0;
211}
212
213__uml_setup("tty_log_fd=", set_tty_log_fd,
214"tty_log_fd=<fd>\n"
215" This is used to specify a preconfigured file descriptor to which all\n"
216" tty data will be written. Preconfigure the descriptor with something\n"
217" like '10>tty_log tty_log_fd=10'.\n\n"
218);
219
220
221/*
222 * Overrides for Emacs so that we follow Linus's tabbing style.
223 * Emacs will notice this stuff at the end of the file and automatically
224 * adjust the settings for this buffer only. This must remain at the end
225 * of the file.
226 * ---------------------------------------------------------------------------
227 * Local variables:
228 * c-file-style: "linux"
229 * End:
230 */
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 27cdf9164422..7d51dd7201c3 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -421,7 +421,7 @@ int linux_main(int argc, char **argv)
 #ifndef CONFIG_HIGHMEM
 		highmem = 0;
 		printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
-		       "to %lu bytes\n", physmem_size);
+		       "to %Lu bytes\n", physmem_size);
 #endif
 	}
 
@@ -433,8 +433,8 @@ int linux_main(int argc, char **argv)
 
 	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
 	if(init_maps(physmem_size, iomem_size, highmem)){
-		printf("Failed to allocate mem_map for %lu bytes of physical "
-		       "memory and %lu bytes of highmem\n", physmem_size,
+		printf("Failed to allocate mem_map for %Lu bytes of physical "
+		       "memory and %Lu bytes of highmem\n", physmem_size,
 		       highmem);
 		exit(1);
 	}
@@ -477,7 +477,8 @@ static struct notifier_block panic_exit_notifier = {
 
 void __init setup_arch(char **cmdline_p)
 {
-	notifier_chain_register(&panic_notifier_list, &panic_exit_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_exit_notifier);
 	paging_init();
 	strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = command_line;
@@ -487,10 +488,19 @@ void __init setup_arch(char **cmdline_p)
 void __init check_bugs(void)
 {
 	arch_check_bugs();
-	check_sigio();
-	check_devanon();
+	os_check_bugs();
 }
 
-void apply_alternatives(void *start, void *end)
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+}
+
+void alternatives_smp_module_add(struct module *mod, char *name,
+				 void *locks, void *locks_end,
+				 void *text, void *text_end)
+{
+}
+
+void alternatives_smp_module_del(struct module *mod)
 {
 }