Diffstat (limited to 'arch/um/kernel/irq_user.c')
-rw-r--r--  arch/um/kernel/irq_user.c  443
1 file changed, 443 insertions, 0 deletions
diff --git a/arch/um/kernel/irq_user.c b/arch/um/kernel/irq_user.c
new file mode 100644
index 000000000000..6d6f9484b884
--- /dev/null
+++ b/arch/um/kernel/irq_user.c
@@ -0,0 +1,443 @@
/*
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/time.h>
#include "user_util.h"
#include "kern_util.h"
#include "user.h"
#include "process.h"
#include "signal_user.h"
#include "sigio.h"
#include "irq_user.h"
#include "os.h"

struct irq_fd {
	struct irq_fd *next;
	void *id;
	int fd;
	int type;
	int irq;
	int pid;
	int events;
	int current_events;
	int freed;
};

static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

static struct pollfd *pollfds = NULL;
static int pollfds_num = 0;
static int pollfds_size = 0;
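
/* active_fds and pollfds are kept in lockstep: the i-th entry of pollfds
 * describes the i-th irq_fd in the active_fds list, and last_irq_ptr points
 * at the list's tail pointer so new entries can be appended in constant
 * time.  An entry is disabled by setting its pollfds[i].fd to -1 rather
 * than by unlinking it; free_irq_by_cb() below is what actually removes an
 * entry from both structures.
 */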

extern int io_count, intr_count;

void sigio_handler(int sig, union uml_pt_regs *regs)
{
	struct irq_fd *irq_fd, *next;
	int i, n;

	if(smp_sigio_handler()) return;
	while(1){
		n = poll(pollfds, pollfds_num, 0);
		if(n < 0){
			if(errno == EINTR) continue;
			printk("sigio_handler : poll returned %d, "
			       "errno = %d\n", n, errno);
			break;
		}
		if(n == 0) break;

		irq_fd = active_fds;
		for(i = 0; i < pollfds_num; i++){
			if(pollfds[i].revents != 0){
				irq_fd->current_events = pollfds[i].revents;
				pollfds[i].fd = -1;
			}
			irq_fd = irq_fd->next;
		}

		for(irq_fd = active_fds; irq_fd != NULL; irq_fd = next){
			next = irq_fd->next;
			if(irq_fd->current_events != 0){
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);

				/* This is here because the next irq may be
				 * freed in the handler.  If a console goes
				 * away, both the read and write irqs will be
				 * freed.  After do_IRQ, ->next will point to
				 * a good IRQ.
				 * Irqs can't be freed inside their handlers,
				 * so the next best thing is to have them
				 * marked as needing freeing, so that they
				 * can be freed here.
				 */
				next = irq_fd->next;
				if(irq_fd->freed){
					free_irq(irq_fd->irq, irq_fd->id);
					free_irq_by_irq_and_dev(irq_fd->irq,
								irq_fd->id);
				}
			}
		}
	}
}
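
/* Illustrative sketch, not part of the original file: how the deferred-free
 * path above is meant to be used.  A driver whose host descriptor goes away
 * cannot call free_irq() from inside its own interrupt handler, so it calls
 * free_irq_later() (defined further down in this file) to set ->freed;
 * sigio_handler() then does the real free_irq() once do_IRQ() has returned.
 * The function and argument names below are made up for the example.
 */
#if 0	/* example only - not compiled */
static void example_channel_gone(int irq, void *dev_id)
{
	/* Mark the irq as needing freeing; it stays on active_fds for now. */
	free_irq_later(irq, dev_id);

	/* On the next SIGIO, sigio_handler() notices ->freed and calls
	 * free_irq() and free_irq_by_irq_and_dev() on our behalf.
	 */
}
#endif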

int activate_ipi(int fd, int pid)
{
	return(os_set_fd_async(fd, pid));
}

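/* Some hosts don't raise SIGIO when a pty becomes writable, or when the
 * other end of it is closed; pty_output_sigio and pty_close_sigio record
 * whether this host does.  If the descriptor is a tty and the needed
 * notification is missing, fall back to the write_sigio_workaround()
 * helper and have it watch this descriptor too.
 */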
static void maybe_sigio_broken(int fd, int type)
{
	if(isatty(fd)){
		if((type == IRQ_WRITE) && !pty_output_sigio){
			write_sigio_workaround();
			add_sigio_fd(fd, 0);
		}
		else if((type == IRQ_READ) && !pty_close_sigio){
			write_sigio_workaround();
			add_sigio_fd(fd, 1);
		}
	}
}

int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int pid, events, err, n, size;

	pid = os_getpid();
	err = os_set_fd_async(fd, pid);
	if(err < 0)
		goto out;

	new_fd = um_kmalloc(sizeof(*new_fd));
	err = -ENOMEM;
	if(new_fd == NULL)
		goto out;

	if(type == IRQ_READ) events = POLLIN | POLLPRI;
	else events = POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .pid		= pid,
				     .events		= events,
				     .current_events	= 0,
				     .freed		= 0 } );

	/* Critical section - locked by a spinlock because this stuff can
	 * be changed from interrupt handlers.  The stuff above is done
	 * outside the lock because it allocates memory.
	 */

	/* Actually, it only looks like it can be called from interrupt
	 * context.  The culprit is reactivate_fd, which calls
	 * maybe_sigio_broken, which calls write_sigio_workaround,
	 * which calls activate_fd.  However, write_sigio_workaround should
	 * only be called once, at boot time.  That would make it clear that
	 * this is called only from process context, and can be locked with
	 * a semaphore.
	 */
	flags = irq_lock();
	for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
		if((irq_fd->fd == fd) && (irq_fd->type == type)){
			printk("Registering fd %d twice\n", fd);
			printk("Irqs : %d, %d\n", irq_fd->irq, irq);
			printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id);
			goto out_unlock;
		}
	}

	n = pollfds_num;
	if(n == pollfds_size){
		while(1){
			/* Here we have to drop the lock in order to call
			 * kmalloc, which might sleep.  If something else
			 * came in and changed the pollfds array, we free
			 * the buffer and try again.
			 */
			irq_unlock(flags);
			size = (pollfds_num + 1) * sizeof(pollfds[0]);
			tmp_pfd = um_kmalloc(size);
			flags = irq_lock();
			if(tmp_pfd == NULL)
				goto out_unlock;
			if(n == pollfds_size)
				break;
			kfree(tmp_pfd);
		}
		if(pollfds != NULL){
			memcpy(tmp_pfd, pollfds,
			       sizeof(pollfds[0]) * pollfds_size);
			kfree(pollfds);
		}
		pollfds = tmp_pfd;
		pollfds_size++;
	}

	if(type == IRQ_WRITE)
		fd = -1;

	pollfds[pollfds_num] = ((struct pollfd) { .fd		= fd,
						  .events	= events,
						  .revents	= 0 });
	pollfds_num++;

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	irq_unlock(flags);

	/* This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, type);

	return(0);

 out_unlock:
	irq_unlock(flags);
	kfree(new_fd);
 out:
	return(err);
}
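
/* Illustrative sketch, not part of the original file: the typical life of a
 * descriptor in terms of the entry points defined here.  sigio_handler()
 * clears pollfds[i].fd (sets it to -1) before calling do_IRQ(), so a handler
 * re-arms the descriptor with reactivate_fd() once it has drained it, and
 * drops it with free_irq_by_fd() when it is being closed.  The example_*
 * names are hypothetical.
 */
#if 0	/* example only - not compiled */
static int example_open(int irq, int fd, void *dev)
{
	/* Start polling fd; POLLIN/POLLPRI on it now raises irq. */
	return(activate_fd(irq, fd, IRQ_READ, dev));
}

static void example_handler_done(int irq, int fd)
{
	/* Re-enable the entry that sigio_handler() disabled. */
	reactivate_fd(fd, irq);
}

static void example_close(int fd)
{
	/* Remove every irq registered against this descriptor. */
	free_irq_by_fd(fd);
}
#endif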

static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	struct irq_fd **prev;
	unsigned long flags;
	int i = 0;

	flags = irq_lock();
	prev = &active_fds;
	while(*prev != NULL){
		if((*test)(*prev, arg)){
			struct irq_fd *old_fd = *prev;
			if((pollfds[i].fd != -1) &&
			   (pollfds[i].fd != (*prev)->fd)){
				printk("free_irq_by_cb - mismatch between "
				       "active_fds and pollfds, fd %d vs %d\n",
				       (*prev)->fd, pollfds[i].fd);
				goto out;
			}
			memmove(&pollfds[i], &pollfds[i + 1],
				(pollfds_num - i - 1) * sizeof(pollfds[0]));
			pollfds_num--;
			if(last_irq_ptr == &old_fd->next)
				last_irq_ptr = prev;
			*prev = (*prev)->next;
			if(old_fd->type == IRQ_WRITE)
				ignore_sigio_fd(old_fd->fd);
			kfree(old_fd);
			continue;
		}
		prev = &(*prev)->next;
		i++;
	}
 out:
	irq_unlock(flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return((irq->irq == data->irq) && (irq->id == data->dev));
}

void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq	= irq,
							  .dev	= dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return(irq->fd == *((int *) fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}
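
/* Illustrative sketch, not part of the original file: free_irq_by_cb() takes
 * an arbitrary predicate, so other match rules follow the same two-step
 * pattern as the helpers above.  A hypothetical helper that drops every irq
 * registered with a given device id, whatever its irq number or descriptor,
 * would look like this.
 */
#if 0	/* example only - not compiled */
static int same_dev(struct irq_fd *irq, void *dev)
{
	return(irq->id == dev);
}

static void free_irq_by_dev(void *dev)
{
	free_irq_by_cb(same_dev, dev);
}
#endif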

static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;

	for(irq = active_fds; irq != NULL; irq = irq->next){
		if((irq->fd == fd) && (irq->irq == irqnum)) break;
		i++;
	}
	if(irq == NULL){
		printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
		goto out;
	}
	if((pollfds[i].fd != -1) && (pollfds[i].fd != fd)){
		printk("find_irq_by_fd - mismatch between active_fds and "
		       "pollfds, fd %d vs %d, need %d\n", irq->fd,
		       pollfds[i].fd, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return(irq);
}

void free_irq_later(int irq, void *dev_id)
{
	struct irq_fd *irq_fd;
	unsigned long flags;

	flags = irq_lock();
	for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
		if((irq_fd->irq == irq) && (irq_fd->id == dev_id))
			break;
	}
	if(irq_fd == NULL){
		printk("free_irq_later found no irq, irq = %d, "
		       "dev_id = 0x%p\n", irq, dev_id);
		goto out;
	}
	irq_fd->freed = 1;
 out:
	irq_unlock(flags);
}

void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	flags = irq_lock();
	irq = find_irq_by_fd(fd, irqnum, &i);
	if(irq == NULL){
		irq_unlock(flags);
		return;
	}

	pollfds[i].fd = irq->fd;

	irq_unlock(flags);

	/* This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, irq->type);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	flags = irq_lock();
	irq = find_irq_by_fd(fd, irqnum, &i);
	if(irq == NULL)
		goto out;
	pollfds[i].fd = -1;
 out:
	irq_unlock(flags);
}

int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for(irq = active_fds; irq != NULL; irq = irq->next){
		err = os_clear_fd_async(irq->fd);
		if(err)
			return(err);
	}
	/* If there is a signal already queued, after unblocking ignore it */
	set_handler(SIGIO, SIG_IGN, 0, -1);

	return(0);
}

void forward_ipi(int fd, int pid)
{
	int err;

	err = os_set_owner(fd, pid);
	if(err < 0)
		printk("forward_ipi: set_owner failed, fd = %d, me = %d, "
		       "target = %d, err = %d\n", fd, os_getpid(), pid, -err);
}

void forward_interrupts(int pid)
{
	struct irq_fd *irq;
	unsigned long flags;
	int err;

	flags = irq_lock();
	for(irq = active_fds; irq != NULL; irq = irq->next){
		err = os_set_owner(irq->fd, pid);
		if(err < 0){
			/* XXX Just remove the irq rather than
			 * print out an infinite stream of these
			 */
			printk("Failed to forward %d to pid %d, err = %d\n",
			       irq->fd, pid, -err);
		}

		irq->pid = pid;
	}
	irq_unlock(flags);
}

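/* Install the signal handlers that drive this file: SIGVTALRM for the timer
 * (the boot-time handler until the timer irq is set up, alarm_handler after
 * that) and SIGIO for descriptor events.  The trailing arguments to
 * set_handler() are the signals blocked while each handler runs, and
 * SA_ONSTACK is added when the caller asks for the alternate signal stack.
 */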
void init_irq_signals(int on_sigstack)
{
	__sighandler_t h;
	int flags;

	flags = on_sigstack ? SA_ONSTACK : 0;
	if(timer_irq_inited) h = (__sighandler_t) alarm_handler;
	else h = boot_timer_handler;

	set_handler(SIGVTALRM, h, flags | SA_RESTART,
		    SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1);
	set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART,
		    SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
	signal(SIGWINCH, SIG_IGN);
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */