Diffstat (limited to 'arch/um/kernel/irq.c')
-rw-r--r--  arch/um/kernel/irq.c  111
1 files changed, 111 insertions, 0 deletions

diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a9651a175eb5..dba04d88b432 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -32,6 +32,7 @@
 #include "sigio.h"
 #include "um_malloc.h"
 #include "misc_constants.h"
+#include "as-layout.h"
 
 /*
  * Generic, controller-independent functions:
@@ -468,3 +469,113 @@ int init_aio_irq(int irq, char *name, irq_handler_t handler)
  out:
 	return err;
 }
+
+/*
+ * IRQ stack entry and exit:
+ *
+ * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
+ * and switch over to the IRQ stack after some preparation. We use
+ * sigaltstack to receive signals on a separate stack from the start.
+ * These two functions make sure the rest of the kernel won't be too
+ * upset by being on a different stack. The IRQ stack has a
+ * thread_info structure at the bottom so that current et al continue
+ * to work.
+ *
+ * to_irq_stack copies the current task's thread_info to the IRQ stack
+ * thread_info and sets the task's stack to point to the IRQ stack.
+ *
+ * from_irq_stack copies the thread_info struct back (flags may have
+ * been modified) and resets the task's stack pointer.
+ *
+ * Tricky bits -
+ *
+ * What happens when two signals race each other? UML doesn't block
+ * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
+ * could arrive while a previous one is still setting up the
+ * thread_info.
+ *
+ * There are three cases -
+ *     The first interrupt on the stack - sets up the thread_info and
+ * handles the interrupt
+ *     A nested interrupt interrupting the copying of the thread_info -
+ * can't handle the interrupt, as the stack is in an unknown state
+ *     A nested interrupt not interrupting the copying of the
+ * thread_info - doesn't do any setup, just handles the interrupt
+ *
+ * The first job is to figure out whether we interrupted stack setup.
+ * This is done by xchging our signal's bit with the global pending_mask.
+ * If the value that comes back is zero, then there is no setup in
+ * progress, and the interrupt can be handled. If the value is
+ * non-zero, then there is stack setup in progress. In order to have
+ * the interrupt handled, we leave our signal in the mask, and it will
+ * be handled by the upper handler after it has set up the stack.
+ *
+ * Next is to figure out whether we are the outer handler or a nested
+ * one. As part of setting up the stack, thread_info->real_thread is
+ * set to non-NULL (and is reset to NULL on exit). This is the
+ * nesting indicator. If it is non-NULL, then the stack is already
+ * set up and the handler can run.
+ */
+
+static unsigned long pending_mask;
+
+unsigned long to_irq_stack(int sig, unsigned long *mask_out)
+{
+	struct thread_info *ti;
+	unsigned long mask, old;
+	int nested;
+
+	mask = xchg(&pending_mask, 1 << sig);
+	if(mask != 0){
+		/* If any interrupts come in at this point, we want to
+		 * make sure that their bits aren't lost by our
+		 * putting our bit in. So, this loop accumulates bits
+		 * until xchg returns the same value that we put in.
+		 * When that happens, there were no new interrupts,
+		 * and pending_mask contains a bit for each interrupt
+		 * that came in.
+		 */
+		old = 1 << sig;
+		do {
+			old |= mask;
+			mask = xchg(&pending_mask, old);
+		} while(mask != old);
+		return 1;
+	}
+
+	ti = current_thread_info();
+	nested = (ti->real_thread != NULL);
+	if(!nested){
+		struct task_struct *task;
+		struct thread_info *tti;
+
+		task = cpu_tasks[ti->cpu].task;
+		tti = task_thread_info(task);
+		*ti = *tti;
+		ti->real_thread = tti;
+		task->stack = ti;
+	}
+
+	mask = xchg(&pending_mask, 0);
+	*mask_out |= mask | nested;
+	return 0;
+}
+
+unsigned long from_irq_stack(int nested)
+{
+	struct thread_info *ti, *to;
+	unsigned long mask;
+
+	ti = current_thread_info();
+
+	pending_mask = 1;
+
+	to = ti->real_thread;
+	current->stack = to;
+	ti->real_thread = NULL;
+	*to = *ti;
+
+	mask = xchg(&pending_mask, 0);
+	return mask & ~1;
+}
+
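
The comment's claim that "current et al continue to work" rests on the kernel convention
that current_thread_info() is derived from the stack pointer, so placing a thread_info
at the base of the sigaltstack-provided IRQ stack is all that is needed. The sketch
below illustrates the masking trick in UML's style; THREAD_SIZE's value is an assumption
here (it is arch- and config-dependent), and this is not the literal kernel source.

/* Sketch: why a thread_info at the stack base keeps "current" working.
 * Masking any on-stack address with ~(THREAD_SIZE - 1) lands on the
 * base of whichever THREAD_SIZE-aligned stack we are running on -
 * the task stack or the IRQ stack alike.
 */
#define THREAD_SIZE (1UL << 13)	/* assumed 8K; really arch-dependent */

struct thread_info;	/* opaque here; the real layout lives in the kernel */

static inline struct thread_info *current_thread_info_sketch(void)
{
	int dummy;	/* any stack-local variable will do */
	unsigned long sp = (unsigned long) &dummy;

	return (struct thread_info *) (sp & ~(THREAD_SIZE - 1));
}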
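
The pending_mask handshake in to_irq_stack() is the lock-free core: a caller that loses
the race folds its bit into the mask and keeps xchging until the value that comes back
equals the value it stored, proving no further bits arrived in between. Below is a
minimal userspace sketch of that accumulate-until-stable pattern; it uses GCC's
__atomic_exchange_n in place of the kernel's xchg(), and post_bit/drain_pending are
invented names, not UML's.

#include <stdio.h>

static unsigned long pending;	/* stands in for pending_mask */

/* Post bit 'sig'; returns 1 if another poster was already active
 * (our bit is left accumulated for the active poster to drain).
 */
static int post_bit(int sig)
{
	unsigned long mask, old;

	mask = __atomic_exchange_n(&pending, 1UL << sig, __ATOMIC_SEQ_CST);
	if (mask != 0) {
		/* Merge bits until xchg hands back exactly what we
		 * stored - then nothing new arrived in between.
		 */
		old = 1UL << sig;
		do {
			old |= mask;
			mask = __atomic_exchange_n(&pending, old,
						   __ATOMIC_SEQ_CST);
		} while (mask != old);
		return 1;
	}
	return 0;
}

/* The "outer" poster drains everything accumulated so far. */
static unsigned long drain_pending(void)
{
	return __atomic_exchange_n(&pending, 0, __ATOMIC_SEQ_CST);
}

int main(void)
{
	if (post_bit(3) == 0) {		/* first poster: owns the drain */
		post_bit(5);		/* a racing poster; returns 1 */
		printf("pending = %#lx\n", drain_pending());
	}
	return 0;
}

Run as-is, this prints "pending = 0x28": the second post_bit() lost the race, so its
bit was left behind for the first caller to drain along with its own.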
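
For completeness, here is one plausible shape for the "upper handler" the comment
alludes to. This is a hedged reconstruction, not code from this commit:
dispatch_irq_for() is an invented dispatch hook and the real callers live elsewhere
in the UML tree. It shows the contract the two functions export: a non-zero return
from to_irq_stack() means a racing outer handler will replay the signal, bit 0 of
the returned mask carries the nesting flag (which is why from_irq_stack() strips it
with "& ~1"), and from_irq_stack() hands back signals that arrived during handling.

extern unsigned long to_irq_stack(int sig, unsigned long *mask_out);
extern unsigned long from_irq_stack(int nested);
extern void dispatch_irq_for(int sig);	/* invented dispatch hook */

static void hypothetical_sig_handler(int sig)
{
	unsigned long pending = 0;
	int nested;

	if(to_irq_stack(sig, &pending) != 0)
		return;	/* lost the setup race; outer handler replays us */

	nested = pending & 1;	/* bit 0 is the nesting flag... */
	pending &= ~1UL;	/* ...the rest is a mask of pending signals */

	while(pending != 0){
		sig = __builtin_ffsl(pending) - 1;
		pending &= ~(1UL << sig);
		dispatch_irq_for(sig);

		/* Once drained, tear down; from_irq_stack() returns
		 * any signals that arrived while we were handling.
		 */
		if((pending == 0) && !nested){
			pending = from_irq_stack(nested);
			nested = 1;	/* torn down; don't tear down twice */
		}
	}
}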