author		David S. Miller <davem@davemloft.net>	2008-09-24 01:15:57 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-10-17 02:46:56 -0400
commit		54514a70adefe356afe854e2d3912d46668068e6
tree		e2b08f2c10ff427447fdc40e96555fc2f22549e1
parent		2e532d68a2b3e2aa6b19731501222069735c741c
softirq: Add support for triggering softirq work on softirqs.
This is basically a generalization of Jens Axboe's block layer
remote softirq changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--	include/linux/interrupt.h	 21
-rw-r--r--	include/linux/smp.h		  4
-rw-r--r--	kernel/softirq.c		129
3 files changed, 153 insertions(+), 1 deletion(-)
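
For orientation (editorial aside, not part of the commit): the new interface lets a completion path punt a work item to the softirq worklist of a chosen CPU, typically the one that submitted the request. A minimal caller-side sketch follows; struct my_request, my_complete_request(), and the choice of BLOCK_SOFTIRQ are illustrative assumptions, not part of this patch.

#include <linux/interrupt.h>
#include <linux/smp.h>

/* Hypothetical request structure: the embedded call_single_data is what
 * send_remote_softirq() queues, so it must stay alive until the softirq
 * on the target cpu has consumed it. */
struct my_request {
	struct call_single_data csd;
	int submit_cpu;			/* cpu that issued the request */
};

/* Completion path (e.g. the device's hardirq handler): hand the request
 * back to the submitting cpu.  send_remote_softirq() disables local
 * interrupts itself and falls back to the local cpu's worklist when the
 * target cpu is offline or is the local cpu. */
static void my_complete_request(struct my_request *rq)
{
	send_remote_softirq(&rq->csd, rq->submit_cpu, BLOCK_SOFTIRQ);
}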
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 54b3623434ec..35a61dc60d51 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,6 +11,8 @@
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -273,6 +275,25 @@ extern void softirq_init(void);
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_softirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them.  The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+
+/* Try to send a softirq to a remote cpu.  If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+				  int this_cpu, int softirq);
 
 /* Tasklets --- multithreaded analogue of BHs.
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 66484d4a8459..2e4d58b26c06 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,6 +7,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/types.h>
 #include <linux/list.h>
 #include <linux/cpumask.h>
 
@@ -16,7 +17,8 @@ struct call_single_data {
 	struct list_head list;
 	void (*func) (void *info);
 	void *info;
-	unsigned int flags;
+	u16 flags;
+	u16 priv;
 };
 
 #ifdef CONFIG_SMP
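
(Editorial aside, not part of the patch: flags shrinks from unsigned int to u16 so that the new u16 priv fits in the same word; the remote softirq code below uses priv to carry the softirq number through the generic SMP function-call path — see remote_softirq_receive().)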
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 37d67aa2d56f..83ba21a13bd4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
  *	Distribute under GPLv2.
  *
  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ *
+ *	Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/module.h>
@@ -474,17 +476,144 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+	list_add_tail(&cp->list, head);
+
+	/* Trigger the softirq only if the list was previously empty. */
+	if (head->next == &cp->list)
+		raise_softirq_irqoff(softirq);
+}
+
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+	struct call_single_data *cp = data;
+	unsigned long flags;
+	int softirq;
+
+	softirq = cp->priv;
+
+	local_irq_save(flags);
+	__local_trigger(cp, softirq);
+	local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	if (cpu_online(cpu)) {
+		cp->func = remote_softirq_receive;
+		cp->info = cp;
+		cp->flags = 0;
+		cp->priv = softirq;
+
+		__smp_call_function_single(cpu, cp);
+		return 0;
+	}
+	return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	return 1;
+}
+#endif
+
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu.  If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+		__local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq except that disabling interrupts and
+ * computing the current cpu is done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	unsigned long flags;
+	int this_cpu;
+
+	local_irq_save(flags);
+	this_cpu = smp_processor_id();
+	__send_remote_softirq(cp, cpu, this_cpu, softirq);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
+
+static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+					       unsigned long action, void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		int cpu = (unsigned long) hcpu;
+		int i;
+
+		local_irq_disable();
+		for (i = 0; i < NR_SOFTIRQS; i++) {
+			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+			struct list_head *local_head;
+
+			if (list_empty(head))
+				continue;
+
+			local_head = &__get_cpu_var(softirq_work_list[i]);
+			list_splice_init(head, local_head);
+			raise_softirq_irqoff(i);
+		}
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+	.notifier_call = remote_softirq_cpu_notify,
+};
+
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
+		int i;
+
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
+		for (i = 0; i < NR_SOFTIRQS; i++)
+			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
+	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
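
For completeness (editorial aside, not part of the commit): the dequeue side is left to each softirq's handler. Below is a sketch of the drain loop such a handler could use, mirroring how the block layer consumes its per-cpu completion list; my_softirq_action(), my_finish_request(), MY_SOFTIRQ, and struct my_request (from the sketch above) are illustrative names, not kernel API.

/* Assumed registered at init time with:
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 */
static void my_softirq_action(struct softirq_action *h)
{
	struct list_head local_list;
	struct list_head *head;

	/* __local_trigger() adds entries from irq context, so detach the
	 * whole per-cpu list atomically, then drain it with interrupts
	 * enabled. */
	local_irq_disable();
	head = &__get_cpu_var(softirq_work_list[MY_SOFTIRQ]);
	list_replace_init(head, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct my_request *rq = list_entry(local_list.next,
						   struct my_request, csd.list);

		list_del(&rq->csd.list);
		my_finish_request(rq);	/* hypothetical per-request completion */
	}
}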