Diffstat (limited to 'kernel')
 kernel/module.c  |  11 ++++
 kernel/panic.c   |   2 +
 kernel/softirq.c | 129 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 142 insertions, 0 deletions
diff --git a/kernel/module.c b/kernel/module.c
index b7205f67cfaf..25bc9ac9e226 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1834,6 +1834,7 @@ static noinline struct module *load_module(void __user *umod,
 	Elf_Ehdr *hdr;
 	Elf_Shdr *sechdrs;
 	char *secstrings, *args, *modmagic, *strtab = NULL;
+	char *staging;
 	unsigned int i;
 	unsigned int symindex = 0;
 	unsigned int strindex = 0;
@@ -1989,6 +1990,14 @@ static noinline struct module *load_module(void __user *umod,
 		goto free_hdr;
 	}
 
+	staging = get_modinfo(sechdrs, infoindex, "staging");
+	if (staging) {
+		add_taint_module(mod, TAINT_CRAP);
+		printk(KERN_WARNING "%s: module is from the staging directory,"
+		       " the quality is unknown, you have been warned.\n",
+		       mod->name);
+	}
+
 	/* Now copy in args */
 	args = strndup_user(uargs, ~0UL >> 1);
 	if (IS_ERR(args)) {
@@ -2587,6 +2596,8 @@ static char *module_flags(struct module *mod, char *buf)
 			buf[bx++] = 'P';
 		if (mod->taints & (1 << TAINT_FORCED_MODULE))
 			buf[bx++] = 'F';
+		if (mod->taints & (1 << TAINT_CRAP))
+			buf[bx++] = 'C';
 		/*
 		 * TAINT_FORCED_RMMOD: could be added.
 		 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
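
For reference, the "staging" marker that load_module() now checks is an ordinary .modinfo tag. Below is a minimal, hypothetical module that carries the tag by hand; in the real tree the tag is expected to be attached automatically to everything built under drivers/staging (that hookup is outside this kernel/-only diff), so the explicit MODULE_INFO() line here is only for illustration.

/* Hypothetical example module, not part of this patch. */
#include <linux/module.h>
#include <linux/init.h>

/*
 * MODULE_INFO(tag, info) records the string "tag=info" in the module's
 * .modinfo ELF section; get_modinfo(sechdrs, infoindex, "staging") above
 * finds exactly this entry and returns "Y".
 */
MODULE_INFO(staging, "Y");

static int __init staging_example_init(void)
{
	/* Loading this module sets TAINT_CRAP and prints the warning above. */
	return 0;
}

static void __exit staging_example_exit(void)
{
}

module_init(staging_example_init);
module_exit(staging_example_exit);
MODULE_LICENSE("GPL");
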
diff --git a/kernel/panic.c b/kernel/panic.c
index f290e8e866f6..bda561ef3cdf 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -161,6 +161,7 @@ static const struct tnt tnts[] = {
 	{ TAINT_DIE, 'D', ' ' },
 	{ TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
 	{ TAINT_WARN, 'W', ' ' },
+	{ TAINT_CRAP, 'C', ' ' },
 };
 
 /**
@@ -175,6 +176,7 @@ static const struct tnt tnts[] = {
 *  'U' - Userspace-defined naughtiness.
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
+*  'C' - modules from drivers/staging are loaded.
 *
 *  The string is overwritten by the next call to print_taint().
 */
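
Once the flag is wired into print_tainted() and module_flags(), the 'C' character shows up in oops headers and in the per-module flags in /proc/modules, and the raw mask is readable from userspace. A small sketch follows, assuming TAINT_CRAP lands on bit 10 of the mask exported through /proc/sys/kernel/tainted; that definition lives in include/linux/kernel.h and is not part of this kernel/-only diff.

/* Userspace sketch; TAINT_CRAP_BIT is an assumption, see note above. */
#include <stdio.h>
#include <stdlib.h>

#define TAINT_CRAP_BIT	10

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/tainted", "r");
	unsigned long mask = 0;

	if (!f) {
		perror("/proc/sys/kernel/tainted");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%lu", &mask) != 1) {
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);

	printf("taint mask %#lx: staging taint ('C') is %s\n", mask,
	       (mask & (1UL << TAINT_CRAP_BIT)) ? "set" : "clear");
	return EXIT_SUCCESS;
}
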
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 37d67aa2d56f..83ba21a13bd4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+*
+*	Remote softirq infrastructure is by Jens Axboe.
 */
 
 #include <linux/module.h>
@@ -474,17 +476,144 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+	list_add_tail(&cp->list, head);
+
+	/* Trigger the softirq only if the list was previously empty. */
+	if (head->next == &cp->list)
+		raise_softirq_irqoff(softirq);
+}
+
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+	struct call_single_data *cp = data;
+	unsigned long flags;
+	int softirq;
+
+	softirq = cp->priv;
+
+	local_irq_save(flags);
+	__local_trigger(cp, softirq);
+	local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	if (cpu_online(cpu)) {
+		cp->func = remote_softirq_receive;
+		cp->info = cp;
+		cp->flags = 0;
+		cp->priv = softirq;
+
+		__smp_call_function_single(cpu, cp);
+		return 0;
+	}
+	return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	return 1;
+}
+#endif
+
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu.  If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+		__local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq except that disabling interrupts and
+ * computing the current cpu is done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	unsigned long flags;
+	int this_cpu;
+
+	local_irq_save(flags);
+	this_cpu = smp_processor_id();
+	__send_remote_softirq(cp, cpu, this_cpu, softirq);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
+
+static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+					       unsigned long action, void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		int cpu = (unsigned long) hcpu;
+		int i;
+
+		local_irq_disable();
+		for (i = 0; i < NR_SOFTIRQS; i++) {
+			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+			struct list_head *local_head;
+
+			if (list_empty(head))
+				continue;
+
+			local_head = &__get_cpu_var(softirq_work_list[i]);
+			list_splice_init(head, local_head);
+			raise_softirq_irqoff(i);
+		}
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+	.notifier_call = remote_softirq_cpu_notify,
+};
+
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
+		int i;
+
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
+		for (i = 0; i < NR_SOFTIRQS; i++)
+			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
+	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
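
The exported pair __send_remote_softirq()/send_remote_softirq() is the whole public surface of the new infrastructure: a caller embeds a struct call_single_data in its own work item, asks for a softirq to be raised on a chosen CPU, and drains that CPU's softirq_work_list[] slot from its softirq handler. The sketch below shows that usage pattern under stated assumptions: my_work, my_complete_on(), my_softirq_action() and MY_SOFTIRQ are hypothetical names, MY_SOFTIRQ stands in for a dedicated entry in the softirq enum (HI_SOFTIRQ is only a compile-time placeholder), and the matching declarations in include/linux/interrupt.h are outside this kernel/-only diff.

/* Hedged usage sketch, not part of this patch. */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/list.h>

/* Assumption: a real user would add a dedicated slot to the softirq enum. */
#define MY_SOFTIRQ	HI_SOFTIRQ

struct my_work {
	struct call_single_data csd;	/* list/IPI bookkeeping used by the core */
	int result;
};

/*
 * Submitter side: queue @w so that MY_SOFTIRQ runs on @target_cpu.
 * send_remote_softirq() falls back to the local CPU if the target is offline.
 */
static void my_complete_on(struct my_work *w, int target_cpu)
{
	send_remote_softirq(&w->csd, target_cpu, MY_SOFTIRQ);
}

/*
 * Softirq handler on the CPU that ends up owning the work.  The owner of a
 * dedicated slot would register it once at boot with
 * open_softirq(MY_SOFTIRQ, my_softirq_action).
 */
static void my_softirq_action(struct softirq_action *h)
{
	LIST_HEAD(local);

	/* Detach this CPU's pending entries with interrupts disabled. */
	local_irq_disable();
	list_splice_init(&__get_cpu_var(softirq_work_list[MY_SOFTIRQ]), &local);
	local_irq_enable();

	while (!list_empty(&local)) {
		struct call_single_data *cp =
			list_entry(local.next, struct call_single_data, list);
		struct my_work *w = container_of(cp, struct my_work, csd);

		list_del(&cp->list);
		/* ... consume w->result ... */
	}
}
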
