Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c       |  3 ++-
-rw-r--r--  kernel/fork.c       |  3 +++
-rw-r--r--  kernel/irq/manage.c | 10 ++++++++++
-rw-r--r--  kernel/module.c     | 35 +++++++++++++++++++++++++++--------
4 files changed, 42 insertions(+), 9 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index cce59cb5ee6a..7f2683a10ac4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -953,7 +953,8 @@ NORET_TYPE void do_exit(long code)
 
 	acct_update_integrals(tsk);
 	/* sync mm's RSS info before statistics gathering */
-	sync_mm_rss(tsk, tsk->mm);
+	if (tsk->mm)
+		sync_mm_rss(tsk, tsk->mm);
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
 		hrtimer_cancel(&tsk->signal->real_timer);
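
A note on the kernel/exit.c hunk: kernel threads run with tsk->mm == NULL, so the previously unconditional sync_mm_rss() call could dereference a NULL pointer on their exit path; the added check simply skips the RSS sync when there is no mm to sync into. A standalone sketch of the failure mode and the guard (the demo_* names are ours, not the kernel's):

#include <stdio.h>
#include <stddef.h>

struct demo_mm   { long rss; };                 /* stands in for mm_struct */
struct demo_task { struct demo_mm *mm; long cached_rss; };

/* Folds the task's cached RSS delta into its mm, like sync_mm_rss();
 * it dereferences mm unconditionally, so a NULL mm would crash. */
static void demo_sync_rss(struct demo_task *t)
{
        t->mm->rss += t->cached_rss;
        t->cached_rss = 0;
}

int main(void)
{
        struct demo_mm mm = { 0 };
        struct demo_task user_task = { .mm = &mm,  .cached_rss = 3 };
        struct demo_task kthread   = { .mm = NULL, .cached_rss = 0 };

        demo_sync_rss(&user_task);       /* fine: has an mm */
        if (kthread.mm)                  /* the guard the patch adds */
                demo_sync_rss(&kthread); /* skipped: nothing to sync */
        printf("synced rss: %ld\n", mm.rss);
        return 0;
}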
diff --git a/kernel/fork.c b/kernel/fork.c
index 4799c5f0e6d0..44b0791b0a2e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1052,6 +1052,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->prev_utime = cputime_zero;
 	p->prev_stime = cputime_zero;
 #endif
+#if defined(SPLIT_RSS_COUNTING)
+	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
+#endif
 
 	p->default_timer_slack_ns = current->timer_slack_ns;
 
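
A note on the kernel/fork.c hunk: under SPLIT_RSS_COUNTING each task carries not-yet-flushed RSS deltas in task_struct.rss_stat, and copy_process() starts from a duplicate of the parent's task_struct. Without the memset the child would inherit the parent's pending deltas and later flush them a second time. A standalone sketch of that hazard (demo_* names are ours):

#include <stdio.h>
#include <string.h>

struct demo_rss_stat { int events; long count[2]; };
struct demo_task     { struct demo_rss_stat rss_stat; };

int main(void)
{
        struct demo_task parent = {
                .rss_stat = { .events = 16, .count = { 7, 2 } }
        };

        /* copy_process() begins with a byte copy of the parent ... */
        struct demo_task child = parent;

        /* ... so the child must drop the parent's pending deltas,
         * which is exactly what the added memset does. */
        memset(&child.rss_stat, 0, sizeof(child.rss_stat));

        printf("parent pending: %ld, child pending: %ld\n",
               parent.rss_stat.count[0], child.rss_stat.count[0]);
        return 0;
}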
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 398fda155f6e..704e488730a5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -757,6 +757,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	if (new->flags & IRQF_ONESHOT)
 		desc->status |= IRQ_ONESHOT;
 
+	/*
+	 * Force MSI interrupts to run with interrupts
+	 * disabled. The multi vector cards can cause stack
+	 * overflows due to nested interrupts when enough of
+	 * them are directed to a core and fire at the same
+	 * time.
+	 */
+	if (desc->msi_desc)
+		new->flags |= IRQF_DISABLED;
+
 	if (!(desc->status & IRQ_NOAUTOEN)) {
 		desc->depth = 0;
 		desc->status &= ~IRQ_DISABLED;
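
A note on the kernel/irq/manage.c hunk: a handler registered without IRQF_DISABLED runs with local interrupts enabled, so another vector can preempt it and push a further handler frame onto the same kernel stack; with enough MSI vectors steered at one core, the nesting depth is bounded only by the vector count. Since desc->msi_desc is set only for MSI interrupts, line-based IRQs are unaffected. Back-of-the-envelope arithmetic (all sizes hypothetical, for illustration only):

#include <stdio.h>

int main(void)
{
        const int stack_bytes = 8192; /* hypothetical per-CPU IRQ stack */
        const int frame_bytes = 512;  /* hypothetical handler frame     */
        const int vectors     = 32;   /* MSI vectors aimed at one core  */

        /* Interrupts left enabled: worst case, every vector nests. */
        int nested = vectors * frame_bytes;
        /* IRQF_DISABLED: handlers run back to back, one frame deep. */
        int serial = frame_bytes;

        printf("nested: %d bytes (%s), serialized: %d bytes\n",
               nested, nested > stack_bytes ? "overflow" : "ok", serial);
        return 0;
}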
diff --git a/kernel/module.c b/kernel/module.c
index 9f8d23d8b3a8..1016b75b026a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -521,11 +521,13 @@ static void module_unload_init(struct module *mod)
 	int cpu;
 
 	INIT_LIST_HEAD(&mod->modules_which_use_me);
-	for_each_possible_cpu(cpu)
-		per_cpu_ptr(mod->refptr, cpu)->count = 0;
+	for_each_possible_cpu(cpu) {
+		per_cpu_ptr(mod->refptr, cpu)->incs = 0;
+		per_cpu_ptr(mod->refptr, cpu)->decs = 0;
+	}
 
 	/* Hold reference count during initialization. */
-	__this_cpu_write(mod->refptr->count, 1);
+	__this_cpu_write(mod->refptr->incs, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
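
This hunk, and the two that follow, split the single per-CPU module reference counter into separate increment and decrement counters. The diff is limited to kernel/, so the matching type change is not shown; presumably struct module_ref in include/linux/module.h becomes something along these lines (our reconstruction, not part of this diff):

struct module_ref {
        unsigned int incs;      /* module_get()s seen on this CPU */
        unsigned int decs;      /* module_put()s seen on this CPU */
};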
@@ -664,12 +666,28 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned int module_refcount(struct module *mod)
 {
-	unsigned int total = 0;
+	unsigned int incs = 0, decs = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		total += per_cpu_ptr(mod->refptr, cpu)->count;
-	return total;
+		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
+	/*
+	 * ensure the incs are added up after the decs.
+	 * module_put ensures incs are visible before decs with smp_wmb.
+	 *
+	 * This 2-count scheme avoids the situation where the refcount
+	 * for CPU0 is read, then CPU0 increments the module refcount,
+	 * then CPU1 drops that refcount, then the refcount for CPU1 is
+	 * read. We would record a decrement but not its corresponding
+	 * increment so we would see a low count (disaster).
+	 *
+	 * Rare situation? But module_refcount can be preempted, and we
+	 * might be tallying up 4096+ CPUs. So it is not impossible.
+	 */
+	smp_rmb();
+	for_each_possible_cpu(cpu)
+		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
+	return incs - decs;
 }
 EXPORT_SYMBOL(module_refcount);
 
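
The in-code comment above states the ordering contract: module_put() issues an smp_wmb() so each increment is visible before its matching decrement, and module_refcount() sums the decs first, issues an smp_rmb(), then sums the incs. The total can therefore transiently read high (an inc counted whose dec was missed) but never low. A standalone model of that bookkeeping, using C11 fences in place of the kernel barriers; the demo_* names and the fixed 4-CPU array are ours:

#include <stdatomic.h>
#include <stdio.h>

#define NCPU 4

static atomic_ulong incs[NCPU], decs[NCPU];

static void demo_get(int cpu)
{
        atomic_fetch_add_explicit(&incs[cpu], 1, memory_order_relaxed);
}

static void demo_put(int cpu)
{
        /* Make the inc that paired with this put visible before the
         * dec, mirroring the smp_wmb() in module_put(). */
        atomic_thread_fence(memory_order_release);
        atomic_fetch_add_explicit(&decs[cpu], 1, memory_order_relaxed);
}

static unsigned long demo_refcount(void)
{
        unsigned long i = 0, d = 0;
        int cpu;

        for (cpu = 0; cpu < NCPU; cpu++)        /* sum decs first ... */
                d += atomic_load_explicit(&decs[cpu], memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire); /* smp_rmb() analogue */
        for (cpu = 0; cpu < NCPU; cpu++)        /* ... then the incs */
                i += atomic_load_explicit(&incs[cpu], memory_order_relaxed);
        return i - d;   /* may read high, never low */
}

int main(void)
{
        demo_get(0);    /* get on CPU0 ...           */
        demo_put(2);    /* ... put later on CPU2     */
        demo_get(1);    /* second reference, CPU1    */
        printf("refcount = %lu\n", demo_refcount()); /* prints 1 */
        return 0;
}

Reading high merely delays an unload decision; reading low could let a live module be freed, which is the "disaster" the comment refers to.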
@@ -846,10 +864,11 @@ void module_put(struct module *module)
 {
 	if (module) {
 		preempt_disable();
-		__this_cpu_dec(module->refptr->count);
+		smp_wmb(); /* see comment in module_refcount */
+		__this_cpu_inc(module->refptr->decs);
 
 		trace_module_put(module, _RET_IP_,
-				 __this_cpu_read(module->refptr->count));
+				 __this_cpu_read(module->refptr->decs));
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);