Diffstat (limited to 'kernel')

-rw-r--r--  kernel/Kconfig.preempt       25
-rw-r--r--  kernel/Makefile               6
-rw-r--r--  kernel/irq/manage.c          12
-rw-r--r--  kernel/lockdep.c             27
-rw-r--r--  kernel/printk.c               2
-rw-r--r--  kernel/rcupreempt.c          10
-rw-r--r--  kernel/rcupreempt_trace.c    10
-rw-r--r--  kernel/rcutorture.c          66
-rw-r--r--  kernel/rcutree.c           1535
-rw-r--r--  kernel/rcutree_trace.c      271
-rw-r--r--  kernel/resource.c             9
-rw-r--r--  kernel/softirq.c             19
-rw-r--r--  kernel/stacktrace.c          11

13 files changed, 1927 insertions, 76 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 9fdba03dc1f..bf987b95b35 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -52,28 +52,3 @@ config PREEMPT
 
 endchoice
 
-config PREEMPT_RCU
-	bool "Preemptible RCU"
-	depends on PREEMPT
-	default n
-	help
-	  This option reduces the latency of the kernel by making certain
-	  RCU sections preemptible. Normally RCU code is non-preemptible, if
-	  this option is selected then read-only RCU sections become
-	  preemptible. This helps latency, but may expose bugs due to
-	  now-naive assumptions about each RCU read-side critical section
-	  remaining on a given CPU through its execution.
-
-	  Say N if you are unsure.
-
-config RCU_TRACE
-	bool "Enable tracing for RCU - currently stats in debugfs"
-	depends on PREEMPT_RCU
-	select DEBUG_FS
-	default y
-	help
-	  This option provides tracing in RCU which presents stats
-	  in debugfs for debugging RCU implementation.
-
-	  Say Y here if you want to enable RCU tracing
-	  Say N if you are unsure.
diff --git a/kernel/Makefile b/kernel/Makefile
index 19fad003b19..b4fdbbff5ec 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -74,10 +74,10 @@ obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
+obj-$(CONFIG_TREE_RCU) += rcutree.o
 obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
-ifeq ($(CONFIG_PREEMPT_RCU),y)
-obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o
-endif
+obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
+obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 801addda3c4..e9d1c8205a3 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -673,6 +673,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	struct irq_desc *desc;
 	int retval;
 
+	/*
+	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
+	 * the _first_ irqaction (sigh).  That can cause oopsing, but
+	 * the behavior is classified as "will not fix" so we need to
+	 * start nudging drivers away from using that idiom.
+	 */
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+			== (IRQF_SHARED|IRQF_DISABLED))
+		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+				"guaranteed on shared IRQs\n",
+				irq, devname);
+
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * Lockdep wants atomic interrupt handlers:
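Context for the hunk above: handle_IRQ_event() honors IRQF_DISABLED only for the first irqaction on a shared line, so any later handler may run with interrupts enabled regardless of the flag. A hedged sketch of the driver-side idiom the new warning targets (hypothetical driver; my_handler, "mydev", and dev are illustrative names, not from this patch):

	/* Requesting a shared line while still expecting IRQF_DISABLED
	 * semantics -- this combination now triggers the pr_warning()
	 * added above. */
	int err = request_irq(dev->irq, my_handler,
			      IRQF_SHARED | IRQF_DISABLED, "mydev", dev);

Such a driver would typically either drop IRQF_DISABLED and make its handler safe to run with interrupts enabled, or stop sharing the line.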
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e4bdda8dcf0..4fa6eeb4e8a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -291,14 +291,12 @@ void lockdep_off(void)
 {
 	current->lockdep_recursion++;
 }
-
 EXPORT_SYMBOL(lockdep_off);
 
 void lockdep_on(void)
 {
 	current->lockdep_recursion--;
 }
-
 EXPORT_SYMBOL(lockdep_on);
 
 /*
@@ -580,7 +578,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 /*
  * printk all lock dependencies starting at <entry>:
  */
-static void print_lock_dependencies(struct lock_class *class, int depth)
+static void __used
+print_lock_dependencies(struct lock_class *class, int depth)
 {
 	struct lock_list *entry;
 
@@ -2512,7 +2511,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
-
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
 /*
@@ -2693,8 +2691,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 }
 
 static int
-__lock_set_subclass(struct lockdep_map *lock,
-		    unsigned int subclass, unsigned long ip)
+__lock_set_class(struct lockdep_map *lock, const char *name,
+		 struct lock_class_key *key, unsigned int subclass,
+		 unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct held_lock *hlock, *prev_hlock;
@@ -2721,6 +2720,7 @@ __lock_set_subclass(struct lockdep_map *lock,
 		return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
+	lockdep_init_map(lock, name, key, 0);
 	class = register_lock_class(lock, subclass, 0);
 	hlock->class_idx = class - lock_classes + 1;
 
@@ -2905,9 +2905,9 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
-void
-lock_set_subclass(struct lockdep_map *lock,
-		  unsigned int subclass, unsigned long ip)
+void lock_set_class(struct lockdep_map *lock, const char *name,
+		    struct lock_class_key *key, unsigned int subclass,
+		    unsigned long ip)
 {
 	unsigned long flags;
 
@@ -2917,13 +2917,12 @@ lock_set_subclass(struct lockdep_map *lock,
 	raw_local_irq_save(flags);
 	current->lockdep_recursion = 1;
 	check_flags(flags);
-	if (__lock_set_subclass(lock, subclass, ip))
+	if (__lock_set_class(lock, name, key, subclass, ip))
 		check_chain_key(current);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL_GPL(lock_set_subclass);
+EXPORT_SYMBOL_GPL(lock_set_class);
 
 /*
  * We are not always called with irqs disabled - do that here,
@@ -2947,7 +2946,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_acquire);
 
 void lock_release(struct lockdep_map *lock, int nested,
@@ -2965,7 +2963,6 @@ void lock_release(struct lockdep_map *lock, int nested,
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_release);
 
 #ifdef CONFIG_LOCK_STAT
@@ -3450,7 +3447,6 @@ retry:
 	if (unlock)
 		read_unlock(&tasklist_lock);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
 
 /*
@@ -3471,7 +3467,6 @@ void debug_show_held_locks(struct task_struct *task)
 {
 	__debug_show_held_locks(task);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
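The net effect of the lockdep hunks above: lock_set_subclass() is generalized into lock_set_class(), which can re-register a held lock under a new name/key pair rather than only a new subclass (note the added lockdep_init_map() call at found_it:). A hedged sketch of how the old entry point can then be kept as a thin wrapper; the matching header change falls outside this kernel/-limited diffstat, so treat this as illustrative rather than the exact committed code:

	static inline void lock_set_subclass(struct lockdep_map *lock,
					     unsigned int subclass,
					     unsigned long ip)
	{
		/* Same name and key as before -- only the subclass changes. */
		lock_set_class(lock, lock->name, lock->key, subclass, ip);
	}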
diff --git a/kernel/printk.c b/kernel/printk.c
index f492f1583d7..e651ab05655 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -662,7 +662,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	if (recursion_bug) {
 		recursion_bug = 0;
 		strcpy(printk_buf, recursion_bug_msg);
-		printed_len = sizeof(recursion_bug_msg);
+		printed_len = strlen(recursion_bug_msg);
 	}
 	/* Emit the output into the temporary buffer */
 	printed_len += vscnprintf(printk_buf + printed_len,
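The printk.c hunk fixes a length-accounting bug: recursion_bug_msg is a char array, so sizeof() includes the terminating NUL and overstates the copied length, skewing the offset at which vscnprintf() appends. A standalone sketch of the difference (illustrative string, not the kernel's actual message):

	#include <stdio.h>
	#include <string.h>

	static const char msg[] = "BUG: recent printk recursion!";

	int main(void)
	{
		/* sizeof() counts the trailing '\0'; strlen() does not. */
		printf("sizeof = %zu, strlen = %zu\n",
		       sizeof(msg), strlen(msg));	/* prints 30 vs 29 */
		return 0;
	}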
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 59236e8b9da..04982659875 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -551,6 +551,16 @@ void rcu_irq_exit(void)
 	}
 }
 
+void rcu_nmi_enter(void)
+{
+	rcu_irq_enter();
+}
+
+void rcu_nmi_exit(void)
+{
+	rcu_irq_exit();
+}
+
 static void dyntick_save_progress_counter(int cpu)
 {
 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 35c2d3360ec..7c2665cac17 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
@@ -149,12 +149,12 @@ static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
 	sp->done_length += cp->done_length;
 	sp->done_add += cp->done_add;
 	sp->done_remove += cp->done_remove;
-	atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked));
+	atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked);
 	sp->rcu_check_callbacks += cp->rcu_check_callbacks;
-	atomic_set(&sp->rcu_try_flip_1,
-		   atomic_read(&cp->rcu_try_flip_1));
-	atomic_set(&sp->rcu_try_flip_e1,
-		   atomic_read(&cp->rcu_try_flip_e1));
+	atomic_add(atomic_read(&cp->rcu_try_flip_1),
+		   &sp->rcu_try_flip_1);
+	atomic_add(atomic_read(&cp->rcu_try_flip_e1),
+		   &sp->rcu_try_flip_e1);
 	sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
 	sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
 	sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
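The rcupreempt_trace.c hunk fixes the summation of per-CPU counters: atomic_set() overwrote the running total with whichever CPU happened to be visited last, whereas atomic_add() accumulates each CPU's contribution. A standalone sketch of the difference, with plain ints standing in for atomic_t:

	#include <stdio.h>

	int main(void)
	{
		int percpu[3] = { 5, 7, 11 };	/* counts from three CPUs */
		int set_result = 0, add_result = 0;
		int i;

		for (i = 0; i < 3; i++) {
			set_result = percpu[i];		/* old code: overwrite */
			add_result += percpu[i];	/* new code: accumulate */
		}
		printf("set = %d, add = %d\n", set_result, add_result);
		return 0;	/* prints "set = 11, add = 23" */
	}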
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 85cb90588a5..b3106552210 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -39,6 +39,7 @@
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
+#include <linux/reboot.h>
 #include <linux/freezer.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
@@ -108,7 +109,6 @@ struct rcu_torture {
 	int rtort_mbtest;
 };
 
-static int fullstop = 0;	/* stop generating callbacks at test end. */
 static LIST_HEAD(rcu_torture_freelist);
 static struct rcu_torture *rcu_torture_current = NULL;
 static long rcu_torture_current_version = 0;
@@ -136,6 +136,30 @@ static int stutter_pause_test = 0;
 #endif
 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
 
+#define FULLSTOP_SIGNALED 1	/* Bail due to signal. */
+#define FULLSTOP_CLEANUP  2	/* Orderly shutdown. */
+static int fullstop;		/* stop generating callbacks at test end. */
+DEFINE_MUTEX(fullstop_mutex);	/* protect fullstop transitions and */
+				/* spawning of kthreads. */
+
+/*
+ * Detect and respond to a signal-based shutdown.
+ */
+static int
+rcutorture_shutdown_notify(struct notifier_block *unused1,
+			   unsigned long unused2, void *unused3)
+{
+	if (fullstop)
+		return NOTIFY_DONE;
+	if (signal_pending(current)) {
+		mutex_lock(&fullstop_mutex);
+		if (!ACCESS_ONCE(fullstop))
+			fullstop = FULLSTOP_SIGNALED;
+		mutex_unlock(&fullstop_mutex);
+	}
+	return NOTIFY_DONE;
+}
+
 /*
  * Allocate an element from the rcu_tortures pool.
  */
@@ -199,11 +223,12 @@ rcu_random(struct rcu_random_state *rrsp)
 static void
 rcu_stutter_wait(void)
 {
-	while (stutter_pause_test || !rcutorture_runnable)
+	while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
 		if (rcutorture_runnable)
 			schedule_timeout_interruptible(1);
 		else
 			schedule_timeout_interruptible(round_jiffies_relative(HZ));
+	}
 }
 
 /*
@@ -599,7 +624,7 @@ rcu_torture_writer(void *arg)
 		rcu_stutter_wait();
 	} while (!kthread_should_stop() && !fullstop);
 	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
-	while (!kthread_should_stop())
+	while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -624,7 +649,7 @@ rcu_torture_fakewriter(void *arg)
 	} while (!kthread_should_stop() && !fullstop);
 
 	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
-	while (!kthread_should_stop())
+	while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -734,7 +759,7 @@ rcu_torture_reader(void *arg)
 	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
 	if (irqreader && cur_ops->irqcapable)
 		del_timer_sync(&t);
-	while (!kthread_should_stop())
+	while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -831,7 +856,7 @@ rcu_torture_stats(void *arg)
 	do {
 		schedule_timeout_interruptible(stat_interval * HZ);
 		rcu_torture_stats_print();
-	} while (!kthread_should_stop());
+	} while (!kthread_should_stop() && !fullstop);
 	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
 	return 0;
 }
@@ -899,7 +924,7 @@ rcu_torture_shuffle(void *arg)
 	do {
 		schedule_timeout_interruptible(shuffle_interval * HZ);
 		rcu_torture_shuffle_tasks();
-	} while (!kthread_should_stop());
+	} while (!kthread_should_stop() && !fullstop);
 	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
 	return 0;
 }
@@ -914,10 +939,10 @@ rcu_torture_stutter(void *arg)
 	do {
 		schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 1;
-		if (!kthread_should_stop())
+		if (!kthread_should_stop() && !fullstop)
 			schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 0;
-	} while (!kthread_should_stop());
+	} while (!kthread_should_stop() && !fullstop);
 	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
 	return 0;
 }
@@ -934,12 +959,27 @@ rcu_torture_print_module_parms(char *tag)
 		stutter, irqreader);
 }
 
+static struct notifier_block rcutorture_nb = {
+	.notifier_call = rcutorture_shutdown_notify,
+};
+
 static void
 rcu_torture_cleanup(void)
 {
 	int i;
 
-	fullstop = 1;
+	mutex_lock(&fullstop_mutex);
+	if (!fullstop) {
+		/* If being signaled, let it happen, then exit. */
+		mutex_unlock(&fullstop_mutex);
+		schedule_timeout_interruptible(10 * HZ);
+		if (cur_ops->cb_barrier != NULL)
+			cur_ops->cb_barrier();
+		return;
+	}
+	fullstop = FULLSTOP_CLEANUP;
+	mutex_unlock(&fullstop_mutex);
+	unregister_reboot_notifier(&rcutorture_nb);
 	if (stutter_task) {
 		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
 		kthread_stop(stutter_task);
@@ -1015,6 +1055,8 @@ rcu_torture_init(void)
 		{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
 		  &srcu_ops, &sched_ops, &sched_ops_sync, };
 
+	mutex_lock(&fullstop_mutex);
+
 	/* Process args and tell the world that the torturer is on the job. */
 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
 		cur_ops = torture_ops[i];
@@ -1024,6 +1066,7 @@ rcu_torture_init(void)
 	if (i == ARRAY_SIZE(torture_ops)) {
 		printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
 		       torture_type);
+		mutex_unlock(&fullstop_mutex);
 		return (-EINVAL);
 	}
 	if (cur_ops->init)
@@ -1146,9 +1189,12 @@ rcu_torture_init(void)
 			goto unwind;
 		}
 	}
+	register_reboot_notifier(&rcutorture_nb);
+	mutex_unlock(&fullstop_mutex);
 	return 0;
 
 unwind:
+	mutex_unlock(&fullstop_mutex);
 	rcu_torture_cleanup();
 	return firsterr;
 }
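The rcutorture changes above replace the bare fullstop flag with a small shutdown protocol: fullstop transitions are serialized by fullstop_mutex, a reboot notifier flips the flag to FULLSTOP_SIGNALED on shutdown, and kthreads stopped by a signal spin harmlessly instead of posting further callbacks into a dying system. A minimal sketch of the reboot-notifier pattern being adopted (hypothetical module; my_shutdown and my_nb are illustrative names):

	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/reboot.h>

	static int my_shutdown(struct notifier_block *nb,
			       unsigned long action, void *data)
	{
		/* Stop generating new work before the system goes down. */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_shutdown,
	};

	static int __init my_init(void)
	{
		register_reboot_notifier(&my_nb);
		return 0;
	}

	static void __exit my_exit(void)
	{
		unregister_reboot_notifier(&my_nb);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");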
diff --git a/kernel/rcutree.c b/kernel/rcutree.c new file mode 100644 index 00000000000..a342b032112 --- /dev/null +++ b/kernel/rcutree.c | |||
| @@ -0,0 +1,1535 @@ | |||
| 1 | /* | ||
| 2 | * Read-Copy Update mechanism for mutual exclusion | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * Copyright IBM Corporation, 2008 | ||
| 19 | * | ||
| 20 | * Authors: Dipankar Sarma <dipankar@in.ibm.com> | ||
| 21 | * Manfred Spraul <manfred@colorfullife.com> | ||
| 22 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version | ||
| 23 | * | ||
| 24 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> | ||
| 25 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | ||
| 26 | * | ||
| 27 | * For detailed explanation of Read-Copy Update mechanism see - | ||
| 28 | * Documentation/RCU | ||
| 29 | */ | ||
| 30 | #include <linux/types.h> | ||
| 31 | #include <linux/kernel.h> | ||
| 32 | #include <linux/init.h> | ||
| 33 | #include <linux/spinlock.h> | ||
| 34 | #include <linux/smp.h> | ||
| 35 | #include <linux/rcupdate.h> | ||
| 36 | #include <linux/interrupt.h> | ||
| 37 | #include <linux/sched.h> | ||
| 38 | #include <asm/atomic.h> | ||
| 39 | #include <linux/bitops.h> | ||
| 40 | #include <linux/module.h> | ||
| 41 | #include <linux/completion.h> | ||
| 42 | #include <linux/moduleparam.h> | ||
| 43 | #include <linux/percpu.h> | ||
| 44 | #include <linux/notifier.h> | ||
| 45 | #include <linux/cpu.h> | ||
| 46 | #include <linux/mutex.h> | ||
| 47 | #include <linux/time.h> | ||
| 48 | |||
| 49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 50 | static struct lock_class_key rcu_lock_key; | ||
| 51 | struct lockdep_map rcu_lock_map = | ||
| 52 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); | ||
| 53 | EXPORT_SYMBOL_GPL(rcu_lock_map); | ||
| 54 | #endif | ||
| 55 | |||
| 56 | /* Data structures. */ | ||
| 57 | |||
| 58 | #define RCU_STATE_INITIALIZER(name) { \ | ||
| 59 | .level = { &name.node[0] }, \ | ||
| 60 | .levelcnt = { \ | ||
| 61 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ | ||
| 62 | NUM_RCU_LVL_1, \ | ||
| 63 | NUM_RCU_LVL_2, \ | ||
| 64 | NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ | ||
| 65 | }, \ | ||
| 66 | .signaled = RCU_SIGNAL_INIT, \ | ||
| 67 | .gpnum = -300, \ | ||
| 68 | .completed = -300, \ | ||
| 69 | .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ | ||
| 70 | .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ | ||
| 71 | .n_force_qs = 0, \ | ||
| 72 | .n_force_qs_ngp = 0, \ | ||
| 73 | } | ||
| 74 | |||
| 75 | struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state); | ||
| 76 | DEFINE_PER_CPU(struct rcu_data, rcu_data); | ||
| 77 | |||
| 78 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | ||
| 79 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | ||
| 80 | |||
| 81 | #ifdef CONFIG_NO_HZ | ||
| 82 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks); | ||
| 83 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
| 84 | |||
| 85 | static int blimit = 10; /* Maximum callbacks per softirq. */ | ||
| 86 | static int qhimark = 10000; /* If this many pending, ignore blimit. */ | ||
| 87 | static int qlowmark = 100; /* Once only this many pending, use blimit. */ | ||
| 88 | |||
| 89 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); | ||
| 90 | |||
| 91 | /* | ||
| 92 | * Return the number of RCU batches processed thus far for debug & stats. | ||
| 93 | */ | ||
| 94 | long rcu_batches_completed(void) | ||
| 95 | { | ||
| 96 | return rcu_state.completed; | ||
| 97 | } | ||
| 98 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | ||
| 99 | |||
| 100 | /* | ||
| 101 | * Return the number of RCU BH batches processed thus far for debug & stats. | ||
| 102 | */ | ||
| 103 | long rcu_batches_completed_bh(void) | ||
| 104 | { | ||
| 105 | return rcu_bh_state.completed; | ||
| 106 | } | ||
| 107 | EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); | ||
| 108 | |||
| 109 | /* | ||
| 110 | * Does the CPU have callbacks ready to be invoked? | ||
| 111 | */ | ||
| 112 | static int | ||
| 113 | cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) | ||
| 114 | { | ||
| 115 | return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]; | ||
| 116 | } | ||
| 117 | |||
| 118 | /* | ||
| 119 | * Does the current CPU require a yet-as-unscheduled grace period? | ||
| 120 | */ | ||
| 121 | static int | ||
| 122 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) | ||
| 123 | { | ||
| 124 | /* ACCESS_ONCE() because we are accessing outside of lock. */ | ||
| 125 | return *rdp->nxttail[RCU_DONE_TAIL] && | ||
| 126 | ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum); | ||
| 127 | } | ||
| 128 | |||
| 129 | /* | ||
| 130 | * Return the root node of the specified rcu_state structure. | ||
| 131 | */ | ||
| 132 | static struct rcu_node *rcu_get_root(struct rcu_state *rsp) | ||
| 133 | { | ||
| 134 | return &rsp->node[0]; | ||
| 135 | } | ||
| 136 | |||
| 137 | #ifdef CONFIG_SMP | ||
| 138 | |||
| 139 | /* | ||
| 140 | * If the specified CPU is offline, tell the caller that it is in | ||
| 141 | * a quiescent state. Otherwise, whack it with a reschedule IPI. | ||
| 142 | * Grace periods can end up waiting on an offline CPU when that | ||
| 143 | * CPU is in the process of coming online -- it will be added to the | ||
| 144 | * rcu_node bitmasks before it actually makes it online. The same thing | ||
| 145 | * can happen while a CPU is in the process of coming online. Because this | ||
| 146 | * race is quite rare, we check for it after detecting that the grace | ||
| 147 | * period has been delayed rather than checking each and every CPU | ||
| 148 | * each and every time we start a new grace period. | ||
| 149 | */ | ||
| 150 | static int rcu_implicit_offline_qs(struct rcu_data *rdp) | ||
| 151 | { | ||
| 152 | /* | ||
| 153 | * If the CPU is offline, it is in a quiescent state. We can | ||
| 154 | * trust its state not to change because interrupts are disabled. | ||
| 155 | */ | ||
| 156 | if (cpu_is_offline(rdp->cpu)) { | ||
| 157 | rdp->offline_fqs++; | ||
| 158 | return 1; | ||
| 159 | } | ||
| 160 | |||
| 161 | /* The CPU is online, so send it a reschedule IPI. */ | ||
| 162 | if (rdp->cpu != smp_processor_id()) | ||
| 163 | smp_send_reschedule(rdp->cpu); | ||
| 164 | else | ||
| 165 | set_need_resched(); | ||
| 166 | rdp->resched_ipi++; | ||
| 167 | return 0; | ||
| 168 | } | ||
| 169 | |||
| 170 | #endif /* #ifdef CONFIG_SMP */ | ||
| 171 | |||
| 172 | #ifdef CONFIG_NO_HZ | ||
| 173 | static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5); | ||
| 174 | |||
| 175 | /** | ||
| 176 | * rcu_enter_nohz - inform RCU that current CPU is entering nohz | ||
| 177 | * | ||
| 178 | * Enter nohz mode, in other words, -leave- the mode in which RCU | ||
| 179 | * read-side critical sections can occur. (Though RCU read-side | ||
| 180 | * critical sections can occur in irq handlers in nohz mode, a possibility | ||
| 181 | * handled by rcu_irq_enter() and rcu_irq_exit()). | ||
| 182 | */ | ||
| 183 | void rcu_enter_nohz(void) | ||
| 184 | { | ||
| 185 | unsigned long flags; | ||
| 186 | struct rcu_dynticks *rdtp; | ||
| 187 | |||
| 188 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | ||
| 189 | local_irq_save(flags); | ||
| 190 | rdtp = &__get_cpu_var(rcu_dynticks); | ||
| 191 | rdtp->dynticks++; | ||
| 192 | rdtp->dynticks_nesting--; | ||
| 193 | WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); | ||
| 194 | local_irq_restore(flags); | ||
| 195 | } | ||
| 196 | |||
| 197 | /* | ||
| 198 | * rcu_exit_nohz - inform RCU that current CPU is leaving nohz | ||
| 199 | * | ||
| 200 | * Exit nohz mode, in other words, -enter- the mode in which RCU | ||
| 201 | * read-side critical sections normally occur. | ||
| 202 | */ | ||
| 203 | void rcu_exit_nohz(void) | ||
| 204 | { | ||
| 205 | unsigned long flags; | ||
| 206 | struct rcu_dynticks *rdtp; | ||
| 207 | |||
| 208 | local_irq_save(flags); | ||
| 209 | rdtp = &__get_cpu_var(rcu_dynticks); | ||
| 210 | rdtp->dynticks++; | ||
| 211 | rdtp->dynticks_nesting++; | ||
| 212 | WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); | ||
| 213 | local_irq_restore(flags); | ||
| 214 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | ||
| 215 | } | ||
| 216 | |||
| 217 | /** | ||
| 218 | * rcu_nmi_enter - inform RCU of entry to NMI context | ||
| 219 | * | ||
| 220 | * If the CPU was idle with dynamic ticks active, and there is no | ||
| 221 | * irq handler running, this updates rdtp->dynticks_nmi to let the | ||
| 222 | * RCU grace-period handling know that the CPU is active. | ||
| 223 | */ | ||
| 224 | void rcu_nmi_enter(void) | ||
| 225 | { | ||
| 226 | struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); | ||
| 227 | |||
| 228 | if (rdtp->dynticks & 0x1) | ||
| 229 | return; | ||
| 230 | rdtp->dynticks_nmi++; | ||
| 231 | WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs); | ||
| 232 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | ||
| 233 | } | ||
| 234 | |||
| 235 | /** | ||
| 236 | * rcu_nmi_exit - inform RCU of exit from NMI context | ||
| 237 | * | ||
| 238 | * If the CPU was idle with dynamic ticks active, and there is no | ||
| 239 | * irq handler running, this updates rdtp->dynticks_nmi to let the | ||
| 240 | * RCU grace-period handling know that the CPU is no longer active. | ||
| 241 | */ | ||
| 242 | void rcu_nmi_exit(void) | ||
| 243 | { | ||
| 244 | struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); | ||
| 245 | |||
| 246 | if (rdtp->dynticks & 0x1) | ||
| 247 | return; | ||
| 248 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | ||
| 249 | rdtp->dynticks_nmi++; | ||
| 250 | WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs); | ||
| 251 | } | ||
| 252 | |||
| 253 | /** | ||
| 254 | * rcu_irq_enter - inform RCU of entry to hard irq context | ||
| 255 | * | ||
| 256 | * If the CPU was idle with dynamic ticks active, this updates the | ||
| 257 | * rdtp->dynticks to let the RCU handling know that the CPU is active. | ||
| 258 | */ | ||
| 259 | void rcu_irq_enter(void) | ||
| 260 | { | ||
| 261 | struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); | ||
| 262 | |||
| 263 | if (rdtp->dynticks_nesting++) | ||
| 264 | return; | ||
| 265 | rdtp->dynticks++; | ||
| 266 | WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); | ||
| 267 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | ||
| 268 | } | ||
| 269 | |||
| 270 | /** | ||
| 271 | * rcu_irq_exit - inform RCU of exit from hard irq context | ||
| 272 | * | ||
| 273 | * If the CPU was idle with dynamic ticks active, update the rdp->dynticks | ||
| 274 | * to put let the RCU handling be aware that the CPU is going back to idle | ||
| 275 | * with no ticks. | ||
| 276 | */ | ||
| 277 | void rcu_irq_exit(void) | ||
| 278 | { | ||
| 279 | struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); | ||
| 280 | |||
| 281 | if (--rdtp->dynticks_nesting) | ||
| 282 | return; | ||
| 283 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | ||
| 284 | rdtp->dynticks++; | ||
| 285 | WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); | ||
| 286 | |||
| 287 | /* If the interrupt queued a callback, get out of dyntick mode. */ | ||
| 288 | if (__get_cpu_var(rcu_data).nxtlist || | ||
| 289 | __get_cpu_var(rcu_bh_data).nxtlist) | ||
| 290 | set_need_resched(); | ||
| 291 | } | ||
| 292 | |||
| 293 | /* | ||
| 294 | * Record the specified "completed" value, which is later used to validate | ||
| 295 | * dynticks counter manipulations. Specify "rsp->completed - 1" to | ||
| 296 | * unconditionally invalidate any future dynticks manipulations (which is | ||
| 297 | * useful at the beginning of a grace period). | ||
| 298 | */ | ||
| 299 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
| 300 | { | ||
| 301 | rsp->dynticks_completed = comp; | ||
| 302 | } | ||
| 303 | |||
| 304 | #ifdef CONFIG_SMP | ||
| 305 | |||
| 306 | /* | ||
| 307 | * Recall the previously recorded value of the completion for dynticks. | ||
| 308 | */ | ||
| 309 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
| 310 | { | ||
| 311 | return rsp->dynticks_completed; | ||
| 312 | } | ||
| 313 | |||
| 314 | /* | ||
| 315 | * Snapshot the specified CPU's dynticks counter so that we can later | ||
| 316 | * credit them with an implicit quiescent state. Return 1 if this CPU | ||
| 317 | * is already in a quiescent state courtesy of dynticks idle mode. | ||
| 318 | */ | ||
| 319 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | ||
| 320 | { | ||
| 321 | int ret; | ||
| 322 | int snap; | ||
| 323 | int snap_nmi; | ||
| 324 | |||
| 325 | snap = rdp->dynticks->dynticks; | ||
| 326 | snap_nmi = rdp->dynticks->dynticks_nmi; | ||
| 327 | smp_mb(); /* Order sampling of snap with end of grace period. */ | ||
| 328 | rdp->dynticks_snap = snap; | ||
| 329 | rdp->dynticks_nmi_snap = snap_nmi; | ||
| 330 | ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0); | ||
| 331 | if (ret) | ||
| 332 | rdp->dynticks_fqs++; | ||
| 333 | return ret; | ||
| 334 | } | ||
| 335 | |||
| 336 | /* | ||
| 337 | * Return true if the specified CPU has passed through a quiescent | ||
| 338 | * state by virtue of being in or having passed through an dynticks | ||
| 339 | * idle state since the last call to dyntick_save_progress_counter() | ||
| 340 | * for this same CPU. | ||
| 341 | */ | ||
| 342 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | ||
| 343 | { | ||
| 344 | long curr; | ||
| 345 | long curr_nmi; | ||
| 346 | long snap; | ||
| 347 | long snap_nmi; | ||
| 348 | |||
| 349 | curr = rdp->dynticks->dynticks; | ||
| 350 | snap = rdp->dynticks_snap; | ||
| 351 | curr_nmi = rdp->dynticks->dynticks_nmi; | ||
| 352 | snap_nmi = rdp->dynticks_nmi_snap; | ||
| 353 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ | ||
| 354 | |||
| 355 | /* | ||
| 356 | * If the CPU passed through or entered a dynticks idle phase with | ||
| 357 | * no active irq/NMI handlers, then we can safely pretend that the CPU | ||
| 358 | * already acknowledged the request to pass through a quiescent | ||
| 359 | * state. Either way, that CPU cannot possibly be in an RCU | ||
| 360 | * read-side critical section that started before the beginning | ||
| 361 | * of the current RCU grace period. | ||
| 362 | */ | ||
| 363 | if ((curr != snap || (curr & 0x1) == 0) && | ||
| 364 | (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) { | ||
| 365 | rdp->dynticks_fqs++; | ||
| 366 | return 1; | ||
| 367 | } | ||
| 368 | |||
| 369 | /* Go check for the CPU being offline. */ | ||
| 370 | return rcu_implicit_offline_qs(rdp); | ||
| 371 | } | ||
| 372 | |||
| 373 | #endif /* #ifdef CONFIG_SMP */ | ||
| 374 | |||
| 375 | #else /* #ifdef CONFIG_NO_HZ */ | ||
| 376 | |||
| 377 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
| 378 | { | ||
| 379 | } | ||
| 380 | |||
| 381 | #ifdef CONFIG_SMP | ||
| 382 | |||
| 383 | /* | ||
| 384 | * If there are no dynticks, then the only way that a CPU can passively | ||
| 385 | * be in a quiescent state is to be offline. Unlike dynticks idle, which | ||
| 386 | * is a point in time during the prior (already finished) grace period, | ||
| 387 | * an offline CPU is always in a quiescent state, and thus can be | ||
| 388 | * unconditionally applied. So just return the current value of completed. | ||
| 389 | */ | ||
| 390 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
| 391 | { | ||
| 392 | return rsp->completed; | ||
| 393 | } | ||
| 394 | |||
| 395 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | ||
| 396 | { | ||
| 397 | return 0; | ||
| 398 | } | ||
| 399 | |||
| 400 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | ||
| 401 | { | ||
| 402 | return rcu_implicit_offline_qs(rdp); | ||
| 403 | } | ||
| 404 | |||
| 405 | #endif /* #ifdef CONFIG_SMP */ | ||
| 406 | |||
| 407 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
| 408 | |||
| 409 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 410 | |||
| 411 | static void record_gp_stall_check_time(struct rcu_state *rsp) | ||
| 412 | { | ||
| 413 | rsp->gp_start = jiffies; | ||
| 414 | rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK; | ||
| 415 | } | ||
| 416 | |||
| 417 | static void print_other_cpu_stall(struct rcu_state *rsp) | ||
| 418 | { | ||
| 419 | int cpu; | ||
| 420 | long delta; | ||
| 421 | unsigned long flags; | ||
| 422 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
| 423 | struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; | ||
| 424 | struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES]; | ||
| 425 | |||
| 426 | /* Only let one CPU complain about others per time interval. */ | ||
| 427 | |||
| 428 | spin_lock_irqsave(&rnp->lock, flags); | ||
| 429 | delta = jiffies - rsp->jiffies_stall; | ||
| 430 | if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) { | ||
| 431 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 432 | return; | ||
| 433 | } | ||
| 434 | rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | ||
| 435 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 436 | |||
| 437 | /* OK, time to rat on our buddy... */ | ||
| 438 | |||
| 439 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); | ||
| 440 | for (; rnp_cur < rnp_end; rnp_cur++) { | ||
| 441 | if (rnp_cur->qsmask == 0) | ||
| 442 | continue; | ||
| 443 | for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) | ||
| 444 | if (rnp_cur->qsmask & (1UL << cpu)) | ||
| 445 | printk(" %d", rnp_cur->grplo + cpu); | ||
| 446 | } | ||
| 447 | printk(" (detected by %d, t=%ld jiffies)\n", | ||
| 448 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); | ||
| 449 | force_quiescent_state(rsp, 0); /* Kick them all. */ | ||
| 450 | } | ||
| 451 | |||
| 452 | static void print_cpu_stall(struct rcu_state *rsp) | ||
| 453 | { | ||
| 454 | unsigned long flags; | ||
| 455 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
| 456 | |||
| 457 | printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", | ||
| 458 | smp_processor_id(), jiffies - rsp->gp_start); | ||
| 459 | dump_stack(); | ||
| 460 | spin_lock_irqsave(&rnp->lock, flags); | ||
| 461 | if ((long)(jiffies - rsp->jiffies_stall) >= 0) | ||
| 462 | rsp->jiffies_stall = | ||
| 463 | jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | ||
| 464 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 465 | set_need_resched(); /* kick ourselves to get things going. */ | ||
| 466 | } | ||
| 467 | |||
| 468 | static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | ||
| 469 | { | ||
| 470 | long delta; | ||
| 471 | struct rcu_node *rnp; | ||
| 472 | |||
| 473 | delta = jiffies - rsp->jiffies_stall; | ||
| 474 | rnp = rdp->mynode; | ||
| 475 | if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { | ||
| 476 | |||
| 477 | /* We haven't checked in, so go dump stack. */ | ||
| 478 | print_cpu_stall(rsp); | ||
| 479 | |||
| 480 | } else if (rsp->gpnum != rsp->completed && | ||
| 481 | delta >= RCU_STALL_RAT_DELAY) { | ||
| 482 | |||
| 483 | /* They had two time units to dump stack, so complain. */ | ||
| 484 | print_other_cpu_stall(rsp); | ||
| 485 | } | ||
| 486 | } | ||
| 487 | |||
| 488 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 489 | |||
| 490 | static void record_gp_stall_check_time(struct rcu_state *rsp) | ||
| 491 | { | ||
| 492 | } | ||
| 493 | |||
| 494 | static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | ||
| 495 | { | ||
| 496 | } | ||
| 497 | |||
| 498 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 499 | |||
| 500 | /* | ||
| 501 | * Update CPU-local rcu_data state to record the newly noticed grace period. | ||
| 502 | * This is used both when we started the grace period and when we notice | ||
| 503 | * that someone else started the grace period. | ||
| 504 | */ | ||
| 505 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) | ||
| 506 | { | ||
| 507 | rdp->qs_pending = 1; | ||
| 508 | rdp->passed_quiesc = 0; | ||
| 509 | rdp->gpnum = rsp->gpnum; | ||
| 510 | rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + | ||
| 511 | RCU_JIFFIES_TILL_FORCE_QS; | ||
| 512 | } | ||
| 513 | |||
| 514 | /* | ||
| 515 | * Did someone else start a new RCU grace period start since we last | ||
| 516 | * checked? Update local state appropriately if so. Must be called | ||
| 517 | * on the CPU corresponding to rdp. | ||
| 518 | */ | ||
| 519 | static int | ||
| 520 | check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp) | ||
| 521 | { | ||
| 522 | unsigned long flags; | ||
| 523 | int ret = 0; | ||
| 524 | |||
| 525 | local_irq_save(flags); | ||
| 526 | if (rdp->gpnum != rsp->gpnum) { | ||
| 527 | note_new_gpnum(rsp, rdp); | ||
| 528 | ret = 1; | ||
| 529 | } | ||
| 530 | local_irq_restore(flags); | ||
| 531 | return ret; | ||
| 532 | } | ||
| 533 | |||
| 534 | /* | ||
| 535 | * Start a new RCU grace period if warranted, re-initializing the hierarchy | ||
| 536 | * in preparation for detecting the next grace period. The caller must hold | ||
| 537 | * the root node's ->lock, which is released before return. Hard irqs must | ||
| 538 | * be disabled. | ||
| 539 | */ | ||
| 540 | static void | ||
| 541 | rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | ||
| 542 | __releases(rcu_get_root(rsp)->lock) | ||
| 543 | { | ||
| 544 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | ||
| 545 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
| 546 | struct rcu_node *rnp_cur; | ||
| 547 | struct rcu_node *rnp_end; | ||
| 548 | |||
| 549 | if (!cpu_needs_another_gp(rsp, rdp)) { | ||
| 550 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 551 | return; | ||
| 552 | } | ||
| 553 | |||
| 554 | /* Advance to a new grace period and initialize state. */ | ||
| 555 | rsp->gpnum++; | ||
| 556 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ | ||
| 557 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | ||
| 558 | rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + | ||
| 559 | RCU_JIFFIES_TILL_FORCE_QS; | ||
| 560 | record_gp_stall_check_time(rsp); | ||
| 561 | dyntick_record_completed(rsp, rsp->completed - 1); | ||
| 562 | note_new_gpnum(rsp, rdp); | ||
| 563 | |||
| 564 | /* | ||
| 565 | * Because we are first, we know that all our callbacks will | ||
| 566 | * be covered by this upcoming grace period, even the ones | ||
| 567 | * that were registered arbitrarily recently. | ||
| 568 | */ | ||
| 569 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
| 570 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
| 571 | |||
| 572 | /* Special-case the common single-level case. */ | ||
| 573 | if (NUM_RCU_NODES == 1) { | ||
| 574 | rnp->qsmask = rnp->qsmaskinit; | ||
| 575 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 576 | return; | ||
| 577 | } | ||
| 578 | |||
| 579 | spin_unlock(&rnp->lock); /* leave irqs disabled. */ | ||
| 580 | |||
| 581 | |||
| 582 | /* Exclude any concurrent CPU-hotplug operations. */ | ||
| 583 | spin_lock(&rsp->onofflock); /* irqs already disabled. */ | ||
| 584 | |||
| 585 | /* | ||
| 586 | * Set the quiescent-state-needed bits in all the non-leaf RCU | ||
| 587 | * nodes for all currently online CPUs. This operation relies | ||
| 588 | * on the layout of the hierarchy within the rsp->node[] array. | ||
| 589 | * Note that other CPUs will access only the leaves of the | ||
| 590 | * hierarchy, which still indicate that no grace period is in | ||
| 591 | * progress. In addition, we have excluded CPU-hotplug operations. | ||
| 592 | * | ||
| 593 | * We therefore do not need to hold any locks. Any required | ||
| 594 | * memory barriers will be supplied by the locks guarding the | ||
| 595 | * leaf rcu_nodes in the hierarchy. | ||
| 596 | */ | ||
| 597 | |||
| 598 | rnp_end = rsp->level[NUM_RCU_LVLS - 1]; | ||
| 599 | for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) | ||
| 600 | rnp_cur->qsmask = rnp_cur->qsmaskinit; | ||
| 601 | |||
| 602 | /* | ||
| 603 | * Now set up the leaf nodes. Here we must be careful. First, | ||
| 604 | * we need to hold the lock in order to exclude other CPUs, which | ||
| 605 | * might be contending for the leaf nodes' locks. Second, as | ||
| 606 | * soon as we initialize a given leaf node, its CPUs might run | ||
| 607 | * up the rest of the hierarchy. We must therefore acquire locks | ||
| 608 | * for each node that we touch during this stage. (But we still | ||
| 609 | * are excluding CPU-hotplug operations.) | ||
| 610 | * | ||
| 611 | * Note that the grace period cannot complete until we finish | ||
| 612 | * the initialization process, as there will be at least one | ||
| 613 | * qsmask bit set in the root node until that time, namely the | ||
| 614 | * one corresponding to this CPU. | ||
| 615 | */ | ||
| 616 | rnp_end = &rsp->node[NUM_RCU_NODES]; | ||
| 617 | rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; | ||
| 618 | for (; rnp_cur < rnp_end; rnp_cur++) { | ||
| 619 | spin_lock(&rnp_cur->lock); /* irqs already disabled. */ | ||
| 620 | rnp_cur->qsmask = rnp_cur->qsmaskinit; | ||
| 621 | spin_unlock(&rnp_cur->lock); /* irqs already disabled. */ | ||
| 622 | } | ||
| 623 | |||
| 624 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ | ||
| 625 | spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
| 626 | } | ||
| 627 | |||
| 628 | /* | ||
| 629 | * Advance this CPU's callbacks, but only if the current grace period | ||
| 630 | * has ended. This may be called only from the CPU to whom the rdp | ||
| 631 | * belongs. | ||
| 632 | */ | ||
| 633 | static void | ||
| 634 | rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
| 635 | { | ||
| 636 | long completed_snap; | ||
| 637 | unsigned long flags; | ||
| 638 | |||
| 639 | local_irq_save(flags); | ||
| 640 | completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */ | ||
| 641 | |||
| 642 | /* Did another grace period end? */ | ||
| 643 | if (rdp->completed != completed_snap) { | ||
| 644 | |||
| 645 | /* Advance callbacks. No harm if list empty. */ | ||
| 646 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
| 647 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
| 648 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
| 649 | |||
| 650 | /* Remember that we saw this grace-period completion. */ | ||
| 651 | rdp->completed = completed_snap; | ||
| 652 | } | ||
| 653 | local_irq_restore(flags); | ||
| 654 | } | ||
| 655 | |||
| 656 | /* | ||
| 657 | * Similar to cpu_quiet(), for which it is a helper function. Allows | ||
| 658 | * a group of CPUs to be quieted at one go, though all the CPUs in the | ||
| 659 | * group must be represented by the same leaf rcu_node structure. | ||
| 660 | * That structure's lock must be held upon entry, and it is released | ||
| 661 | * before return. | ||
| 662 | */ | ||
| 663 | static void | ||
| 664 | cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | ||
| 665 | unsigned long flags) | ||
| 666 | __releases(rnp->lock) | ||
| 667 | { | ||
| 668 | /* Walk up the rcu_node hierarchy. */ | ||
| 669 | for (;;) { | ||
| 670 | if (!(rnp->qsmask & mask)) { | ||
| 671 | |||
| 672 | /* Our bit has already been cleared, so done. */ | ||
| 673 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 674 | return; | ||
| 675 | } | ||
| 676 | rnp->qsmask &= ~mask; | ||
| 677 | if (rnp->qsmask != 0) { | ||
| 678 | |||
| 679 | /* Other bits still set at this level, so done. */ | ||
| 680 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 681 | return; | ||
| 682 | } | ||
| 683 | mask = rnp->grpmask; | ||
| 684 | if (rnp->parent == NULL) { | ||
| 685 | |||
| 686 | /* No more levels. Exit loop holding root lock. */ | ||
| 687 | |||
| 688 | break; | ||
| 689 | } | ||
| 690 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 691 | rnp = rnp->parent; | ||
| 692 | spin_lock_irqsave(&rnp->lock, flags); | ||
| 693 | } | ||
| 694 | |||
| 695 | /* | ||
| 696 | * Get here if we are the last CPU to pass through a quiescent | ||
| 697 | * state for this grace period. Clean up and let rcu_start_gp() | ||
| 698 | * start up the next grace period if one is needed. Note that | ||
| 699 | * we still hold rnp->lock, as required by rcu_start_gp(), which | ||
| 700 | * will release it. | ||
| 701 | */ | ||
| 702 | rsp->completed = rsp->gpnum; | ||
| 703 | rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); | ||
| 704 | rcu_start_gp(rsp, flags); /* releases rnp->lock. */ | ||
| 705 | } | ||
| 706 | |||
| 707 | /* | ||
| 708 | * Record a quiescent state for the specified CPU, which must either be | ||
| 709 | * the current CPU or an offline CPU. The lastcomp argument is used to | ||
| 710 | * make sure we are still in the grace period of interest. We don't want | ||
| 711 | * to end the current grace period based on quiescent states detected in | ||
| 712 | * an earlier grace period! | ||
| 713 | */ | ||
| 714 | static void | ||
| 715 | cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | ||
| 716 | { | ||
| 717 | unsigned long flags; | ||
| 718 | unsigned long mask; | ||
| 719 | struct rcu_node *rnp; | ||
| 720 | |||
| 721 | rnp = rdp->mynode; | ||
| 722 | spin_lock_irqsave(&rnp->lock, flags); | ||
| 723 | if (lastcomp != ACCESS_ONCE(rsp->completed)) { | ||
| 724 | |||
| 725 | /* | ||
| 726 | * Someone beat us to it for this grace period, so leave. | ||
| 727 | * The race with GP start is resolved by the fact that we | ||
| 728 | * hold the leaf rcu_node lock, so that the per-CPU bits | ||
| 729 | * cannot yet be initialized -- so we would simply find our | ||
| 730 | * CPU's bit already cleared in cpu_quiet_msk() if this race | ||
| 731 | * occurred. | ||
| 732 | */ | ||
| 733 | rdp->passed_quiesc = 0; /* try again later! */ | ||
| 734 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 735 | return; | ||
| 736 | } | ||
| 737 | mask = rdp->grpmask; | ||
| 738 | if ((rnp->qsmask & mask) == 0) { | ||
| 739 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
| 740 | } else { | ||
| 741 | rdp->qs_pending = 0; | ||
| 742 | |||
| 743 | /* | ||
| 744 | * This GP can't end until cpu checks in, so all of our | ||
| 745 | * callbacks can be processed during the next GP. | ||
| 746 | */ | ||
| 747 | rdp = rsp->rda[smp_processor_id()]; | ||
| 748 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
| 749 | |||
| 750 | cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ | ||
| 751 | } | ||
| 752 | } | ||
| 753 | |||
| 754 | /* | ||
| 755 | * Check to see if there is a new grace period of which this CPU | ||
| 756 | * is not yet aware, and if so, set up local rcu_data state for it. | ||
| 757 | * Otherwise, see if this CPU has just passed through its first | ||
| 758 | * quiescent state for this grace period, and record that fact if so. | ||
| 759 | */ | ||
| 760 | static void | ||
| 761 | rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) | ||
| 762 | { | ||
| 763 | /* If there is now a new grace period, record and return. */ | ||
| 764 | if (check_for_new_grace_period(rsp, rdp)) | ||
| 765 | return; | ||
| 766 | |||
| 767 | /* | ||
| 768 | * Does this CPU still need to do its part for current grace period? | ||
| 769 | * If no, return and let the other CPUs do their part as well. | ||
| 770 | */ | ||
| 771 | if (!rdp->qs_pending) | ||
| 772 | return; | ||
| 773 | |||
| 774 | /* | ||
| 775 | * Was there a quiescent state since the beginning of the grace | ||
| 776 | * period? If no, then exit and wait for the next call. | ||
| 777 | */ | ||
| 778 | if (!rdp->passed_quiesc) | ||
| 779 | return; | ||
| 780 | |||
| 781 | /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ | ||
| 782 | cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); | ||
| 783 | } | ||
| 784 | |||
| 785 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 786 | |||
| 787 | /* | ||
| 788 | * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy | ||
| 789 | * and move all callbacks from the outgoing CPU to the current one. | ||
| 790 | */ | ||
| 791 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | ||
| 792 | { | ||
| 793 | int i; | ||
| 794 | unsigned long flags; | ||
| 795 | long lastcomp; | ||
| 796 | unsigned long mask; | ||
| 797 | struct rcu_data *rdp = rsp->rda[cpu]; | ||
| 798 | struct rcu_data *rdp_me; | ||
| 799 | struct rcu_node *rnp; | ||
| 800 | |||
| 801 | /* Exclude any attempts to start a new grace period. */ | ||
| 802 | spin_lock_irqsave(&rsp->onofflock, flags); | ||
| 803 | |||
| 804 | /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ | ||
| 805 | rnp = rdp->mynode; | ||
| 806 | mask = rdp->grpmask; /* rnp->grplo is constant. */ | ||
| 807 | do { | ||
| 808 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
| 809 | rnp->qsmaskinit &= ~mask; | ||
| 810 | if (rnp->qsmaskinit != 0) { | ||
| 811 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | ||
| 812 | break; | ||
| 813 | } | ||
| 814 | mask = rnp->grpmask; | ||
| 815 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | ||
| 816 | rnp = rnp->parent; | ||
| 817 | } while (rnp != NULL); | ||
| 818 | lastcomp = rsp->completed; | ||
| 819 | |||
| 820 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | ||
| 821 | |||
| 822 | /* Being offline is a quiescent state, so go record it. */ | ||
| 823 | cpu_quiet(cpu, rsp, rdp, lastcomp); | ||
| 824 | |||
| 825 | /* | ||
| 826 | * Move callbacks from the outgoing CPU to the running CPU. | ||
| 827 | * Note that the outgoing CPU is now quiescent, so it is now | ||
| 828 | * (uncharacteristically) safe to access its rcu_data structure. | ||
| 829 | * Note also that we must carefully retain the order of the | ||
| 830 | * outgoing CPU's callbacks in order for rcu_barrier() to work | ||
| 831 | * correctly. Finally, note that we start all the callbacks | ||
| 832 | * afresh, even those that have passed through a grace period | ||
| 833 | * and are therefore ready to invoke. The theory is that hotplug | ||
| 834 | * events are rare, and that if they are frequent enough to | ||
| 835 | * indefinitely delay callbacks, you have far worse things to | ||
| 836 | * be worrying about. | ||
| 837 | */ | ||
| 838 | rdp_me = rsp->rda[smp_processor_id()]; | ||
| 839 | if (rdp->nxtlist != NULL) { | ||
| 840 | *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist; | ||
| 841 | rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
| 842 | rdp->nxtlist = NULL; | ||
| 843 | for (i = 0; i < RCU_NEXT_SIZE; i++) | ||
| 844 | rdp->nxttail[i] = &rdp->nxtlist; | ||
| 845 | rdp_me->qlen += rdp->qlen; | ||
| 846 | rdp->qlen = 0; | ||
| 847 | } | ||
| 848 | local_irq_restore(flags); | ||
| 849 | } | ||
| 850 | |||
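The splice at the end of __rcu_offline_cpu() relies on the head-plus-tail-pointer representation of the callback list. A minimal userspace sketch of that O(1), order-preserving append (hypothetical toy types; the real list also carries the nxttail[] segment pointers):

#include <stdio.h>
#include <stddef.h>

struct cb {
	struct cb *next;
	int id;
};

struct cpu_cbs {
	struct cb *list;	/* head of the callback list */
	struct cb **tail;	/* address of the final ->next (or of list) */
};

static void splice(struct cpu_cbs *me, struct cpu_cbs *dying)
{
	if (dying->list == NULL)
		return;
	*me->tail = dying->list;	/* append donor list in one store */
	me->tail = dying->tail;		/* adopt the donor's tail pointer */
	dying->list = NULL;
	dying->tail = &dying->list;	/* donor returns to the empty state */
}

int main(void)
{
	struct cb a = { NULL, 1 }, b = { NULL, 2 };
	struct cpu_cbs me    = { &a, &a.next };
	struct cpu_cbs dying = { &b, &b.next };

	splice(&me, &dying);
	for (struct cb *p = me.list; p != NULL; p = p->next)
		printf("cb %d\n", p->id);	/* prints 1 then 2: order kept */
	return 0;
}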
| 851 | /* | ||
| 852 | * Remove the specified CPU from the RCU hierarchy and move any pending | ||
| 853 | * callbacks that it might have to the current CPU. This code assumes | ||
| 854 | * that at least one CPU in the system will remain running at all times. | ||
| 855 | * Any attempt to offline -all- CPUs is likely to strand RCU callbacks. | ||
| 856 | */ | ||
| 857 | static void rcu_offline_cpu(int cpu) | ||
| 858 | { | ||
| 859 | __rcu_offline_cpu(cpu, &rcu_state); | ||
| 860 | __rcu_offline_cpu(cpu, &rcu_bh_state); | ||
| 861 | } | ||
| 862 | |||
| 863 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 864 | |||
| 865 | static void rcu_offline_cpu(int cpu) | ||
| 866 | { | ||
| 867 | } | ||
| 868 | |||
| 869 | #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 870 | |||
| 871 | /* | ||
| 872 | * Invoke any RCU callbacks that have made it to the end of their grace | ||
| 873 | * period. Throttle as specified by rdp->blimit. | ||
| 874 | */ | ||
| 875 | static void rcu_do_batch(struct rcu_data *rdp) | ||
| 876 | { | ||
| 877 | unsigned long flags; | ||
| 878 | struct rcu_head *next, *list, **tail; | ||
| 879 | int count; | ||
| 880 | |||
| 881 | /* If no callbacks are ready, just return. */ | ||
| 882 | if (!cpu_has_callbacks_ready_to_invoke(rdp)) | ||
| 883 | return; | ||
| 884 | |||
| 885 | /* | ||
| 886 | * Extract the list of ready callbacks, disabling to prevent | ||
| 887 | * races with call_rcu() from interrupt handlers. | ||
| 888 | */ | ||
| 889 | local_irq_save(flags); | ||
| 890 | list = rdp->nxtlist; | ||
| 891 | rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL]; | ||
| 892 | *rdp->nxttail[RCU_DONE_TAIL] = NULL; | ||
| 893 | tail = rdp->nxttail[RCU_DONE_TAIL]; | ||
| 894 | for (count = RCU_NEXT_SIZE - 1; count >= 0; count--) | ||
| 895 | if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL]) | ||
| 896 | rdp->nxttail[count] = &rdp->nxtlist; | ||
| 897 | local_irq_restore(flags); | ||
| 898 | |||
| 899 | /* Invoke callbacks. */ | ||
| 900 | count = 0; | ||
| 901 | while (list) { | ||
| 902 | next = list->next; | ||
| 903 | prefetch(next); | ||
| 904 | list->func(list); | ||
| 905 | list = next; | ||
| 906 | if (++count >= rdp->blimit) | ||
| 907 | break; | ||
| 908 | } | ||
| 909 | |||
| 910 | local_irq_save(flags); | ||
| 911 | |||
| 912 | /* Update count, and requeue any remaining callbacks. */ | ||
| 913 | rdp->qlen -= count; | ||
| 914 | if (list != NULL) { | ||
| 915 | *tail = rdp->nxtlist; | ||
| 916 | rdp->nxtlist = list; | ||
| 917 | for (count = 0; count < RCU_NEXT_SIZE; count++) | ||
| 918 | if (&rdp->nxtlist == rdp->nxttail[count]) | ||
| 919 | rdp->nxttail[count] = tail; | ||
| 920 | else | ||
| 921 | break; | ||
| 922 | } | ||
| 923 | |||
| 924 | /* Reinstate batch limit if we have worked down the excess. */ | ||
| 925 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) | ||
| 926 | rdp->blimit = blimit; | ||
| 927 | |||
| 928 | local_irq_restore(flags); | ||
| 929 | |||
| 930 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | ||
| 931 | if (cpu_has_callbacks_ready_to_invoke(rdp)) | ||
| 932 | raise_softirq(RCU_SOFTIRQ); | ||
| 933 | } | ||
| 934 | |||
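The blimit handling at the end of rcu_do_batch() pairs with the qhimark test in __call_rcu() below to form a hysteresis loop. A small sketch of just that policy, with default-style values assumed purely for illustration:

#include <stdio.h>
#include <limits.h>

static const long blimit = 10, qhimark = 10000, qlowmark = 100;

int main(void)
{
	long qlen, limit = blimit;

	qlen = qhimark + 1;		/* a flood of call_rcu() invocations */
	if (qlen > qhimark)
		limit = LONG_MAX;	/* emergency: drop per-batch throttle */
	printf("limit=%ld\n", limit);

	qlen = 50;			/* batches have worked down the excess */
	if (limit == LONG_MAX && qlen <= qlowmark)
		limit = blimit;		/* reinstate normal throttling */
	printf("limit=%ld\n", limit);
	return 0;
}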
| 935 | /* | ||
| 936 | * Check to see if this CPU is in a non-context-switch quiescent state | ||
| 937 | * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). | ||
| 938 | * Also schedule the RCU softirq handler. | ||
| 939 | * | ||
| 940 | * This function must be called with hardirqs disabled. It is normally | ||
| 941 | * invoked from the scheduling-clock interrupt. If rcu_pending returns | ||
| 942 | * false, there is no point in invoking rcu_check_callbacks(). | ||
| 943 | */ | ||
| 944 | void rcu_check_callbacks(int cpu, int user) | ||
| 945 | { | ||
| 946 | if (user || | ||
| 947 | (idle_cpu(cpu) && !in_softirq() && | ||
| 948 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | ||
| 949 | |||
| 950 | /* | ||
| 951 | * Get here if this CPU took its interrupt from user | ||
| 952 | * mode or from the idle loop, and if this is not a | ||
| 953 | * nested interrupt. In this case, the CPU is in | ||
| 954 | * a quiescent state, so count it. | ||
| 955 | * | ||
| 956 | * No memory barrier is required here because both | ||
| 957 | * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference | ||
| 958 | * only CPU-local variables that other CPUs neither | ||
| 959 | * access nor modify, at least not while the corresponding | ||
| 960 | * CPU is online. | ||
| 961 | */ | ||
| 962 | |||
| 963 | rcu_qsctr_inc(cpu); | ||
| 964 | rcu_bh_qsctr_inc(cpu); | ||
| 965 | |||
| 966 | } else if (!in_softirq()) { | ||
| 967 | |||
| 968 | /* | ||
| 969 | * Get here if this CPU did not take its interrupt from | ||
| 970 | * softirq, in other words, if it is not interrupting | ||
| 971 | * an rcu_bh read-side critical section. This is a _bh | ||
| 972 | * critical section, so count it. | ||
| 973 | */ | ||
| 974 | |||
| 975 | rcu_bh_qsctr_inc(cpu); | ||
| 976 | } | ||
| 977 | raise_softirq(RCU_SOFTIRQ); | ||
| 978 | } | ||
| 979 | |||
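The quiescent-state test in rcu_check_callbacks() compresses several context checks into one expression. A toy predicate mirroring its two cases (the hardirq_count() comparison is modeled simply as "not a nested interrupt", which is what it amounts to here):

#include <stdio.h>
#include <stdbool.h>

static void check(bool user, bool idle, bool in_softirq, bool nested_irq)
{
	if (user || (idle && !in_softirq && !nested_irq))
		printf("QS for both rcu and rcu_bh\n");
	else if (!in_softirq)
		printf("QS for rcu_bh only\n");
	else
		printf("no quiescent state\n");
}

int main(void)
{
	check(true,  false, false, false);	/* interrupt from user mode */
	check(false, true,  false, false);	/* from idle, not nested */
	check(false, false, false, false);	/* kernel, outside softirq */
	check(false, false, true,  false);	/* interrupted a softirq */
	return 0;
}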
| 980 | #ifdef CONFIG_SMP | ||
| 981 | |||
| 982 | /* | ||
| 983 | * Scan the leaf rcu_node structures, processing dyntick state for any that | ||
| 984 | * have not yet encountered a quiescent state, using the function specified. | ||
| 985 | * Returns 1 if the current grace period ends while scanning (possibly | ||
| 986 | * because we made it end). | ||
| 987 | */ | ||
| 988 | static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp, | ||
| 989 | int (*f)(struct rcu_data *)) | ||
| 990 | { | ||
| 991 | unsigned long bit; | ||
| 992 | int cpu; | ||
| 993 | unsigned long flags; | ||
| 994 | unsigned long mask; | ||
| 995 | struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; | ||
| 996 | struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES]; | ||
| 997 | |||
| 998 | for (; rnp_cur < rnp_end; rnp_cur++) { | ||
| 999 | mask = 0; | ||
| 1000 | spin_lock_irqsave(&rnp_cur->lock, flags); | ||
| 1001 | if (rsp->completed != lastcomp) { | ||
| 1002 | spin_unlock_irqrestore(&rnp_cur->lock, flags); | ||
| 1003 | return 1; | ||
| 1004 | } | ||
| 1005 | if (rnp_cur->qsmask == 0) { | ||
| 1006 | spin_unlock_irqrestore(&rnp_cur->lock, flags); | ||
| 1007 | continue; | ||
| 1008 | } | ||
| 1009 | cpu = rnp_cur->grplo; | ||
| 1010 | bit = 1; | ||
| 1011 | for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) { | ||
| 1012 | if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu])) | ||
| 1013 | mask |= bit; | ||
| 1014 | } | ||
| 1015 | if (mask != 0 && rsp->completed == lastcomp) { | ||
| 1016 | |||
| 1017 | /* cpu_quiet_msk() releases rnp_cur->lock. */ | ||
| 1018 | cpu_quiet_msk(mask, rsp, rnp_cur, flags); | ||
| 1019 | continue; | ||
| 1020 | } | ||
| 1021 | spin_unlock_irqrestore(&rnp_cur->lock, flags); | ||
| 1022 | } | ||
| 1023 | return 0; | ||
| 1024 | } | ||
| 1025 | |||
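The per-leaf scan pattern above (walk cpu from grplo to grphi while shifting a bit through qsmask) recurs throughout this file. A self-contained sketch with made-up mask values:

#include <stdio.h>

static int now_quiesced(int cpu)
{
	return cpu != 2;	/* pretend CPU 2 is still holding out */
}

int main(void)
{
	int grplo = 0, grphi = 5;
	unsigned long qsmask = 0x2d;	/* CPUs 0, 2, 3, 5 still pending */
	unsigned long mask = 0, bit = 1;

	for (int cpu = grplo; cpu <= grphi; cpu++, bit <<= 1)
		if ((qsmask & bit) != 0 && now_quiesced(cpu))
			mask |= bit;
	printf("newly quiesced: 0x%lx\n", mask);	/* 0x29: CPUs 0, 3, 5 */
	return 0;
}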
| 1026 | /* | ||
| 1027 | * Force quiescent states on reluctant CPUs, and also detect which | ||
| 1028 | * CPUs are in dyntick-idle mode. | ||
| 1029 | */ | ||
| 1030 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | ||
| 1031 | { | ||
| 1032 | unsigned long flags; | ||
| 1033 | long lastcomp; | ||
| 1034 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | ||
| 1035 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
| 1036 | u8 signaled; | ||
| 1037 | |||
| 1038 | if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) | ||
| 1039 | return; /* No grace period in progress, nothing to force. */ | ||
| 1040 | if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { | ||
| 1041 | rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ | ||
| 1042 | return; /* Someone else is already on the job. */ | ||
| 1043 | } | ||
| 1044 | if (relaxed && | ||
| 1045 | (long)(rsp->jiffies_force_qs - jiffies) >= 0 && | ||
| 1046 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0) | ||
| 1047 | goto unlock_ret; /* no emergency and done recently. */ | ||
| 1048 | rsp->n_force_qs++; | ||
| 1049 | spin_lock(&rnp->lock); | ||
| 1050 | lastcomp = rsp->completed; | ||
| 1051 | signaled = rsp->signaled; | ||
| 1052 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | ||
| 1053 | rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + | ||
| 1054 | RCU_JIFFIES_TILL_FORCE_QS; | ||
| 1055 | if (lastcomp == rsp->gpnum) { | ||
| 1056 | rsp->n_force_qs_ngp++; | ||
| 1057 | spin_unlock(&rnp->lock); | ||
| 1058 | goto unlock_ret; /* no GP in progress, time updated. */ | ||
| 1059 | } | ||
| 1060 | spin_unlock(&rnp->lock); | ||
| 1061 | switch (signaled) { | ||
| 1062 | case RCU_GP_INIT: | ||
| 1063 | |||
| 1064 | break; /* grace period still initializing, ignore. */ | ||
| 1065 | |||
| 1066 | case RCU_SAVE_DYNTICK: | ||
| 1067 | |||
| 1068 | if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) | ||
| 1069 | break; /* So gcc recognizes the dead code. */ | ||
| 1070 | |||
| 1071 | /* Record dyntick-idle state. */ | ||
| 1072 | if (rcu_process_dyntick(rsp, lastcomp, | ||
| 1073 | dyntick_save_progress_counter)) | ||
| 1074 | goto unlock_ret; | ||
| 1075 | |||
| 1076 | /* Update state, record completion counter. */ | ||
| 1077 | spin_lock(&rnp->lock); | ||
| 1078 | if (lastcomp == rsp->completed) { | ||
| 1079 | rsp->signaled = RCU_FORCE_QS; | ||
| 1080 | dyntick_record_completed(rsp, lastcomp); | ||
| 1081 | } | ||
| 1082 | spin_unlock(&rnp->lock); | ||
| 1083 | break; | ||
| 1084 | |||
| 1085 | case RCU_FORCE_QS: | ||
| 1086 | |||
| 1087 | /* Check dyntick-idle state, send IPI to laggards. */ | ||
| 1088 | if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), | ||
| 1089 | rcu_implicit_dynticks_qs)) | ||
| 1090 | goto unlock_ret; | ||
| 1091 | |||
| 1092 | /* Leave state in case more forcing is required. */ | ||
| 1093 | |||
| 1094 | break; | ||
| 1095 | } | ||
| 1096 | unlock_ret: | ||
| 1097 | spin_unlock_irqrestore(&rsp->fqslock, flags); | ||
| 1098 | } | ||
| 1099 | |||
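The switch in force_quiescent_state() implements a small state machine over rsp->signaled. A compressed model of the intended progression (in the kernel the grace-period initializer, not this function, moves the state out of RCU_GP_INIT; that step is collapsed here for brevity):

#include <stdio.h>

enum fqs_state { GP_INIT, SAVE_DYNTICK, FORCE_QS };

/* One force_quiescent_state() pass, reduced to its state decisions. */
static enum fqs_state fqs_pass(enum fqs_state s)
{
	switch (s) {
	case GP_INIT:
		/* GP still initializing; the initializer advances the
		 * state when it finishes (modeled as immediate here). */
		return SAVE_DYNTICK;
	case SAVE_DYNTICK:
		printf("pass 1: snapshot per-CPU dyntick counters\n");
		return FORCE_QS;
	case FORCE_QS:
		printf("pass 2+: recheck counters, IPI the laggards\n");
		return FORCE_QS;	/* stay here until the GP ends */
	}
	return s;
}

int main(void)
{
	enum fqs_state s = GP_INIT;

	for (int i = 0; i < 3; i++)
		s = fqs_pass(s);
	return 0;
}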
| 1100 | #else /* #ifdef CONFIG_SMP */ | ||
| 1101 | |||
| 1102 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | ||
| 1103 | { | ||
| 1104 | set_need_resched(); | ||
| 1105 | } | ||
| 1106 | |||
| 1107 | #endif /* #else #ifdef CONFIG_SMP */ | ||
| 1108 | |||
| 1109 | /* | ||
| 1110 | * This does the RCU processing work from softirq context for the | ||
| 1111 | * specified rcu_state and rcu_data structures. This may be called | ||
| 1112 | * only from the CPU to whom the rdp belongs. | ||
| 1113 | */ | ||
| 1114 | static void | ||
| 1115 | __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | ||
| 1116 | { | ||
| 1117 | unsigned long flags; | ||
| 1118 | |||
| 1119 | /* | ||
| 1120 | * If an RCU GP has gone long enough, go check for dyntick | ||
| 1121 | * idle CPUs and, if needed, send resched IPIs. | ||
| 1122 | */ | ||
| 1123 | if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || | ||
| 1124 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) | ||
| 1125 | force_quiescent_state(rsp, 1); | ||
| 1126 | |||
| 1127 | /* | ||
| 1128 | * Advance callbacks in response to end of earlier grace | ||
| 1129 | * period that some other CPU ended. | ||
| 1130 | */ | ||
| 1131 | rcu_process_gp_end(rsp, rdp); | ||
| 1132 | |||
| 1133 | /* Update RCU state based on any recent quiescent states. */ | ||
| 1134 | rcu_check_quiescent_state(rsp, rdp); | ||
| 1135 | |||
| 1136 | /* Does this CPU require a not-yet-started grace period? */ | ||
| 1137 | if (cpu_needs_another_gp(rsp, rdp)) { | ||
| 1138 | spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags); | ||
| 1139 | rcu_start_gp(rsp, flags); /* releases above lock */ | ||
| 1140 | } | ||
| 1141 | |||
| 1142 | /* If there are callbacks ready, invoke them. */ | ||
| 1143 | rcu_do_batch(rdp); | ||
| 1144 | } | ||
| 1145 | |||
| 1146 | /* | ||
| 1147 | * Do softirq processing for the current CPU. | ||
| 1148 | */ | ||
| 1149 | static void rcu_process_callbacks(struct softirq_action *unused) | ||
| 1150 | { | ||
| 1151 | /* | ||
| 1152 | * Memory references from any prior RCU read-side critical sections | ||
| 1153 | * executed by the interrupted code must be seen before any RCU | ||
| 1154 | * grace-period manipulations below. | ||
| 1155 | */ | ||
| 1156 | smp_mb(); /* See above block comment. */ | ||
| 1157 | |||
| 1158 | __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data)); | ||
| 1159 | __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); | ||
| 1160 | |||
| 1161 | /* | ||
| 1162 | * Memory references from any later RCU read-side critical sections | ||
| 1163 | * executed by the interrupted code must be seen after any RCU | ||
| 1164 | * grace-period manipulations above. | ||
| 1165 | */ | ||
| 1166 | smp_mb(); /* See above block comment. */ | ||
| 1167 | } | ||
| 1168 | |||
| 1169 | static void | ||
| 1170 | __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | ||
| 1171 | struct rcu_state *rsp) | ||
| 1172 | { | ||
| 1173 | unsigned long flags; | ||
| 1174 | struct rcu_data *rdp; | ||
| 1175 | |||
| 1176 | head->func = func; | ||
| 1177 | head->next = NULL; | ||
| 1178 | |||
| 1179 | smp_mb(); /* Ensure RCU update seen before callback registry. */ | ||
| 1180 | |||
| 1181 | /* | ||
| 1182 | * Opportunistically note grace-period endings and beginnings. | ||
| 1183 | * Note that we might see a beginning right after we see an | ||
| 1184 | * end, but never vice versa, since this CPU has to pass through | ||
| 1185 | * a quiescent state betweentimes. | ||
| 1186 | */ | ||
| 1187 | local_irq_save(flags); | ||
| 1188 | rdp = rsp->rda[smp_processor_id()]; | ||
| 1189 | rcu_process_gp_end(rsp, rdp); | ||
| 1190 | check_for_new_grace_period(rsp, rdp); | ||
| 1191 | |||
| 1192 | /* Add the callback to our list. */ | ||
| 1193 | *rdp->nxttail[RCU_NEXT_TAIL] = head; | ||
| 1194 | rdp->nxttail[RCU_NEXT_TAIL] = &head->next; | ||
| 1195 | |||
| 1196 | /* Start a new grace period if one not already started. */ | ||
| 1197 | if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) { | ||
| 1198 | unsigned long nestflag; | ||
| 1199 | struct rcu_node *rnp_root = rcu_get_root(rsp); | ||
| 1200 | |||
| 1201 | spin_lock_irqsave(&rnp_root->lock, nestflag); | ||
| 1202 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ | ||
| 1203 | } | ||
| 1204 | |||
| 1205 | /* Force the grace period if too many callbacks or too long waiting. */ | ||
| 1206 | if (unlikely(++rdp->qlen > qhimark)) { | ||
| 1207 | rdp->blimit = LONG_MAX; | ||
| 1208 | force_quiescent_state(rsp, 0); | ||
| 1209 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || | ||
| 1210 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) | ||
| 1211 | force_quiescent_state(rsp, 1); | ||
| 1212 | local_irq_restore(flags); | ||
| 1213 | } | ||
| 1214 | |||
| 1215 | /* | ||
| 1216 | * Queue an RCU callback for invocation after a grace period. | ||
| 1217 | */ | ||
| 1218 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
| 1219 | { | ||
| 1220 | __call_rcu(head, func, &rcu_state); | ||
| 1221 | } | ||
| 1222 | EXPORT_SYMBOL_GPL(call_rcu); | ||
| 1223 | |||
| 1224 | /* | ||
| 1225 | * Queue an RCU callback for invocation after a quicker grace period. | ||
| 1226 | */ | ||
| 1227 | void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
| 1228 | { | ||
| 1229 | __call_rcu(head, func, &rcu_bh_state); | ||
| 1230 | } | ||
| 1231 | EXPORT_SYMBOL_GPL(call_rcu_bh); | ||
| 1232 | |||
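Callers of call_rcu() conventionally embed the rcu_head in their own structure and recover the enclosing object with container_of() in the callback. A single-threaded toy model of that pattern (the queue-draining loop stands in for a real grace period; nothing here is deferred for real):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *);
};

struct foo {
	int key;
	struct rcu_head rcu;
};

static struct rcu_head *queue;

static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
{
	head->func = func;
	head->next = queue;
	queue = head;
}

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	printf("freeing foo with key %d\n", fp->key);
	free(fp);
}

int main(void)
{
	struct foo *fp = malloc(sizeof(*fp));

	fp->key = 42;
	call_rcu(&fp->rcu, foo_reclaim);	/* defer freeing */

	/* ...simulated grace period elapses: run the queued callbacks... */
	while (queue) {
		struct rcu_head *head = queue;

		queue = head->next;
		head->func(head);
	}
	return 0;
}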
| 1233 | /* | ||
| 1234 | * Check to see if there is any immediate RCU-related work to be done | ||
| 1235 | * by the current CPU, for the specified type of RCU, returning 1 if so. | ||
| 1236 | * The checks are in order of increasing expense: checks that can be | ||
| 1237 | * carried out against CPU-local state are performed first. However, | ||
| 1238 | * we must check for CPU stalls first, else we might not get a chance. | ||
| 1239 | */ | ||
| 1240 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | ||
| 1241 | { | ||
| 1242 | rdp->n_rcu_pending++; | ||
| 1243 | |||
| 1244 | /* Check for CPU stalls, if enabled. */ | ||
| 1245 | check_cpu_stall(rsp, rdp); | ||
| 1246 | |||
| 1247 | /* Is the RCU core waiting for a quiescent state from this CPU? */ | ||
| 1248 | if (rdp->qs_pending) | ||
| 1249 | return 1; | ||
| 1250 | |||
| 1251 | /* Does this CPU have callbacks ready to invoke? */ | ||
| 1252 | if (cpu_has_callbacks_ready_to_invoke(rdp)) | ||
| 1253 | return 1; | ||
| 1254 | |||
| 1255 | /* Has RCU gone idle with this CPU needing another grace period? */ | ||
| 1256 | if (cpu_needs_another_gp(rsp, rdp)) | ||
| 1257 | return 1; | ||
| 1258 | |||
| 1259 | /* Has another RCU grace period completed? */ | ||
| 1260 | if (ACCESS_ONCE(rsp->completed) != rdp->completed) /* outside of lock */ | ||
| 1261 | return 1; | ||
| 1262 | |||
| 1263 | /* Has a new RCU grace period started? */ | ||
| 1264 | if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) /* outside of lock */ | ||
| 1265 | return 1; | ||
| 1266 | |||
| 1267 | /* Has an RCU GP gone long enough to send resched IPIs &c? */ | ||
| 1268 | if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && | ||
| 1269 | ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || | ||
| 1270 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)) | ||
| 1271 | return 1; | ||
| 1272 | |||
| 1273 | /* nothing to do */ | ||
| 1274 | return 0; | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | /* | ||
| 1278 | * Check to see if there is any immediate RCU-related work to be done | ||
| 1279 | * by the current CPU, returning 1 if so. This function is part of the | ||
| 1280 | * RCU implementation; it is -not- an exported member of the RCU API. | ||
| 1281 | */ | ||
| 1282 | int rcu_pending(int cpu) | ||
| 1283 | { | ||
| 1284 | return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) || | ||
| 1285 | __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)); | ||
| 1286 | } | ||
| 1287 | |||
| 1288 | /* | ||
| 1289 | * Check to see if any future RCU-related work will need to be done | ||
| 1290 | * by the current CPU, even if none need be done immediately, returning | ||
| 1291 | * 1 if so. This function is part of the RCU implementation; it is -not- | ||
| 1292 | * an exported member of the RCU API. | ||
| 1293 | */ | ||
| 1294 | int rcu_needs_cpu(int cpu) | ||
| 1295 | { | ||
| 1296 | /* RCU callbacks either ready or pending? */ | ||
| 1297 | return per_cpu(rcu_data, cpu).nxtlist || | ||
| 1298 | per_cpu(rcu_bh_data, cpu).nxtlist; | ||
| 1299 | } | ||
| 1300 | |||
| 1301 | /* | ||
| 1302 | * Initialize a CPU's per-CPU RCU data. We take this "scorched earth" | ||
| 1303 | * approach so that we don't have to worry about how long the CPU has | ||
| 1304 | * been gone, or whether it ever was online previously. We do trust the | ||
| 1305 | * ->mynode field, as it is constant for a given struct rcu_data and | ||
| 1306 | * initialized during early boot. | ||
| 1307 | * | ||
| 1308 | * Note that only one online or offline event can be happening at a given | ||
| 1309 | * time. Note also that we can accept some slop in the rsp->completed | ||
| 1310 | * access due to the fact that this CPU cannot possibly have any RCU | ||
| 1311 | * callbacks in flight yet. | ||
| 1312 | */ | ||
| 1313 | static void | ||
| 1314 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | ||
| 1315 | { | ||
| 1316 | unsigned long flags; | ||
| 1317 | int i; | ||
| 1318 | long lastcomp; | ||
| 1319 | unsigned long mask; | ||
| 1320 | struct rcu_data *rdp = rsp->rda[cpu]; | ||
| 1321 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
| 1322 | |||
| 1323 | /* Set up local state, ensuring consistent view of global state. */ | ||
| 1324 | spin_lock_irqsave(&rnp->lock, flags); | ||
| 1325 | lastcomp = rsp->completed; | ||
| 1326 | rdp->completed = lastcomp; | ||
| 1327 | rdp->gpnum = lastcomp; | ||
| 1328 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | ||
| 1329 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | ||
| 1330 | rdp->beenonline = 1; /* We have now been online. */ | ||
| 1331 | rdp->passed_quiesc_completed = lastcomp - 1; | ||
| 1332 | rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); | ||
| 1333 | rdp->nxtlist = NULL; | ||
| 1334 | for (i = 0; i < RCU_NEXT_SIZE; i++) | ||
| 1335 | rdp->nxttail[i] = &rdp->nxtlist; | ||
| 1336 | rdp->qlen = 0; | ||
| 1337 | rdp->blimit = blimit; | ||
| 1338 | #ifdef CONFIG_NO_HZ | ||
| 1339 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); | ||
| 1340 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
| 1341 | rdp->cpu = cpu; | ||
| 1342 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
| 1343 | |||
| 1344 | /* | ||
| 1345 | * A new grace period might start here. If so, we won't be part | ||
| 1346 | * of it, but that is OK, as we are currently in a quiescent state. | ||
| 1347 | */ | ||
| 1348 | |||
| 1349 | /* Exclude any attempts to start a new GP on large systems. */ | ||
| 1350 | spin_lock(&rsp->onofflock); /* irqs already disabled. */ | ||
| 1351 | |||
| 1352 | /* Add CPU to rcu_node bitmasks. */ | ||
| 1353 | rnp = rdp->mynode; | ||
| 1354 | mask = rdp->grpmask; | ||
| 1355 | do { | ||
| 1356 | /* Exclude any attempts to start a new GP on small systems. */ | ||
| 1357 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
| 1358 | rnp->qsmaskinit |= mask; | ||
| 1359 | mask = rnp->grpmask; | ||
| 1360 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | ||
| 1361 | rnp = rnp->parent; | ||
| 1362 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); | ||
| 1363 | |||
| 1364 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | ||
| 1365 | |||
| 1366 | /* | ||
| 1367 | * A new grace period might start here. If so, we will be part of | ||
| 1368 | * it, and its gpnum will be greater than ours, so we will | ||
| 1369 | * participate. It is also possible for the gpnum to have been | ||
| 1370 | * incremented before this function was called, and the bitmasks | ||
| 1371 | * to not be filled out until now, in which case we will also | ||
| 1372 | * participate due to our gpnum being behind. | ||
| 1373 | */ | ||
| 1374 | |||
| 1375 | /* Since it is coming online, the CPU is in a quiescent state. */ | ||
| 1376 | cpu_quiet(cpu, rsp, rdp, lastcomp); | ||
| 1377 | local_irq_restore(flags); | ||
| 1378 | } | ||
| 1379 | |||
| 1380 | static void __cpuinit rcu_online_cpu(int cpu) | ||
| 1381 | { | ||
| 1382 | #ifdef CONFIG_NO_HZ | ||
| 1383 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | ||
| 1384 | |||
| 1385 | rdtp->dynticks_nesting = 1; | ||
| 1386 | rdtp->dynticks |= 1; /* need consecutive #s even for hotplug. */ | ||
| 1387 | rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1; | ||
| 1388 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
| 1389 | rcu_init_percpu_data(cpu, &rcu_state); | ||
| 1390 | rcu_init_percpu_data(cpu, &rcu_bh_state); | ||
| 1391 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | ||
| 1392 | } | ||
| 1393 | |||
| 1394 | /* | ||
| 1395 | * Handle CPU online/offline notification events. | ||
| 1396 | */ | ||
| 1397 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | ||
| 1398 | unsigned long action, void *hcpu) | ||
| 1399 | { | ||
| 1400 | long cpu = (long)hcpu; | ||
| 1401 | |||
| 1402 | switch (action) { | ||
| 1403 | case CPU_UP_PREPARE: | ||
| 1404 | case CPU_UP_PREPARE_FROZEN: | ||
| 1405 | rcu_online_cpu(cpu); | ||
| 1406 | break; | ||
| 1407 | case CPU_DEAD: | ||
| 1408 | case CPU_DEAD_FROZEN: | ||
| 1409 | case CPU_UP_CANCELED: | ||
| 1410 | case CPU_UP_CANCELED_FROZEN: | ||
| 1411 | rcu_offline_cpu(cpu); | ||
| 1412 | break; | ||
| 1413 | default: | ||
| 1414 | break; | ||
| 1415 | } | ||
| 1416 | return NOTIFY_OK; | ||
| 1417 | } | ||
| 1418 | |||
| 1419 | /* | ||
| 1420 | * Compute the per-level fanout, either using the exact fanout specified | ||
| 1421 | * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. | ||
| 1422 | */ | ||
| 1423 | #ifdef CONFIG_RCU_FANOUT_EXACT | ||
| 1424 | static void __init rcu_init_levelspread(struct rcu_state *rsp) | ||
| 1425 | { | ||
| 1426 | int i; | ||
| 1427 | |||
| 1428 | for (i = NUM_RCU_LVLS - 1; i >= 0; i--) | ||
| 1429 | rsp->levelspread[i] = CONFIG_RCU_FANOUT; | ||
| 1430 | } | ||
| 1431 | #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */ | ||
| 1432 | static void __init rcu_init_levelspread(struct rcu_state *rsp) | ||
| 1433 | { | ||
| 1434 | int ccur; | ||
| 1435 | int cprv; | ||
| 1436 | int i; | ||
| 1437 | |||
| 1438 | cprv = NR_CPUS; | ||
| 1439 | for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { | ||
| 1440 | ccur = rsp->levelcnt[i]; | ||
| 1441 | rsp->levelspread[i] = (cprv + ccur - 1) / ccur; | ||
| 1442 | cprv = ccur; | ||
| 1443 | } | ||
| 1444 | } | ||
| 1445 | #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */ | ||
| 1446 | |||
| 1447 | /* | ||
| 1448 | * Helper function for rcu_init() that initializes one rcu_state structure. | ||
| 1449 | */ | ||
| 1450 | static void __init rcu_init_one(struct rcu_state *rsp) | ||
| 1451 | { | ||
| 1452 | int cpustride = 1; | ||
| 1453 | int i; | ||
| 1454 | int j; | ||
| 1455 | struct rcu_node *rnp; | ||
| 1456 | |||
| 1457 | /* Initialize the level-tracking arrays. */ | ||
| 1458 | |||
| 1459 | for (i = 1; i < NUM_RCU_LVLS; i++) | ||
| 1460 | rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; | ||
| 1461 | rcu_init_levelspread(rsp); | ||
| 1462 | |||
| 1463 | /* Initialize the elements themselves, starting from the leaves. */ | ||
| 1464 | |||
| 1465 | for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { | ||
| 1466 | cpustride *= rsp->levelspread[i]; | ||
| 1467 | rnp = rsp->level[i]; | ||
| 1468 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { | ||
| 1469 | spin_lock_init(&rnp->lock); | ||
| 1470 | rnp->qsmask = 0; | ||
| 1471 | rnp->qsmaskinit = 0; | ||
| 1472 | rnp->grplo = j * cpustride; | ||
| 1473 | rnp->grphi = (j + 1) * cpustride - 1; | ||
| 1474 | if (rnp->grphi >= NR_CPUS) | ||
| 1475 | rnp->grphi = NR_CPUS - 1; | ||
| 1476 | if (i == 0) { | ||
| 1477 | rnp->grpnum = 0; | ||
| 1478 | rnp->grpmask = 0; | ||
| 1479 | rnp->parent = NULL; | ||
| 1480 | } else { | ||
| 1481 | rnp->grpnum = j % rsp->levelspread[i - 1]; | ||
| 1482 | rnp->grpmask = 1UL << rnp->grpnum; | ||
| 1483 | rnp->parent = rsp->level[i - 1] + | ||
| 1484 | j / rsp->levelspread[i - 1]; | ||
| 1485 | } | ||
| 1486 | rnp->level = i; | ||
| 1487 | } | ||
| 1488 | } | ||
| 1489 | } | ||
| 1490 | |||
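To make the arithmetic concrete, here is a worked example for a hypothetical 6-CPU machine with one root over two leaves (levelcnt = {1, 2}), combining the balanced levelspread computation with the grplo/grphi geometry assigned by rcu_init_one():

#include <stdio.h>

#define NR_CPUS		6
#define NUM_RCU_LVLS	2

int main(void)
{
	int levelcnt[NUM_RCU_LVLS] = { 1, 2 };
	int levelspread[NUM_RCU_LVLS];
	int cprv = NR_CPUS;

	/* Balanced fanout, as in the !CONFIG_RCU_FANOUT_EXACT case. */
	for (int i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		int ccur = levelcnt[i];

		levelspread[i] = (cprv + ccur - 1) / ccur;  /* ceiling div */
		cprv = ccur;
	}

	/* Node geometry, as computed by rcu_init_one(). */
	int cpustride = 1;
	for (int i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		for (int j = 0; j < levelcnt[i]; j++) {
			int grplo = j * cpustride;
			int grphi = (j + 1) * cpustride - 1;

			if (grphi >= NR_CPUS)
				grphi = NR_CPUS - 1;
			printf("level %d node %d: CPUs %d-%d\n",
			       i, j, grplo, grphi);
		}
	}
	return 0;	/* leaves cover CPUs 0-2 and 3-5, root covers 0-5 */
}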
| 1491 | /* | ||
| 1492 | * Helper macro for __rcu_init(). To be used nowhere else! | ||
| 1493 | * Assigns leaf node pointers into each CPU's rcu_data structure. | ||
| 1494 | */ | ||
| 1495 | #define RCU_DATA_PTR_INIT(rsp, rcu_data) \ | ||
| 1496 | do { \ | ||
| 1497 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ | ||
| 1498 | j = 0; \ | ||
| 1499 | for_each_possible_cpu(i) { \ | ||
| 1500 | if (i > rnp[j].grphi) \ | ||
| 1501 | j++; \ | ||
| 1502 | per_cpu(rcu_data, i).mynode = &rnp[j]; \ | ||
| 1503 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ | ||
| 1504 | } \ | ||
| 1505 | } while (0) | ||
| 1506 | |||
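The macro leans on the leaf nodes being sorted by CPU range: advance j whenever the current CPU passes the current leaf's grphi. A toy sketch of that mapping, reusing the 6-CPU geometry from the example above:

#include <stdio.h>

struct leaf { int grplo, grphi; };

int main(void)
{
	struct leaf leaves[] = { { 0, 2 }, { 3, 5 } };
	int j = 0;

	for (int cpu = 0; cpu < 6; cpu++) {
		if (cpu > leaves[j].grphi)
			j++;
		printf("cpu %d -> leaf %d\n", cpu, j);
	}
	return 0;
}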
| 1507 | static struct notifier_block __cpuinitdata rcu_nb = { | ||
| 1508 | .notifier_call = rcu_cpu_notify, | ||
| 1509 | }; | ||
| 1510 | |||
| 1511 | void __init __rcu_init(void) | ||
| 1512 | { | ||
| 1513 | int i; /* All used by RCU_DATA_PTR_INIT(). */ | ||
| 1514 | int j; | ||
| 1515 | struct rcu_node *rnp; | ||
| 1516 | |||
| 1517 | printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n"); | ||
| 1518 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 1519 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | ||
| 1520 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 1521 | rcu_init_one(&rcu_state); | ||
| 1522 | RCU_DATA_PTR_INIT(&rcu_state, rcu_data); | ||
| 1523 | rcu_init_one(&rcu_bh_state); | ||
| 1524 | RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data); | ||
| 1525 | |||
| 1526 | for_each_online_cpu(i) | ||
| 1527 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i); | ||
| 1528 | /* Register notifier for non-boot CPUs */ | ||
| 1529 | register_cpu_notifier(&rcu_nb); | ||
| 1530 | printk(KERN_WARNING "Experimental hierarchical RCU init done.\n"); | ||
| 1531 | } | ||
| 1532 | |||
| 1533 | module_param(blimit, int, 0); | ||
| 1534 | module_param(qhimark, int, 0); | ||
| 1535 | module_param(qlowmark, int, 0); | ||
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c new file mode 100644 index 00000000000..d6db3e83782 --- /dev/null +++ b/kernel/rcutree_trace.c | |||
| @@ -0,0 +1,271 @@ | |||
| 1 | /* | ||
| 2 | * Read-Copy Update tracing for hierarchical implementation | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * Copyright IBM Corporation, 2008 | ||
| 19 | * | ||
| 20 | * Papers: http://www.rdrop.com/users/paulmck/RCU | ||
| 21 | * | ||
| 22 | * For detailed explanation of Read-Copy Update mechanism see - | ||
| 23 | * Documentation/RCU | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | #include <linux/types.h> | ||
| 27 | #include <linux/kernel.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/spinlock.h> | ||
| 30 | #include <linux/smp.h> | ||
| 31 | #include <linux/rcupdate.h> | ||
| 32 | #include <linux/interrupt.h> | ||
| 33 | #include <linux/sched.h> | ||
| 34 | #include <asm/atomic.h> | ||
| 35 | #include <linux/bitops.h> | ||
| 36 | #include <linux/module.h> | ||
| 37 | #include <linux/completion.h> | ||
| 38 | #include <linux/moduleparam.h> | ||
| 39 | #include <linux/percpu.h> | ||
| 40 | #include <linux/notifier.h> | ||
| 41 | #include <linux/cpu.h> | ||
| 42 | #include <linux/mutex.h> | ||
| 43 | #include <linux/debugfs.h> | ||
| 44 | #include <linux/seq_file.h> | ||
| 45 | |||
| 46 | static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | ||
| 47 | { | ||
| 48 | if (!rdp->beenonline) | ||
| 49 | return; | ||
| 50 | seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x", | ||
| 51 | rdp->cpu, | ||
| 52 | cpu_is_offline(rdp->cpu) ? '!' : ' ', | ||
| 53 | rdp->completed, rdp->gpnum, | ||
| 54 | rdp->passed_quiesc, rdp->passed_quiesc_completed, | ||
| 55 | rdp->qs_pending, | ||
| 56 | rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending, | ||
| 57 | (int)(rdp->n_rcu_pending & 0xffff)); | ||
| 58 | #ifdef CONFIG_NO_HZ | ||
| 59 | seq_printf(m, " dt=%d/%d dn=%d df=%lu", | ||
| 60 | rdp->dynticks->dynticks, | ||
| 61 | rdp->dynticks->dynticks_nesting, | ||
| 62 | rdp->dynticks->dynticks_nmi, | ||
| 63 | rdp->dynticks_fqs); | ||
| 64 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
| 65 | seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); | ||
| 66 | seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit); | ||
| 67 | } | ||
| 68 | |||
| 69 | #define PRINT_RCU_DATA(name, func, m) \ | ||
| 70 | do { \ | ||
| 71 | int _p_r_d_i; \ | ||
| 72 | \ | ||
| 73 | for_each_possible_cpu(_p_r_d_i) \ | ||
| 74 | func(m, &per_cpu(name, _p_r_d_i)); \ | ||
| 75 | } while (0) | ||
| 76 | |||
| 77 | static int show_rcudata(struct seq_file *m, void *unused) | ||
| 78 | { | ||
| 79 | seq_puts(m, "rcu:\n"); | ||
| 80 | PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m); | ||
| 81 | seq_puts(m, "rcu_bh:\n"); | ||
| 82 | PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m); | ||
| 83 | return 0; | ||
| 84 | } | ||
| 85 | |||
| 86 | static int rcudata_open(struct inode *inode, struct file *file) | ||
| 87 | { | ||
| 88 | return single_open(file, show_rcudata, NULL); | ||
| 89 | } | ||
| 90 | |||
| 91 | static struct file_operations rcudata_fops = { | ||
| 92 | .owner = THIS_MODULE, | ||
| 93 | .open = rcudata_open, | ||
| 94 | .read = seq_read, | ||
| 95 | .llseek = seq_lseek, | ||
| 96 | .release = single_release, | ||
| 97 | }; | ||
| 98 | |||
| 99 | static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) | ||
| 100 | { | ||
| 101 | if (!rdp->beenonline) | ||
| 102 | return; | ||
| 103 | seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld", | ||
| 104 | rdp->cpu, | ||
| 105 | cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"", | ||
| 106 | rdp->completed, rdp->gpnum, | ||
| 107 | rdp->passed_quiesc, rdp->passed_quiesc_completed, | ||
| 108 | rdp->qs_pending, | ||
| 109 | rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending, | ||
| 110 | rdp->n_rcu_pending); | ||
| 111 | #ifdef CONFIG_NO_HZ | ||
| 112 | seq_printf(m, ",%d,%d,%d,%lu", | ||
| 113 | rdp->dynticks->dynticks, | ||
| 114 | rdp->dynticks->dynticks_nesting, | ||
| 115 | rdp->dynticks->dynticks_nmi, | ||
| 116 | rdp->dynticks_fqs); | ||
| 117 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
| 118 | seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); | ||
| 119 | seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit); | ||
| 120 | } | ||
| 121 | |||
| 122 | static int show_rcudata_csv(struct seq_file *m, void *unused) | ||
| 123 | { | ||
| 124 | seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"qp\",\"rpfq\",\"rp\","); | ||
| 125 | #ifdef CONFIG_NO_HZ | ||
| 126 | seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); | ||
| 127 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
| 128 | seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n"); | ||
| 129 | seq_puts(m, "\"rcu:\"\n"); | ||
| 130 | PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m); | ||
| 131 | seq_puts(m, "\"rcu_bh:\"\n"); | ||
| 132 | PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m); | ||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | static int rcudata_csv_open(struct inode *inode, struct file *file) | ||
| 137 | { | ||
| 138 | return single_open(file, show_rcudata_csv, NULL); | ||
| 139 | } | ||
| 140 | |||
| 141 | static struct file_operations rcudata_csv_fops = { | ||
| 142 | .owner = THIS_MODULE, | ||
| 143 | .open = rcudata_csv_open, | ||
| 144 | .read = seq_read, | ||
| 145 | .llseek = seq_lseek, | ||
| 146 | .release = single_release, | ||
| 147 | }; | ||
| 148 | |||
| 149 | static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | ||
| 150 | { | ||
| 151 | int level = 0; | ||
| 152 | struct rcu_node *rnp; | ||
| 153 | |||
| 154 | seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " | ||
| 155 | "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n", | ||
| 156 | rsp->completed, rsp->gpnum, rsp->signaled, | ||
| 157 | (long)(rsp->jiffies_force_qs - jiffies), | ||
| 158 | (int)(jiffies & 0xffff), | ||
| 159 | rsp->n_force_qs, rsp->n_force_qs_ngp, | ||
| 160 | rsp->n_force_qs - rsp->n_force_qs_ngp, | ||
| 161 | rsp->n_force_qs_lh); | ||
| 162 | for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { | ||
| 163 | if (rnp->level != level) { | ||
| 164 | seq_puts(m, "\n"); | ||
| 165 | level = rnp->level; | ||
| 166 | } | ||
| 167 | seq_printf(m, "%lx/%lx %d:%d ^%d ", | ||
| 168 | rnp->qsmask, rnp->qsmaskinit, | ||
| 169 | rnp->grplo, rnp->grphi, rnp->grpnum); | ||
| 170 | } | ||
| 171 | seq_puts(m, "\n"); | ||
| 172 | } | ||
| 173 | |||
| 174 | static int show_rcuhier(struct seq_file *m, void *unused) | ||
| 175 | { | ||
| 176 | seq_puts(m, "rcu:\n"); | ||
| 177 | print_one_rcu_state(m, &rcu_state); | ||
| 178 | seq_puts(m, "rcu_bh:\n"); | ||
| 179 | print_one_rcu_state(m, &rcu_bh_state); | ||
| 180 | return 0; | ||
| 181 | } | ||
| 182 | |||
| 183 | static int rcuhier_open(struct inode *inode, struct file *file) | ||
| 184 | { | ||
| 185 | return single_open(file, show_rcuhier, NULL); | ||
| 186 | } | ||
| 187 | |||
| 188 | static struct file_operations rcuhier_fops = { | ||
| 189 | .owner = THIS_MODULE, | ||
| 190 | .open = rcuhier_open, | ||
| 191 | .read = seq_read, | ||
| 192 | .llseek = seq_lseek, | ||
| 193 | .release = single_release, | ||
| 194 | }; | ||
| 195 | |||
| 196 | static int show_rcugp(struct seq_file *m, void *unused) | ||
| 197 | { | ||
| 198 | seq_printf(m, "rcu: completed=%ld gpnum=%ld\n", | ||
| 199 | rcu_state.completed, rcu_state.gpnum); | ||
| 200 | seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n", | ||
| 201 | rcu_bh_state.completed, rcu_bh_state.gpnum); | ||
| 202 | return 0; | ||
| 203 | } | ||
| 204 | |||
| 205 | static int rcugp_open(struct inode *inode, struct file *file) | ||
| 206 | { | ||
| 207 | return single_open(file, show_rcugp, NULL); | ||
| 208 | } | ||
| 209 | |||
| 210 | static struct file_operations rcugp_fops = { | ||
| 211 | .owner = THIS_MODULE, | ||
| 212 | .open = rcugp_open, | ||
| 213 | .read = seq_read, | ||
| 214 | .llseek = seq_lseek, | ||
| 215 | .release = single_release, | ||
| 216 | }; | ||
| 217 | |||
| 218 | static struct dentry *rcudir, *datadir, *datadir_csv, *hierdir, *gpdir; | ||
| 219 | static int __init rcuclassic_trace_init(void) | ||
| 220 | { | ||
| 221 | rcudir = debugfs_create_dir("rcu", NULL); | ||
| 222 | if (!rcudir) | ||
| 223 | goto out; | ||
| 224 | |||
| 225 | datadir = debugfs_create_file("rcudata", 0444, rcudir, | ||
| 226 | NULL, &rcudata_fops); | ||
| 227 | if (!datadir) | ||
| 228 | goto free_out; | ||
| 229 | |||
| 230 | datadir_csv = debugfs_create_file("rcudata.csv", 0444, rcudir, | ||
| 231 | NULL, &rcudata_csv_fops); | ||
| 232 | if (!datadir_csv) | ||
| 233 | goto free_out; | ||
| 234 | |||
| 235 | gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops); | ||
| 236 | if (!gpdir) | ||
| 237 | goto free_out; | ||
| 238 | |||
| 239 | hierdir = debugfs_create_file("rcuhier", 0444, rcudir, | ||
| 240 | NULL, &rcuhier_fops); | ||
| 241 | if (!hierdir) | ||
| 242 | goto free_out; | ||
| 243 | return 0; | ||
| 244 | free_out: | ||
| 245 | if (datadir) | ||
| 246 | debugfs_remove(datadir); | ||
| 247 | if (datadir_csv) | ||
| 248 | debugfs_remove(datadir_csv); | ||
| 249 | if (gpdir) | ||
| 250 | debugfs_remove(gpdir); | ||
| 251 | debugfs_remove(rcudir); | ||
| 252 | out: | ||
| 253 | return 1; | ||
| 254 | } | ||
| 255 | |||
| 256 | static void __exit rcuclassic_trace_cleanup(void) | ||
| 257 | { | ||
| 258 | debugfs_remove(datadir); | ||
| 259 | debugfs_remove(datadir_csv); | ||
| 260 | debugfs_remove(gpdir); | ||
| 261 | debugfs_remove(hierdir); | ||
| 262 | debugfs_remove(rcudir); | ||
| 263 | } | ||
| 264 | |||
| 265 | |||
| 266 | module_init(rcuclassic_trace_init); | ||
| 267 | module_exit(rcuclassic_trace_cleanup); | ||
| 268 | |||
| 269 | MODULE_AUTHOR("Paul E. McKenney"); | ||
| 270 | MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation"); | ||
| 271 | MODULE_LICENSE("GPL"); | ||
diff --git a/kernel/resource.c b/kernel/resource.c index 4337063663e..e633106b12f 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
| @@ -853,6 +853,15 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size) | |||
| 853 | if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && | 853 | if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && |
| 854 | PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) | 854 | PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) |
| 855 | continue; | 855 | continue; |
| 856 | /* | ||
| 857 | * if a resource is "BUSY", it's not a hardware resource | ||
| 858 | * but a driver mapping of such a resource; we don't want | ||
| 859 | * to warn for those; some drivers legitimately map only | ||
| 860 | * partial hardware resources. (example: vesafb) | ||
| 861 | */ | ||
| 862 | if (p->flags & IORESOURCE_BUSY) | ||
| 863 | continue; | ||
| 864 | |||
| 856 | printk(KERN_WARNING "resource map sanity check conflict: " | 865 | printk(KERN_WARNING "resource map sanity check conflict: " |
| 857 | "0x%llx 0x%llx 0x%llx 0x%llx %s\n", | 866 | "0x%llx 0x%llx 0x%llx 0x%llx %s\n", |
| 858 | (unsigned long long)addr, | 867 | (unsigned long long)addr, |
diff --git a/kernel/softirq.c b/kernel/softirq.c index e7c69a720d6..466e75ce271 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -102,20 +102,6 @@ void local_bh_disable(void) | |||
| 102 | 102 | ||
| 103 | EXPORT_SYMBOL(local_bh_disable); | 103 | EXPORT_SYMBOL(local_bh_disable); |
| 104 | 104 | ||
| 105 | void __local_bh_enable(void) | ||
| 106 | { | ||
| 107 | WARN_ON_ONCE(in_irq()); | ||
| 108 | |||
| 109 | /* | ||
| 110 | * softirqs should never be enabled by __local_bh_enable(), | ||
| 111 | * it always nests inside local_bh_enable() sections: | ||
| 112 | */ | ||
| 113 | WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET); | ||
| 114 | |||
| 115 | sub_preempt_count(SOFTIRQ_OFFSET); | ||
| 116 | } | ||
| 117 | EXPORT_SYMBOL_GPL(__local_bh_enable); | ||
| 118 | |||
| 119 | /* | 105 | /* |
| 120 | * Special-case - softirqs can safely be enabled in | 106 | * Special-case - softirqs can safely be enabled in |
| 121 | * cond_resched_softirq(), or by __do_softirq(), | 107 | * cond_resched_softirq(), or by __do_softirq(), |
| @@ -269,6 +255,7 @@ void irq_enter(void) | |||
| 269 | { | 255 | { |
| 270 | int cpu = smp_processor_id(); | 256 | int cpu = smp_processor_id(); |
| 271 | 257 | ||
| 258 | rcu_irq_enter(); | ||
| 272 | if (idle_cpu(cpu) && !in_interrupt()) { | 259 | if (idle_cpu(cpu) && !in_interrupt()) { |
| 273 | __irq_enter(); | 260 | __irq_enter(); |
| 274 | tick_check_idle(cpu); | 261 | tick_check_idle(cpu); |
| @@ -295,9 +282,9 @@ void irq_exit(void) | |||
| 295 | 282 | ||
| 296 | #ifdef CONFIG_NO_HZ | 283 | #ifdef CONFIG_NO_HZ |
| 297 | /* Make sure that timer wheel updates are propagated */ | 284 | /* Make sure that timer wheel updates are propagated */ |
| 298 | if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched()) | ||
| 299 | tick_nohz_stop_sched_tick(0); | ||
| 300 | rcu_irq_exit(); | 285 | rcu_irq_exit(); |
| 286 | if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) | ||
| 287 | tick_nohz_stop_sched_tick(0); | ||
| 301 | #endif | 288 | #endif |
| 302 | preempt_enable_no_resched(); | 289 | preempt_enable_no_resched(); |
| 303 | } | 290 | } |
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index 94b527ef1d1..eb212f8f8bc 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 6 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
| 7 | */ | 7 | */ |
| 8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
| 9 | #include <linux/kernel.h> | ||
| 9 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 10 | #include <linux/kallsyms.h> | 11 | #include <linux/kallsyms.h> |
| 11 | #include <linux/stacktrace.h> | 12 | #include <linux/stacktrace.h> |
| @@ -24,3 +25,13 @@ void print_stack_trace(struct stack_trace *trace, int spaces) | |||
| 24 | } | 25 | } |
| 25 | EXPORT_SYMBOL_GPL(print_stack_trace); | 26 | EXPORT_SYMBOL_GPL(print_stack_trace); |
| 26 | 27 | ||
| 28 | /* | ||
| 29 | * Architectures that do not implement save_stack_trace_tsk get this | ||
| 30 | * weak alias and a once-per-bootup warning (whenever this facility | ||
| 31 | * is utilized - for example by procfs): | ||
| 32 | */ | ||
| 33 | __weak void | ||
| 34 | save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | ||
| 35 | { | ||
| 36 | WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n"); | ||
| 37 | } | ||
