-rw-r--r-- | Documentation/DocBook/device-drivers.tmpl | 5
-rw-r--r-- | Documentation/RCU/checklist.txt | 4
-rw-r--r-- | Documentation/RCU/stallwarn.txt | 22
-rw-r--r-- | Documentation/kernel-parameters.txt | 95
-rw-r--r-- | Documentation/kernel-per-CPU-kthreads.txt | 17
-rw-r--r-- | MAINTAINERS | 11
-rw-r--r-- | include/linux/rculist.h | 23
-rw-r--r-- | include/linux/rcupdate.h | 24
-rw-r--r-- | include/linux/rcutiny.h | 17
-rw-r--r-- | include/linux/rcutree.h | 2
-rw-r--r-- | include/trace/events/rcu.h | 80
-rw-r--r-- | kernel/Makefile | 11
-rw-r--r-- | kernel/lockdep.c | 4
-rw-r--r-- | kernel/rcu/Makefile | 6
-rw-r--r-- | kernel/rcu/rcu.h (renamed from kernel/rcu.h) | 7
-rw-r--r-- | kernel/rcu/srcu.c (renamed from kernel/srcu.c) | 0
-rw-r--r-- | kernel/rcu/tiny.c (renamed from kernel/rcutiny.c) | 37
-rw-r--r-- | kernel/rcu/tiny_plugin.h (renamed from kernel/rcutiny_plugin.h) | 0
-rw-r--r-- | kernel/rcu/torture.c (renamed from kernel/rcutorture.c) | 6
-rw-r--r-- | kernel/rcu/tree.c (renamed from kernel/rcutree.c) | 185
-rw-r--r-- | kernel/rcu/tree.h (renamed from kernel/rcutree.h) | 2
-rw-r--r-- | kernel/rcu/tree_plugin.h (renamed from kernel/rcutree_plugin.h) | 84
-rw-r--r-- | kernel/rcu/tree_trace.c (renamed from kernel/rcutree_trace.c) | 2
-rw-r--r-- | kernel/rcu/update.c (renamed from kernel/rcupdate.c) | 10
24 files changed, 464 insertions, 190 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index fe397f90a34f..6c9d9d37c83a 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -87,7 +87,10 @@ X!Iinclude/linux/kobject.h | |||
87 | !Ekernel/printk/printk.c | 87 | !Ekernel/printk/printk.c |
88 | !Ekernel/panic.c | 88 | !Ekernel/panic.c |
89 | !Ekernel/sys.c | 89 | !Ekernel/sys.c |
90 | !Ekernel/rcupdate.c | 90 | !Ekernel/rcu/srcu.c |
91 | !Ekernel/rcu/tree.c | ||
92 | !Ekernel/rcu/tree_plugin.h | ||
93 | !Ekernel/rcu/update.c | ||
91 | </sect1> | 94 | </sect1> |
92 | 95 | ||
93 | <sect1><title>Device Resource Management</title> | 96 | <sect1><title>Device Resource Management</title> |
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 7703ec73a9bb..91266193b8f4 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -202,8 +202,8 @@ over a rather long period of time, but improvements are always welcome! | |||
202 | updater uses call_rcu_sched() or synchronize_sched(), then | 202 | updater uses call_rcu_sched() or synchronize_sched(), then |
203 | the corresponding readers must disable preemption, possibly | 203 | the corresponding readers must disable preemption, possibly |
204 | by calling rcu_read_lock_sched() and rcu_read_unlock_sched(). | 204 | by calling rcu_read_lock_sched() and rcu_read_unlock_sched(). |
205 | If the updater uses synchronize_srcu() or call_srcu(), | 205 | If the updater uses synchronize_srcu() or call_srcu(), then |
206 | the the corresponding readers must use srcu_read_lock() and | 206 | the corresponding readers must use srcu_read_lock() and |
207 | srcu_read_unlock(), and with the same srcu_struct. The rules for | 207 | srcu_read_unlock(), and with the same srcu_struct. The rules for |
208 | the expedited primitives are the same as for their non-expedited | 208 | the expedited primitives are the same as for their non-expedited |
209 | counterparts. Mixing things up will result in confusion and | 209 | counterparts. Mixing things up will result in confusion and |
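The pairing rules above are easiest to see as code. A minimal sketch (hypothetical readers, not part of this patch): an updater using call_rcu_sched() or synchronize_sched() pairs with preemption-disabled readers, while an SRCU updater pairs with srcu_read_lock()/srcu_read_unlock() on the very same srcu_struct.

    #include <linux/rcupdate.h>
    #include <linux/srcu.h>

    DEFINE_SRCU(example_srcu);              /* hypothetical srcu_struct */

    static void sched_flavor_reader(void)
    {
            rcu_read_lock_sched();          /* pairs with synchronize_sched()/call_rcu_sched() */
            /* ... access RCU-sched-protected data ... */
            rcu_read_unlock_sched();
    }

    static void srcu_flavor_reader(void)
    {
            int idx = srcu_read_lock(&example_srcu);

            /* ... access data whose updater uses synchronize_srcu(&example_srcu) ... */
            srcu_read_unlock(&example_srcu, idx);
    }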
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 8e9359de1d28..6f3a0057548e 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -12,12 +12,12 @@ CONFIG_RCU_CPU_STALL_TIMEOUT | |||
12 | This kernel configuration parameter defines the period of time | 12 | This kernel configuration parameter defines the period of time |
13 | that RCU will wait from the beginning of a grace period until it | 13 | that RCU will wait from the beginning of a grace period until it |
14 | issues an RCU CPU stall warning. This time period is normally | 14 | issues an RCU CPU stall warning. This time period is normally |
15 | sixty seconds. | 15 | 21 seconds. |
16 | 16 | ||
17 | This configuration parameter may be changed at runtime via the | 17 | This configuration parameter may be changed at runtime via the |
18 | /sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however | 18 | /sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however |
19 | this parameter is checked only at the beginning of a cycle. | 19 | this parameter is checked only at the beginning of a cycle. |
20 | So if you are 30 seconds into a 70-second stall, setting this | 20 | So if you are 10 seconds into a 40-second stall, setting this |
21 | sysfs parameter to (say) five will shorten the timeout for the | 21 | sysfs parameter to (say) five will shorten the timeout for the |
22 | -next- stall, or the following warning for the current stall | 22 | -next- stall, or the following warning for the current stall |
23 | (assuming the stall lasts long enough). It will not affect the | 23 | (assuming the stall lasts long enough). It will not affect the |
@@ -32,7 +32,7 @@ CONFIG_RCU_CPU_STALL_VERBOSE | |||
32 | also dump the stacks of any tasks that are blocking the current | 32 | also dump the stacks of any tasks that are blocking the current |
33 | RCU-preempt grace period. | 33 | RCU-preempt grace period. |
34 | 34 | ||
35 | RCU_CPU_STALL_INFO | 35 | CONFIG_RCU_CPU_STALL_INFO |
36 | 36 | ||
37 | This kernel configuration parameter causes the stall warning to | 37 | This kernel configuration parameter causes the stall warning to |
38 | print out additional per-CPU diagnostic information, including | 38 | print out additional per-CPU diagnostic information, including |
@@ -43,7 +43,8 @@ RCU_STALL_DELAY_DELTA | |||
43 | Although the lockdep facility is extremely useful, it does add | 43 | Although the lockdep facility is extremely useful, it does add |
44 | some overhead. Therefore, under CONFIG_PROVE_RCU, the | 44 | some overhead. Therefore, under CONFIG_PROVE_RCU, the |
45 | RCU_STALL_DELAY_DELTA macro allows five extra seconds before | 45 | RCU_STALL_DELAY_DELTA macro allows five extra seconds before |
46 | giving an RCU CPU stall warning message. | 46 | giving an RCU CPU stall warning message. (This is a cpp |
47 | macro, not a kernel configuration parameter.) | ||
47 | 48 | ||
48 | RCU_STALL_RAT_DELAY | 49 | RCU_STALL_RAT_DELAY |
49 | 50 | ||
@@ -52,7 +53,8 @@ RCU_STALL_RAT_DELAY | |||
52 | However, if the offending CPU does not detect its own stall in | 53 | However, if the offending CPU does not detect its own stall in |
53 | the number of jiffies specified by RCU_STALL_RAT_DELAY, then | 54 | the number of jiffies specified by RCU_STALL_RAT_DELAY, then |
54 | some other CPU will complain. This delay is normally set to | 55 | some other CPU will complain. This delay is normally set to |
55 | two jiffies. | 56 | two jiffies. (This is a cpp macro, not a kernel configuration |
57 | parameter.) | ||
56 | 58 | ||
57 | When a CPU detects that it is stalling, it will print a message similar | 59 | When a CPU detects that it is stalling, it will print a message similar |
58 | to the following: | 60 | to the following: |
@@ -86,7 +88,12 @@ printing, there will be a spurious stall-warning message: | |||
86 | 88 | ||
87 | INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies) | 89 | INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies) |
88 | 90 | ||
89 | This is rare, but does happen from time to time in real life. | 91 | This is rare, but does happen from time to time in real life. It is also |
92 | possible for a zero-jiffy stall to be flagged in this case, depending | ||
93 | on how the stall warning and the grace-period initialization happen to | ||
94 | interact. Please note that it is not possible to entirely eliminate this | ||
95 | sort of false positive without resorting to things like stop_machine(), | ||
96 | which is overkill for this sort of problem. | ||
90 | 97 | ||
91 | If the CONFIG_RCU_CPU_STALL_INFO kernel configuration parameter is set, | 98 | If the CONFIG_RCU_CPU_STALL_INFO kernel configuration parameter is set, |
92 | more information is printed with the stall-warning message, for example: | 99 | more information is printed with the stall-warning message, for example: |
@@ -216,4 +223,5 @@ that portion of the stack which remains the same from trace to trace. | |||
216 | If you can reliably trigger the stall, ftrace can be quite helpful. | 223 | If you can reliably trigger the stall, ftrace can be quite helpful. |
217 | 224 | ||
218 | RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE | 225 | RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE |
219 | and with RCU's event tracing. | 226 | and with RCU's event tracing. For information on RCU's event tracing, |
227 | see include/trace/events/rcu.h. | ||
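Putting the pieces above together: the timeout is a runtime-adjustable number of seconds, clamped to a sane range and then widened by the cpp-macro slack. A simplified sketch of rcu_jiffies_till_stall_check() from kernel/rcu/update.c (the exact clamp bounds are an assumption here):

    int rcu_jiffies_till_stall_check(void)
    {
            int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

            /* Keep the module/sysfs value within a sane range of seconds. */
            if (till_stall_check < 3)
                    till_stall_check = 3;
            else if (till_stall_check > 300)
                    till_stall_check = 300;

            /* RCU_STALL_DELAY_DELTA is zero unless CONFIG_PROVE_RCU adds slack. */
            return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
    }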
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fcbb736d55fe..203f4a9d9efe 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2599,7 +2599,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2599 | ramdisk_size= [RAM] Sizes of RAM disks in kilobytes | 2599 | ramdisk_size= [RAM] Sizes of RAM disks in kilobytes |
2600 | See Documentation/blockdev/ramdisk.txt. | 2600 | See Documentation/blockdev/ramdisk.txt. |
2601 | 2601 | ||
2602 | rcu_nocbs= [KNL,BOOT] | 2602 | rcu_nocbs= [KNL] |
2603 | In kernels built with CONFIG_RCU_NOCB_CPU=y, set | 2603 | In kernels built with CONFIG_RCU_NOCB_CPU=y, set |
2604 | the specified list of CPUs to be no-callback CPUs. | 2604 | the specified list of CPUs to be no-callback CPUs. |
2605 | Invocation of these CPUs' RCU callbacks will | 2605 | Invocation of these CPUs' RCU callbacks will |
@@ -2612,7 +2612,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2612 | real-time workloads. It can also improve energy | 2612 | real-time workloads. It can also improve energy |
2613 | efficiency for asymmetric multiprocessors. | 2613 | efficiency for asymmetric multiprocessors. |
2614 | 2614 | ||
2615 | rcu_nocb_poll [KNL,BOOT] | 2615 | rcu_nocb_poll [KNL] |
2616 | Rather than requiring that offloaded CPUs | 2616 | Rather than requiring that offloaded CPUs |
2617 | (specified by rcu_nocbs= above) explicitly | 2617 | (specified by rcu_nocbs= above) explicitly |
2618 | awaken the corresponding "rcuoN" kthreads, | 2618 | awaken the corresponding "rcuoN" kthreads, |
@@ -2623,126 +2623,145 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2623 | energy efficiency by requiring that the kthreads | 2623 | energy efficiency by requiring that the kthreads |
2624 | periodically wake up to do the polling. | 2624 | periodically wake up to do the polling. |
2625 | 2625 | ||
2626 | rcutree.blimit= [KNL,BOOT] | 2626 | rcutree.blimit= [KNL] |
2627 | Set maximum number of finished RCU callbacks to process | 2627 | Set maximum number of finished RCU callbacks to process |
2628 | in one batch. | 2628 | in one batch. |
2629 | 2629 | ||
2630 | rcutree.fanout_leaf= [KNL,BOOT] | 2630 | rcutree.rcu_fanout_leaf= [KNL] |
2631 | Increase the number of CPUs assigned to each | 2631 | Increase the number of CPUs assigned to each |
2632 | leaf rcu_node structure. Useful for very large | 2632 | leaf rcu_node structure. Useful for very large |
2633 | systems. | 2633 | systems. |
2634 | 2634 | ||
2635 | rcutree.jiffies_till_first_fqs= [KNL,BOOT] | 2635 | rcutree.jiffies_till_first_fqs= [KNL] |
2636 | Set delay from grace-period initialization to | 2636 | Set delay from grace-period initialization to |
2637 | first attempt to force quiescent states. | 2637 | first attempt to force quiescent states. |
2638 | Units are jiffies, minimum value is zero, | 2638 | Units are jiffies, minimum value is zero, |
2639 | and maximum value is HZ. | 2639 | and maximum value is HZ. |
2640 | 2640 | ||
2641 | rcutree.jiffies_till_next_fqs= [KNL,BOOT] | 2641 | rcutree.jiffies_till_next_fqs= [KNL] |
2642 | Set delay between subsequent attempts to force | 2642 | Set delay between subsequent attempts to force |
2643 | quiescent states. Units are jiffies, minimum | 2643 | quiescent states. Units are jiffies, minimum |
2644 | value is one, and maximum value is HZ. | 2644 | value is one, and maximum value is HZ. |
2645 | 2645 | ||
2646 | rcutree.qhimark= [KNL,BOOT] | 2646 | rcutree.qhimark= [KNL] |
2647 | Set threshold of queued | 2647 | Set threshold of queued |
2648 | RCU callbacks over which batch limiting is disabled. | 2648 | RCU callbacks over which batch limiting is disabled. |
2649 | 2649 | ||
2650 | rcutree.qlowmark= [KNL,BOOT] | 2650 | rcutree.qlowmark= [KNL] |
2651 | Set threshold of queued RCU callbacks below which | 2651 | Set threshold of queued RCU callbacks below which |
2652 | batch limiting is re-enabled. | 2652 | batch limiting is re-enabled. |
2653 | 2653 | ||
2654 | rcutree.rcu_cpu_stall_suppress= [KNL,BOOT] | 2654 | rcutree.rcu_idle_gp_delay= [KNL] |
2655 | Suppress RCU CPU stall warning messages. | ||
2656 | |||
2657 | rcutree.rcu_cpu_stall_timeout= [KNL,BOOT] | ||
2658 | Set timeout for RCU CPU stall warning messages. | ||
2659 | |||
2660 | rcutree.rcu_idle_gp_delay= [KNL,BOOT] | ||
2661 | Set wakeup interval for idle CPUs that have | 2655 | Set wakeup interval for idle CPUs that have |
2662 | RCU callbacks (RCU_FAST_NO_HZ=y). | 2656 | RCU callbacks (RCU_FAST_NO_HZ=y). |
2663 | 2657 | ||
2664 | rcutree.rcu_idle_lazy_gp_delay= [KNL,BOOT] | 2658 | rcutree.rcu_idle_lazy_gp_delay= [KNL] |
2665 | Set wakeup interval for idle CPUs that have | 2659 | Set wakeup interval for idle CPUs that have |
2666 | only "lazy" RCU callbacks (RCU_FAST_NO_HZ=y). | 2660 | only "lazy" RCU callbacks (RCU_FAST_NO_HZ=y). |
2667 | Lazy RCU callbacks are those which RCU can | 2661 | Lazy RCU callbacks are those which RCU can |
2668 | prove do nothing more than free memory. | 2662 | prove do nothing more than free memory. |
2669 | 2663 | ||
2670 | rcutorture.fqs_duration= [KNL,BOOT] | 2664 | rcutorture.fqs_duration= [KNL] |
2671 | Set duration of force_quiescent_state bursts. | 2665 | Set duration of force_quiescent_state bursts. |
2672 | 2666 | ||
2673 | rcutorture.fqs_holdoff= [KNL,BOOT] | 2667 | rcutorture.fqs_holdoff= [KNL] |
2674 | Set holdoff time within force_quiescent_state bursts. | 2668 | Set holdoff time within force_quiescent_state bursts. |
2675 | 2669 | ||
2676 | rcutorture.fqs_stutter= [KNL,BOOT] | 2670 | rcutorture.fqs_stutter= [KNL] |
2677 | Set wait time between force_quiescent_state bursts. | 2671 | Set wait time between force_quiescent_state bursts. |
2678 | 2672 | ||
2679 | rcutorture.irqreader= [KNL,BOOT] | 2673 | rcutorture.gp_exp= [KNL] |
2680 | Test RCU readers from irq handlers. | 2674 | Use expedited update-side primitives. |
2675 | |||
2676 | rcutorture.gp_normal= [KNL] | ||
2677 | Use normal (non-expedited) update-side primitives. | ||
2678 | If both gp_exp and gp_normal are set, do both. | ||
2679 | If neither gp_exp nor gp_normal is set, still | ||
2680 | do both. | ||
2681 | 2681 | ||
2682 | rcutorture.n_barrier_cbs= [KNL,BOOT] | 2682 | rcutorture.n_barrier_cbs= [KNL] |
2683 | Set callbacks/threads for rcu_barrier() testing. | 2683 | Set callbacks/threads for rcu_barrier() testing. |
2684 | 2684 | ||
2685 | rcutorture.nfakewriters= [KNL,BOOT] | 2685 | rcutorture.nfakewriters= [KNL] |
2686 | Set number of concurrent RCU writers. These just | 2686 | Set number of concurrent RCU writers. These just |
2687 | stress RCU, they don't participate in the actual | 2687 | stress RCU, they don't participate in the actual |
2688 | test, hence the "fake". | 2688 | test, hence the "fake". |
2689 | 2689 | ||
2690 | rcutorture.nreaders= [KNL,BOOT] | 2690 | rcutorture.nreaders= [KNL] |
2691 | Set number of RCU readers. | 2691 | Set number of RCU readers. |
2692 | 2692 | ||
2693 | rcutorture.onoff_holdoff= [KNL,BOOT] | 2693 | rcutorture.object_debug= [KNL] |
2694 | Enable debug-object double-call_rcu() testing. | ||
2695 | |||
2696 | rcutorture.onoff_holdoff= [KNL] | ||
2694 | Set time (s) after boot for CPU-hotplug testing. | 2697 | Set time (s) after boot for CPU-hotplug testing. |
2695 | 2698 | ||
2696 | rcutorture.onoff_interval= [KNL,BOOT] | 2699 | rcutorture.onoff_interval= [KNL] |
2697 | Set time (s) between CPU-hotplug operations, or | 2700 | Set time (s) between CPU-hotplug operations, or |
2698 | zero to disable CPU-hotplug testing. | 2701 | zero to disable CPU-hotplug testing. |
2699 | 2702 | ||
2700 | rcutorture.shuffle_interval= [KNL,BOOT] | 2703 | rcutorture.rcutorture_runnable= [BOOT] |
2704 | Start rcutorture running at boot time. | ||
2705 | |||
2706 | rcutorture.shuffle_interval= [KNL] | ||
2701 | Set task-shuffle interval (s). Shuffling tasks | 2707 | Set task-shuffle interval (s). Shuffling tasks |
2702 | allows some CPUs to go into dyntick-idle mode | 2708 | allows some CPUs to go into dyntick-idle mode |
2703 | during the rcutorture test. | 2709 | during the rcutorture test. |
2704 | 2710 | ||
2705 | rcutorture.shutdown_secs= [KNL,BOOT] | 2711 | rcutorture.shutdown_secs= [KNL] |
2706 | Set time (s) after boot for system shutdown. This | 2712 | Set time (s) after boot for system shutdown. This |
2707 | is useful for hands-off automated testing. | 2713 | is useful for hands-off automated testing. |
2708 | 2714 | ||
2709 | rcutorture.stall_cpu= [KNL,BOOT] | 2715 | rcutorture.stall_cpu= [KNL] |
2710 | Duration of CPU stall (s) to test RCU CPU stall | 2716 | Duration of CPU stall (s) to test RCU CPU stall |
2711 | warnings, zero to disable. | 2717 | warnings, zero to disable. |
2712 | 2718 | ||
2713 | rcutorture.stall_cpu_holdoff= [KNL,BOOT] | 2719 | rcutorture.stall_cpu_holdoff= [KNL] |
2714 | Time to wait (s) after boot before inducing stall. | 2720 | Time to wait (s) after boot before inducing stall. |
2715 | 2721 | ||
2716 | rcutorture.stat_interval= [KNL,BOOT] | 2722 | rcutorture.stat_interval= [KNL] |
2717 | Time (s) between statistics printk()s. | 2723 | Time (s) between statistics printk()s. |
2718 | 2724 | ||
2719 | rcutorture.stutter= [KNL,BOOT] | 2725 | rcutorture.stutter= [KNL] |
2720 | Time (s) to stutter testing, for example, specifying | 2726 | Time (s) to stutter testing, for example, specifying |
2721 | five seconds causes the test to run for five seconds, | 2727 | five seconds causes the test to run for five seconds, |
2722 | wait for five seconds, and so on. This tests RCU's | 2728 | wait for five seconds, and so on. This tests RCU's |
2723 | ability to transition abruptly to and from idle. | 2729 | ability to transition abruptly to and from idle. |
2724 | 2730 | ||
2725 | rcutorture.test_boost= [KNL,BOOT] | 2731 | rcutorture.test_boost= [KNL] |
2726 | Test RCU priority boosting? 0=no, 1=maybe, 2=yes. | 2732 | Test RCU priority boosting? 0=no, 1=maybe, 2=yes. |
2727 | "Maybe" means test if the RCU implementation | 2733 | "Maybe" means test if the RCU implementation |
2728 | under test supports RCU priority boosting. | 2734 | under test supports RCU priority boosting. |
2729 | 2735 | ||
2730 | rcutorture.test_boost_duration= [KNL,BOOT] | 2736 | rcutorture.test_boost_duration= [KNL] |
2731 | Duration (s) of each individual boost test. | 2737 | Duration (s) of each individual boost test. |
2732 | 2738 | ||
2733 | rcutorture.test_boost_interval= [KNL,BOOT] | 2739 | rcutorture.test_boost_interval= [KNL] |
2734 | Interval (s) between each boost test. | 2740 | Interval (s) between each boost test. |
2735 | 2741 | ||
2736 | rcutorture.test_no_idle_hz= [KNL,BOOT] | 2742 | rcutorture.test_no_idle_hz= [KNL] |
2737 | Test RCU's dyntick-idle handling. See also the | 2743 | Test RCU's dyntick-idle handling. See also the |
2738 | rcutorture.shuffle_interval parameter. | 2744 | rcutorture.shuffle_interval parameter. |
2739 | 2745 | ||
2740 | rcutorture.torture_type= [KNL,BOOT] | 2746 | rcutorture.torture_type= [KNL] |
2741 | Specify the RCU implementation to test. | 2747 | Specify the RCU implementation to test. |
2742 | 2748 | ||
2743 | rcutorture.verbose= [KNL,BOOT] | 2749 | rcutorture.verbose= [KNL] |
2744 | Enable additional printk() statements. | 2750 | Enable additional printk() statements. |
2745 | 2751 | ||
2752 | rcupdate.rcu_expedited= [KNL] | ||
2753 | Use expedited grace-period primitives, for | ||
2754 | example, synchronize_rcu_expedited() instead | ||
2755 | of synchronize_rcu(). This reduces latency, | ||
2756 | but can increase CPU utilization, degrade | ||
2757 | real-time latency, and degrade energy efficiency. | ||
2758 | |||
2759 | rcupdate.rcu_cpu_stall_suppress= [KNL] | ||
2760 | Suppress RCU CPU stall warning messages. | ||
2761 | |||
2762 | rcupdate.rcu_cpu_stall_timeout= [KNL] | ||
2763 | Set timeout for RCU CPU stall warning messages. | ||
2764 | |||
2746 | rdinit= [KNL] | 2765 | rdinit= [KNL] |
2747 | Format: <full_path> | 2766 | Format: <full_path> |
2748 | Run specified binary instead of /init from the ramdisk, | 2767 | Run specified binary instead of /init from the ramdisk, |
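The prefix changes above (rcutree.rcu_cpu_stall_* becoming rcupdate.rcu_cpu_stall_*) fall out of how module parameters are namespaced: built-in parameters are prefixed by the name of the defining file's module, so moving a definition renames its boot-line spelling unless the prefix is pinned. A sketch of the pinning idiom used by kernel/rcu/tree.c and kernel/rcu/torture.c below (example_knob is hypothetical):

    #include <linux/moduleparam.h>

    #ifdef MODULE_PARAM_PREFIX
    #undef MODULE_PARAM_PREFIX
    #endif
    #define MODULE_PARAM_PREFIX "rcutree."  /* keep the pre-rename prefix */

    static int example_knob;
    module_param(example_knob, int, 0444);  /* boot line: rcutree.example_knob=N */
    MODULE_PARM_DESC(example_knob, "Hypothetical parameter keeping the old prefix");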
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
index 32351bfabf20..827104fb9364 100644
--- a/Documentation/kernel-per-CPU-kthreads.txt
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -181,12 +181,17 @@ To reduce its OS jitter, do any of the following: | |||
181 | make sure that this is safe on your particular system. | 181 | make sure that this is safe on your particular system. |
182 | d. It is not possible to entirely get rid of OS jitter | 182 | d. It is not possible to entirely get rid of OS jitter |
183 | from vmstat_update() on CONFIG_SMP=y systems, but you | 183 | from vmstat_update() on CONFIG_SMP=y systems, but you |
184 | can decrease its frequency by writing a large value to | 184 | can decrease its frequency by writing a large value |
185 | /proc/sys/vm/stat_interval. The default value is HZ, | 185 | to /proc/sys/vm/stat_interval. The default value is |
186 | for an interval of one second. Of course, larger values | 186 | HZ, for an interval of one second. Of course, larger |
187 | will make your virtual-memory statistics update more | 187 | values will make your virtual-memory statistics update |
188 | slowly. Of course, you can also run your workload at | 188 | more slowly. Of course, you can also run your workload |
189 | a real-time priority, thus preempting vmstat_update(). | 189 | at a real-time priority, thus preempting vmstat_update(), |
190 | but if your workload is CPU-bound, this is a bad idea. | ||
191 | However, there is an RFC patch from Christoph Lameter | ||
192 | (based on an earlier one from Gilad Ben-Yossef) that | ||
193 | reduces or even eliminates vmstat overhead for some | ||
194 | workloads at https://lkml.org/lkml/2013/9/4/379. | ||
190 | e. If running on high-end powerpc servers, build with | 195 | e. If running on high-end powerpc servers, build with |
191 | CONFIG_PPC_RTAS_DAEMON=n. This prevents the RTAS | 196 | CONFIG_PPC_RTAS_DAEMON=n. This prevents the RTAS |
192 | daemon from running on each CPU every second or so. | 197 | daemon from running on each CPU every second or so. |
diff --git a/MAINTAINERS b/MAINTAINERS
index a7c34ef3509d..a97694dbf06f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6935,7 +6935,7 @@ M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | |||
6935 | S: Supported | 6935 | S: Supported |
6936 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git | 6936 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git |
6937 | F: Documentation/RCU/torture.txt | 6937 | F: Documentation/RCU/torture.txt |
6938 | F: kernel/rcutorture.c | 6938 | F: kernel/rcu/torture.c |
6939 | 6939 | ||
6940 | RDC R-321X SoC | 6940 | RDC R-321X SoC |
6941 | M: Florian Fainelli <florian@openwrt.org> | 6941 | M: Florian Fainelli <florian@openwrt.org> |
@@ -6962,8 +6962,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git | |||
6962 | F: Documentation/RCU/ | 6962 | F: Documentation/RCU/ |
6963 | X: Documentation/RCU/torture.txt | 6963 | X: Documentation/RCU/torture.txt |
6964 | F: include/linux/rcu* | 6964 | F: include/linux/rcu* |
6965 | F: kernel/rcu* | 6965 | X: include/linux/srcu.h |
6966 | X: kernel/rcutorture.c | 6966 | F: kernel/rcu/ |
6967 | X: kernel/rcu/torture.c | ||
6967 | 6968 | ||
6968 | REAL TIME CLOCK (RTC) SUBSYSTEM | 6969 | REAL TIME CLOCK (RTC) SUBSYSTEM |
6969 | M: Alessandro Zummo <a.zummo@towertech.it> | 6970 | M: Alessandro Zummo <a.zummo@towertech.it> |
@@ -7650,8 +7651,8 @@ M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | |||
7650 | W: http://www.rdrop.com/users/paulmck/RCU/ | 7651 | W: http://www.rdrop.com/users/paulmck/RCU/ |
7651 | S: Supported | 7652 | S: Supported |
7652 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git | 7653 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git |
7653 | F: include/linux/srcu* | 7654 | F: include/linux/srcu.h |
7654 | F: kernel/srcu* | 7655 | F: kernel/rcu/srcu.c |
7655 | 7656 | ||
7656 | SMACK SECURITY MODULE | 7657 | SMACK SECURITY MODULE |
7657 | M: Casey Schaufler <casey@schaufler-ca.com> | 7658 | M: Casey Schaufler <casey@schaufler-ca.com> |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4106721c4e5e..45a0a9e81478 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -19,6 +19,21 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers | ||
23 | * @list: list to be initialized | ||
24 | * | ||
25 | * You should instead use INIT_LIST_HEAD() for normal initialization and | ||
26 | * cleanup tasks, when readers have no access to the list being initialized. | ||
27 | * However, if the list being initialized is visible to readers, you | ||
28 | * need to keep the compiler from being too mischievous. | ||
29 | */ | ||
30 | static inline void INIT_LIST_HEAD_RCU(struct list_head *list) | ||
31 | { | ||
32 | ACCESS_ONCE(list->next) = list; | ||
33 | ACCESS_ONCE(list->prev) = list; | ||
34 | } | ||
35 | |||
36 | /* | ||
22 | * return the ->next pointer of a list_head in an rcu safe | 37 | * return the ->next pointer of a list_head in an rcu safe |
23 | * way, we must not access it directly | 38 | * way, we must not access it directly |
24 | */ | 39 | */ |
@@ -191,9 +206,13 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
191 | if (list_empty(list)) | 206 | if (list_empty(list)) |
192 | return; | 207 | return; |
193 | 208 | ||
194 | /* "first" and "last" tracking list, so initialize it. */ | 209 | /* |
210 | * "first" and "last" tracking list, so initialize it. RCU readers | ||
211 | * have access to this list, so we must use INIT_LIST_HEAD_RCU() | ||
212 | * instead of INIT_LIST_HEAD(). | ||
213 | */ | ||
195 | 214 | ||
196 | INIT_LIST_HEAD(list); | 215 | INIT_LIST_HEAD_RCU(list); |
197 | 216 | ||
198 | /* | 217 | /* |
199 | * At this point, the list body still points to the source list. | 218 | * At this point, the list body still points to the source list. |
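Usage-wise, the caller that needs the new initializer in this patch is list_splice_init_rcu(), whose source list can still be under traversal by readers while it is re-initialized. A hedged sketch of updater-side usage (the locking and lists are hypothetical):

    #include <linux/rculist.h>
    #include <linux/mutex.h>

    static LIST_HEAD(src);
    static LIST_HEAD(dst);
    static DEFINE_MUTEX(example_mutex);     /* serializes updaters */

    static void move_all_items(void)
    {
            mutex_lock(&example_mutex);
            /*
             * Readers may still be walking @src, so the splice
             * re-initializes it with INIT_LIST_HEAD_RCU() and waits
             * for those readers via the supplied synchronize_rcu().
             */
            list_splice_init_rcu(&src, &dst, synchronize_rcu);
            mutex_unlock(&example_mutex);
    }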
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f1f1bc39346b..39cbb889e20d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev, | |||
261 | rcu_irq_exit(); \ | 261 | rcu_irq_exit(); \ |
262 | } while (0) | 262 | } while (0) |
263 | 263 | ||
264 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) | ||
265 | extern bool __rcu_is_watching(void); | ||
266 | #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ | ||
267 | |||
264 | /* | 268 | /* |
265 | * Infrastructure to implement the synchronize_() primitives in | 269 | * Infrastructure to implement the synchronize_() primitives in |
266 | * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. | 270 | * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. |
@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) | |||
297 | } | 301 | } |
298 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 302 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
299 | 303 | ||
300 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) | ||
301 | extern int rcu_is_cpu_idle(void); | ||
302 | #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */ | ||
303 | |||
304 | #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) | 304 | #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) |
305 | bool rcu_lockdep_current_cpu_online(void); | 305 | bool rcu_lockdep_current_cpu_online(void); |
306 | #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ | 306 | #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ |
@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void) | |||
351 | { | 351 | { |
352 | if (!debug_lockdep_rcu_enabled()) | 352 | if (!debug_lockdep_rcu_enabled()) |
353 | return 1; | 353 | return 1; |
354 | if (rcu_is_cpu_idle()) | 354 | if (!rcu_is_watching()) |
355 | return 0; | 355 | return 0; |
356 | if (!rcu_lockdep_current_cpu_online()) | 356 | if (!rcu_lockdep_current_cpu_online()) |
357 | return 0; | 357 | return 0; |
@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
402 | 402 | ||
403 | if (!debug_lockdep_rcu_enabled()) | 403 | if (!debug_lockdep_rcu_enabled()) |
404 | return 1; | 404 | return 1; |
405 | if (rcu_is_cpu_idle()) | 405 | if (!rcu_is_watching()) |
406 | return 0; | 406 | return 0; |
407 | if (!rcu_lockdep_current_cpu_online()) | 407 | if (!rcu_lockdep_current_cpu_online()) |
408 | return 0; | 408 | return 0; |
@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void) | |||
771 | __rcu_read_lock(); | 771 | __rcu_read_lock(); |
772 | __acquire(RCU); | 772 | __acquire(RCU); |
773 | rcu_lock_acquire(&rcu_lock_map); | 773 | rcu_lock_acquire(&rcu_lock_map); |
774 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 774 | rcu_lockdep_assert(rcu_is_watching(), |
775 | "rcu_read_lock() used illegally while idle"); | 775 | "rcu_read_lock() used illegally while idle"); |
776 | } | 776 | } |
777 | 777 | ||
@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void) | |||
792 | */ | 792 | */ |
793 | static inline void rcu_read_unlock(void) | 793 | static inline void rcu_read_unlock(void) |
794 | { | 794 | { |
795 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 795 | rcu_lockdep_assert(rcu_is_watching(), |
796 | "rcu_read_unlock() used illegally while idle"); | 796 | "rcu_read_unlock() used illegally while idle"); |
797 | rcu_lock_release(&rcu_lock_map); | 797 | rcu_lock_release(&rcu_lock_map); |
798 | __release(RCU); | 798 | __release(RCU); |
@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void) | |||
821 | local_bh_disable(); | 821 | local_bh_disable(); |
822 | __acquire(RCU_BH); | 822 | __acquire(RCU_BH); |
823 | rcu_lock_acquire(&rcu_bh_lock_map); | 823 | rcu_lock_acquire(&rcu_bh_lock_map); |
824 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 824 | rcu_lockdep_assert(rcu_is_watching(), |
825 | "rcu_read_lock_bh() used illegally while idle"); | 825 | "rcu_read_lock_bh() used illegally while idle"); |
826 | } | 826 | } |
827 | 827 | ||
@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void) | |||
832 | */ | 832 | */ |
833 | static inline void rcu_read_unlock_bh(void) | 833 | static inline void rcu_read_unlock_bh(void) |
834 | { | 834 | { |
835 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 835 | rcu_lockdep_assert(rcu_is_watching(), |
836 | "rcu_read_unlock_bh() used illegally while idle"); | 836 | "rcu_read_unlock_bh() used illegally while idle"); |
837 | rcu_lock_release(&rcu_bh_lock_map); | 837 | rcu_lock_release(&rcu_bh_lock_map); |
838 | __release(RCU_BH); | 838 | __release(RCU_BH); |
@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void) | |||
857 | preempt_disable(); | 857 | preempt_disable(); |
858 | __acquire(RCU_SCHED); | 858 | __acquire(RCU_SCHED); |
859 | rcu_lock_acquire(&rcu_sched_lock_map); | 859 | rcu_lock_acquire(&rcu_sched_lock_map); |
860 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 860 | rcu_lockdep_assert(rcu_is_watching(), |
861 | "rcu_read_lock_sched() used illegally while idle"); | 861 | "rcu_read_lock_sched() used illegally while idle"); |
862 | } | 862 | } |
863 | 863 | ||
@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void) | |||
875 | */ | 875 | */ |
876 | static inline void rcu_read_unlock_sched(void) | 876 | static inline void rcu_read_unlock_sched(void) |
877 | { | 877 | { |
878 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 878 | rcu_lockdep_assert(rcu_is_watching(), |
879 | "rcu_read_unlock_sched() used illegally while idle"); | 879 | "rcu_read_unlock_sched() used illegally while idle"); |
880 | rcu_lock_release(&rcu_sched_lock_map); | 880 | rcu_lock_release(&rcu_sched_lock_map); |
881 | __release(RCU_SCHED); | 881 | __release(RCU_SCHED); |
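Note the polarity flip running through these hunks: rcu_is_watching() is true exactly when the old rcu_is_cpu_idle() was false, so every assertion changes from !rcu_is_cpu_idle() to rcu_is_watching(). A caller-side sketch (hypothetical reader function, not from this patch):

    static void example_reader(void)
    {
            rcu_lockdep_assert(rcu_is_watching(),
                               "example_reader() used illegally while idle");
            rcu_read_lock();
            /* ... rcu_dereference() protected pointers here ... */
            rcu_read_unlock();
    }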
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e31005ee339e..09ebcbe9fd78 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,4 +132,21 @@ static inline void rcu_scheduler_starting(void) | |||
132 | } | 132 | } |
133 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 133 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
134 | 134 | ||
135 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) | ||
136 | |||
137 | static inline bool rcu_is_watching(void) | ||
138 | { | ||
139 | return __rcu_is_watching(); | ||
140 | } | ||
141 | |||
142 | #else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ | ||
143 | |||
144 | static inline bool rcu_is_watching(void) | ||
145 | { | ||
146 | return true; | ||
147 | } | ||
148 | |||
149 | |||
150 | #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ | ||
151 | |||
135 | #endif /* __LINUX_RCUTINY_H */ | 152 | #endif /* __LINUX_RCUTINY_H */ |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 226169d1bd2b..4b9c81548742 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -90,4 +90,6 @@ extern void exit_rcu(void); | |||
90 | extern void rcu_scheduler_starting(void); | 90 | extern void rcu_scheduler_starting(void); |
91 | extern int rcu_scheduler_active __read_mostly; | 91 | extern int rcu_scheduler_active __read_mostly; |
92 | 92 | ||
93 | extern bool rcu_is_watching(void); | ||
94 | |||
93 | #endif /* __LINUX_RCUTREE_H */ | 95 | #endif /* __LINUX_RCUTREE_H */ |
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index ee2376cfaab3..aca382266411 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -39,15 +39,26 @@ TRACE_EVENT(rcu_utilization, | |||
39 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 39 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Tracepoint for grace-period events: starting and ending a grace | 42 | * Tracepoint for grace-period events. Takes a string identifying the |
43 | * period ("start" and "end", respectively), a CPU noting the start | 43 | * RCU flavor, the grace-period number, and a string identifying the |
44 | * of a new grace period or the end of an old grace period ("cpustart" | 44 | * grace-period-related event as follows: |
45 | * and "cpuend", respectively), a CPU passing through a quiescent | 45 | * |
46 | * state ("cpuqs"), a CPU coming online or going offline ("cpuonl" | 46 | * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL. |
47 | * and "cpuofl", respectively), a CPU being kicked for being too | 47 | * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL. |
48 | * long in dyntick-idle mode ("kick"), a CPU accelerating its new | 48 | * "newreq": Request a new grace period. |
49 | * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU | 49 | * "start": Start a grace period. |
50 | * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB"). | 50 | * "cpustart": CPU first notices a grace-period start. |
51 | * "cpuqs": CPU passes through a quiescent state. | ||
52 | * "cpuonl": CPU comes online. | ||
53 | * "cpuofl": CPU goes offline. | ||
54 | * "reqwait": GP kthread sleeps waiting for grace-period request. | ||
55 | * "reqwaitsig": GP kthread awakened by signal from reqwait state. | ||
56 | * "fqswait": GP kthread waiting until time to force quiescent states. | ||
57 | * "fqsstart": GP kthread starts forcing quiescent states. | ||
58 | * "fqsend": GP kthread done forcing quiescent states. | ||
59 | * "fqswaitsig": GP kthread awakened by signal from fqswait state. | ||
60 | * "end": End a grace period. | ||
61 | * "cpuend": CPU first notices a grace-period end. | ||
51 | */ | 62 | */ |
52 | TRACE_EVENT(rcu_grace_period, | 63 | TRACE_EVENT(rcu_grace_period, |
53 | 64 | ||
@@ -161,6 +172,46 @@ TRACE_EVENT(rcu_grace_period_init, | |||
161 | ); | 172 | ); |
162 | 173 | ||
163 | /* | 174 | /* |
175 | * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended | ||
176 | * to assist debugging of these handoffs. | ||
177 | * | ||
178 | * The first argument is the name of the RCU flavor, and the second is | ||
179 | * the number of the offloaded CPU. The third and final | ||
180 | * argument is a string as follows: | ||
181 | * | ||
182 | * "WakeEmpty": Wake rcuo kthread, first CB to empty list. | ||
183 | * "WakeOvf": Wake rcuo kthread, CB list is huge. | ||
184 | * "WakeNot": Don't wake rcuo kthread. | ||
185 | * "WakeNotPoll": Don't wake rcuo kthread because it is polling. | ||
186 | * "Poll": Start of new polling cycle for rcu_nocb_poll. | ||
187 | * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll. | ||
188 | * "WokeEmpty": rcuo kthread woke to find empty list. | ||
189 | * "WokeNonEmpty": rcuo kthread woke to find non-empty list. | ||
190 | * "WaitQueue": Enqueue partially done, timed wait for it to complete. | ||
191 | * "WokeQueue": Partial enqueue now complete. | ||
192 | */ | ||
193 | TRACE_EVENT(rcu_nocb_wake, | ||
194 | |||
195 | TP_PROTO(const char *rcuname, int cpu, const char *reason), | ||
196 | |||
197 | TP_ARGS(rcuname, cpu, reason), | ||
198 | |||
199 | TP_STRUCT__entry( | ||
200 | __field(const char *, rcuname) | ||
201 | __field(int, cpu) | ||
202 | __field(const char *, reason) | ||
203 | ), | ||
204 | |||
205 | TP_fast_assign( | ||
206 | __entry->rcuname = rcuname; | ||
207 | __entry->cpu = cpu; | ||
208 | __entry->reason = reason; | ||
209 | ), | ||
210 | |||
211 | TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason) | ||
212 | ); | ||
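A hypothetical call-site sketch for the tracepoint just defined (the reason string is one of the literals documented above; the rdp fields are assumptions about the no-CBs code in kernel/rcu/tree_plugin.h):

    trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeEmpty"));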
213 | |||
214 | /* | ||
164 | * Tracepoint for tasks blocking within preemptible-RCU read-side | 215 | * Tracepoint for tasks blocking within preemptible-RCU read-side |
165 | * critical sections. Track the type of RCU (which one day might | 216 | * critical sections. Track the type of RCU (which one day might |
166 | * include SRCU), the grace-period number that the task is blocking | 217 | * include SRCU), the grace-period number that the task is blocking |
@@ -540,17 +591,17 @@ TRACE_EVENT(rcu_invoke_kfree_callback, | |||
540 | TRACE_EVENT(rcu_batch_end, | 591 | TRACE_EVENT(rcu_batch_end, |
541 | 592 | ||
542 | TP_PROTO(const char *rcuname, int callbacks_invoked, | 593 | TP_PROTO(const char *rcuname, int callbacks_invoked, |
543 | bool cb, bool nr, bool iit, bool risk), | 594 | char cb, char nr, char iit, char risk), |
544 | 595 | ||
545 | TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), | 596 | TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), |
546 | 597 | ||
547 | TP_STRUCT__entry( | 598 | TP_STRUCT__entry( |
548 | __field(const char *, rcuname) | 599 | __field(const char *, rcuname) |
549 | __field(int, callbacks_invoked) | 600 | __field(int, callbacks_invoked) |
550 | __field(bool, cb) | 601 | __field(char, cb) |
551 | __field(bool, nr) | 602 | __field(char, nr) |
552 | __field(bool, iit) | 603 | __field(char, iit) |
553 | __field(bool, risk) | 604 | __field(char, risk) |
554 | ), | 605 | ), |
555 | 606 | ||
556 | TP_fast_assign( | 607 | TP_fast_assign( |
@@ -656,6 +707,7 @@ TRACE_EVENT(rcu_barrier, | |||
656 | #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \ | 707 | #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \ |
657 | level, grplo, grphi, event) \ | 708 | level, grplo, grphi, event) \ |
658 | do { } while (0) | 709 | do { } while (0) |
710 | #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0) | ||
659 | #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) | 711 | #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) |
660 | #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) | 712 | #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) |
661 | #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \ | 713 | #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \ |
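The rcu_batch_end change stores its four flags as char rather than bool, so callers normalize arbitrary truth values with the double-negation idiom, guaranteeing that exactly 0 or 1 lands in the one-byte field; the kernel/rcu/tiny.c hunk below does this for a pointer argument. Fragment sketch (rcp as in __rcu_process_callbacks()):

    char cbs_queued = !!ACCESS_ONCE(rcp->rcucblist);    /* strictly 0 or 1 */

    trace_rcu_batch_end(rcp->name, 0, cbs_queued, need_resched(),
                        is_idle_task(current), false);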
diff --git a/kernel/Makefile b/kernel/Makefile
index 1ce47553fb02..f99d908b5550 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -6,9 +6,9 @@ obj-y = fork.o exec_domain.o panic.o \ | |||
6 | cpu.o exit.o itimer.o time.o softirq.o resource.o \ | 6 | cpu.o exit.o itimer.o time.o softirq.o resource.o \ |
7 | sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ | 7 | sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ |
8 | signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ | 8 | signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ |
9 | rcupdate.o extable.o params.o posix-timers.o \ | 9 | extable.o params.o posix-timers.o \ |
10 | kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \ | 10 | kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \ |
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o semaphore.o \ |
12 | notifier.o ksysfs.o cred.o reboot.o \ | 12 | notifier.o ksysfs.o cred.o reboot.o \ |
13 | async.o range.o groups.o lglock.o smpboot.o | 13 | async.o range.o groups.o lglock.o smpboot.o |
14 | 14 | ||
@@ -27,6 +27,7 @@ obj-y += power/ | |||
27 | obj-y += printk/ | 27 | obj-y += printk/ |
28 | obj-y += cpu/ | 28 | obj-y += cpu/ |
29 | obj-y += irq/ | 29 | obj-y += irq/ |
30 | obj-y += rcu/ | ||
30 | 31 | ||
31 | obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o | 32 | obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o |
32 | obj-$(CONFIG_FREEZER) += freezer.o | 33 | obj-$(CONFIG_FREEZER) += freezer.o |
@@ -81,12 +82,6 @@ obj-$(CONFIG_KGDB) += debug/ | |||
81 | obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o | 82 | obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o |
82 | obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o | 83 | obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o |
83 | obj-$(CONFIG_SECCOMP) += seccomp.o | 84 | obj-$(CONFIG_SECCOMP) += seccomp.o |
84 | obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o | ||
85 | obj-$(CONFIG_TREE_RCU) += rcutree.o | ||
86 | obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o | ||
87 | obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o | ||
88 | obj-$(CONFIG_TINY_RCU) += rcutiny.o | ||
89 | obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o | ||
90 | obj-$(CONFIG_RELAY) += relay.o | 85 | obj-$(CONFIG_RELAY) += relay.o |
91 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o | 86 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o |
92 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o | 87 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e16c45b9ee77..4e8e14c34e42 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4224,7 +4224,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s) | |||
4224 | printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n", | 4224 | printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n", |
4225 | !rcu_lockdep_current_cpu_online() | 4225 | !rcu_lockdep_current_cpu_online() |
4226 | ? "RCU used illegally from offline CPU!\n" | 4226 | ? "RCU used illegally from offline CPU!\n" |
4227 | : rcu_is_cpu_idle() | 4227 | : !rcu_is_watching() |
4228 | ? "RCU used illegally from idle CPU!\n" | 4228 | ? "RCU used illegally from idle CPU!\n" |
4229 | : "", | 4229 | : "", |
4230 | rcu_scheduler_active, debug_locks); | 4230 | rcu_scheduler_active, debug_locks); |
@@ -4247,7 +4247,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s) | |||
4247 | * So complain bitterly if someone does call rcu_read_lock(), | 4247 | * So complain bitterly if someone does call rcu_read_lock(), |
4248 | * rcu_read_lock_bh() and so on from extended quiescent states. | 4248 | * rcu_read_lock_bh() and so on from extended quiescent states. |
4249 | */ | 4249 | */ |
4250 | if (rcu_is_cpu_idle()) | 4250 | if (!rcu_is_watching()) |
4251 | printk("RCU used illegally from extended quiescent state!\n"); | 4251 | printk("RCU used illegally from extended quiescent state!\n"); |
4252 | 4252 | ||
4253 | lockdep_print_held_locks(curr); | 4253 | lockdep_print_held_locks(curr); |
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
new file mode 100644
index 000000000000..01e9ec37a3e3
--- /dev/null
+++ b/kernel/rcu/Makefile
@@ -0,0 +1,6 @@ | |||
1 | obj-y += update.o srcu.o | ||
2 | obj-$(CONFIG_RCU_TORTURE_TEST) += torture.o | ||
3 | obj-$(CONFIG_TREE_RCU) += tree.o | ||
4 | obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o | ||
5 | obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o | ||
6 | obj-$(CONFIG_TINY_RCU) += tiny.o | ||
diff --git a/kernel/rcu.h b/kernel/rcu/rcu.h
index 77131966c4ad..7859a0a3951e 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -122,4 +122,11 @@ int rcu_jiffies_till_stall_check(void); | |||
122 | 122 | ||
123 | #endif /* #ifdef CONFIG_RCU_STALL_COMMON */ | 123 | #endif /* #ifdef CONFIG_RCU_STALL_COMMON */ |
124 | 124 | ||
125 | /* | ||
126 | * Strings used in tracepoints need to be exported via the | ||
127 | * tracing system such that tools like perf and trace-cmd can | ||
128 | * translate the string address pointers to actual text. | ||
129 | */ | ||
130 | #define TPS(x) tracepoint_string(x) | ||
131 | |||
125 | #endif /* __LINUX_RCU_H */ | 132 | #endif /* __LINUX_RCU_H */ |
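TPS() moves here so that both the tree and tiny implementations can share it. The point of tracepoint_string() is that a trace record carries only the address of the string literal; exporting the literal lets perf and trace-cmd resolve that address back to text. Call-site sketch, as used in kernel/rcu/tiny.c below:

    RCU_TRACE(trace_rcu_dyntick(TPS("Start"), rcu_dynticks_nesting, newval));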
diff --git a/kernel/srcu.c b/kernel/rcu/srcu.c
index 01d5ccb8bfe3..01d5ccb8bfe3 100644
--- a/kernel/srcu.c
+++ b/kernel/rcu/srcu.c
diff --git a/kernel/rcutiny.c b/kernel/rcu/tiny.c
index 9ed6075dc562..0c9a934cfec1 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcu/tiny.c
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/time.h> | 35 | #include <linux/time.h> |
36 | #include <linux/cpu.h> | 36 | #include <linux/cpu.h> |
37 | #include <linux/prefetch.h> | 37 | #include <linux/prefetch.h> |
38 | #include <linux/ftrace_event.h> | ||
38 | 39 | ||
39 | #ifdef CONFIG_RCU_TRACE | 40 | #ifdef CONFIG_RCU_TRACE |
40 | #include <trace/events/rcu.h> | 41 | #include <trace/events/rcu.h> |
@@ -42,7 +43,7 @@ | |||
42 | 43 | ||
43 | #include "rcu.h" | 44 | #include "rcu.h" |
44 | 45 | ||
45 | /* Forward declarations for rcutiny_plugin.h. */ | 46 | /* Forward declarations for tiny_plugin.h. */ |
46 | struct rcu_ctrlblk; | 47 | struct rcu_ctrlblk; |
47 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); | 48 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); |
48 | static void rcu_process_callbacks(struct softirq_action *unused); | 49 | static void rcu_process_callbacks(struct softirq_action *unused); |
@@ -52,22 +53,23 @@ static void __call_rcu(struct rcu_head *head, | |||
52 | 53 | ||
53 | static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; | 54 | static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; |
54 | 55 | ||
55 | #include "rcutiny_plugin.h" | 56 | #include "tiny_plugin.h" |
56 | 57 | ||
57 | /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */ | 58 | /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */ |
58 | static void rcu_idle_enter_common(long long newval) | 59 | static void rcu_idle_enter_common(long long newval) |
59 | { | 60 | { |
60 | if (newval) { | 61 | if (newval) { |
61 | RCU_TRACE(trace_rcu_dyntick("--=", | 62 | RCU_TRACE(trace_rcu_dyntick(TPS("--="), |
62 | rcu_dynticks_nesting, newval)); | 63 | rcu_dynticks_nesting, newval)); |
63 | rcu_dynticks_nesting = newval; | 64 | rcu_dynticks_nesting = newval; |
64 | return; | 65 | return; |
65 | } | 66 | } |
66 | RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval)); | 67 | RCU_TRACE(trace_rcu_dyntick(TPS("Start"), |
68 | rcu_dynticks_nesting, newval)); | ||
67 | if (!is_idle_task(current)) { | 69 | if (!is_idle_task(current)) { |
68 | struct task_struct *idle = idle_task(smp_processor_id()); | 70 | struct task_struct *idle __maybe_unused = idle_task(smp_processor_id()); |
69 | 71 | ||
70 | RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task", | 72 | RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"), |
71 | rcu_dynticks_nesting, newval)); | 73 | rcu_dynticks_nesting, newval)); |
72 | ftrace_dump(DUMP_ALL); | 74 | ftrace_dump(DUMP_ALL); |
73 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", | 75 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", |
@@ -120,15 +122,15 @@ EXPORT_SYMBOL_GPL(rcu_irq_exit); | |||
120 | static void rcu_idle_exit_common(long long oldval) | 122 | static void rcu_idle_exit_common(long long oldval) |
121 | { | 123 | { |
122 | if (oldval) { | 124 | if (oldval) { |
123 | RCU_TRACE(trace_rcu_dyntick("++=", | 125 | RCU_TRACE(trace_rcu_dyntick(TPS("++="), |
124 | oldval, rcu_dynticks_nesting)); | 126 | oldval, rcu_dynticks_nesting)); |
125 | return; | 127 | return; |
126 | } | 128 | } |
127 | RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting)); | 129 | RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting)); |
128 | if (!is_idle_task(current)) { | 130 | if (!is_idle_task(current)) { |
129 | struct task_struct *idle = idle_task(smp_processor_id()); | 131 | struct task_struct *idle __maybe_unused = idle_task(smp_processor_id()); |
130 | 132 | ||
131 | RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task", | 133 | RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"), |
132 | oldval, rcu_dynticks_nesting)); | 134 | oldval, rcu_dynticks_nesting)); |
133 | ftrace_dump(DUMP_ALL); | 135 | ftrace_dump(DUMP_ALL); |
134 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", | 136 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", |
@@ -174,18 +176,18 @@ void rcu_irq_enter(void) | |||
174 | } | 176 | } |
175 | EXPORT_SYMBOL_GPL(rcu_irq_enter); | 177 | EXPORT_SYMBOL_GPL(rcu_irq_enter); |
176 | 178 | ||
177 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 179 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) |
178 | 180 | ||
179 | /* | 181 | /* |
180 | * Test whether RCU thinks that the current CPU is idle. | 182 | * Test whether RCU thinks that the current CPU is idle. |
181 | */ | 183 | */ |
182 | int rcu_is_cpu_idle(void) | 184 | bool __rcu_is_watching(void) |
183 | { | 185 | { |
184 | return !rcu_dynticks_nesting; | 186 | return rcu_dynticks_nesting; |
185 | } | 187 | } |
186 | EXPORT_SYMBOL(rcu_is_cpu_idle); | 188 | EXPORT_SYMBOL(__rcu_is_watching); |
187 | 189 | ||
188 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 190 | #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ |
189 | 191 | ||
190 | /* | 192 | /* |
191 | * Test whether the current CPU was interrupted from idle. Nested | 193 | * Test whether the current CPU was interrupted from idle. Nested |
@@ -273,7 +275,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | |||
273 | if (&rcp->rcucblist == rcp->donetail) { | 275 | if (&rcp->rcucblist == rcp->donetail) { |
274 | RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1)); | 276 | RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1)); |
275 | RCU_TRACE(trace_rcu_batch_end(rcp->name, 0, | 277 | RCU_TRACE(trace_rcu_batch_end(rcp->name, 0, |
276 | ACCESS_ONCE(rcp->rcucblist), | 278 | !!ACCESS_ONCE(rcp->rcucblist), |
277 | need_resched(), | 279 | need_resched(), |
278 | is_idle_task(current), | 280 | is_idle_task(current), |
279 | false)); | 281 | false)); |
@@ -304,7 +306,8 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | |||
304 | RCU_TRACE(cb_count++); | 306 | RCU_TRACE(cb_count++); |
305 | } | 307 | } |
306 | RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count)); | 308 | RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count)); |
307 | RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(), | 309 | RCU_TRACE(trace_rcu_batch_end(rcp->name, |
310 | cb_count, 0, need_resched(), | ||
308 | is_idle_task(current), | 311 | is_idle_task(current), |
309 | false)); | 312 | false)); |
310 | } | 313 | } |
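As with the tree implementation, the predicate's sense is inverted relative to the removed interface: Tiny RCU is watching whenever the dyntick nesting count is nonzero. An equivalence sketch (hypothetical helper, not in the patch):

    static inline bool old_rcu_is_cpu_idle(void)
    {
            return !__rcu_is_watching();    /* idle <=> not watching */
    }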
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 280d06cae352..280d06cae352 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
diff --git a/kernel/rcutorture.c b/kernel/rcu/torture.c
index be63101c6175..3929cd451511 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcu/torture.c
@@ -52,6 +52,12 @@ | |||
52 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
53 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); | 53 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); |
54 | 54 | ||
55 | MODULE_ALIAS("rcutorture"); | ||
56 | #ifdef MODULE_PARAM_PREFIX | ||
57 | #undef MODULE_PARAM_PREFIX | ||
58 | #endif | ||
59 | #define MODULE_PARAM_PREFIX "rcutorture." | ||
60 | |||
55 | static int fqs_duration; | 61 | static int fqs_duration; |
56 | module_param(fqs_duration, int, 0444); | 62 | module_param(fqs_duration, int, 0444); |
57 | MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable"); | 63 | MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable"); |
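Renaming kernel/rcutorture.c to kernel/rcu/torture.c would otherwise change both the module's name and its parameter prefix; the MODULE_ALIAS() plus the pinned MODULE_PARAM_PREFIX keep "modprobe rcutorture" and existing boot lines working. A hypothetical new parameter added after the rename would still appear under the old prefix:

    static int example_param;
    module_param(example_param, int, 0444); /* still rcutorture.example_param= */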
diff --git a/kernel/rcutree.c b/kernel/rcu/tree.c
index 32618b3fe4e6..8a2c81e86dda 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcu/tree.c
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/export.h> | 41 | #include <linux/export.h> |
42 | #include <linux/completion.h> | 42 | #include <linux/completion.h> |
43 | #include <linux/moduleparam.h> | 43 | #include <linux/moduleparam.h> |
44 | #include <linux/module.h> | ||
44 | #include <linux/percpu.h> | 45 | #include <linux/percpu.h> |
45 | #include <linux/notifier.h> | 46 | #include <linux/notifier.h> |
46 | #include <linux/cpu.h> | 47 | #include <linux/cpu.h> |
@@ -56,17 +57,16 @@ | |||
56 | #include <linux/ftrace_event.h> | 57 | #include <linux/ftrace_event.h> |
57 | #include <linux/suspend.h> | 58 | #include <linux/suspend.h> |
58 | 59 | ||
59 | #include "rcutree.h" | 60 | #include "tree.h" |
60 | #include <trace/events/rcu.h> | 61 | #include <trace/events/rcu.h> |
61 | 62 | ||
62 | #include "rcu.h" | 63 | #include "rcu.h" |
63 | 64 | ||
64 | /* | 65 | MODULE_ALIAS("rcutree"); |
65 | * Strings used in tracepoints need to be exported via the | 66 | #ifdef MODULE_PARAM_PREFIX |
66 | * tracing system such that tools like perf and trace-cmd can | 67 | #undef MODULE_PARAM_PREFIX |
67 | * translate the string address pointers to actual text. | 68 | #endif |
68 | */ | 69 | #define MODULE_PARAM_PREFIX "rcutree." |
69 | #define TPS(x) tracepoint_string(x) | ||
70 | 70 | ||
71 | /* Data structures. */ | 71 | /* Data structures. */ |
72 | 72 | ||
@@ -222,7 +222,7 @@ void rcu_note_context_switch(int cpu) | |||
222 | } | 222 | } |
223 | EXPORT_SYMBOL_GPL(rcu_note_context_switch); | 223 | EXPORT_SYMBOL_GPL(rcu_note_context_switch); |
224 | 224 | ||
225 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | 225 | static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { |
226 | .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, | 226 | .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, |
227 | .dynticks = ATOMIC_INIT(1), | 227 | .dynticks = ATOMIC_INIT(1), |
228 | #ifdef CONFIG_NO_HZ_FULL_SYSIDLE | 228 | #ifdef CONFIG_NO_HZ_FULL_SYSIDLE |
@@ -371,7 +371,8 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, | |||
371 | { | 371 | { |
372 | trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting); | 372 | trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting); |
373 | if (!user && !is_idle_task(current)) { | 373 | if (!user && !is_idle_task(current)) { |
374 | struct task_struct *idle = idle_task(smp_processor_id()); | 374 | struct task_struct *idle __maybe_unused = |
375 | idle_task(smp_processor_id()); | ||
375 | 376 | ||
376 | trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0); | 377 | trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0); |
377 | ftrace_dump(DUMP_ORIG); | 378 | ftrace_dump(DUMP_ORIG); |
@@ -407,7 +408,7 @@ static void rcu_eqs_enter(bool user) | |||
407 | long long oldval; | 408 | long long oldval; |
408 | struct rcu_dynticks *rdtp; | 409 | struct rcu_dynticks *rdtp; |
409 | 410 | ||
410 | rdtp = &__get_cpu_var(rcu_dynticks); | 411 | rdtp = this_cpu_ptr(&rcu_dynticks); |
411 | oldval = rdtp->dynticks_nesting; | 412 | oldval = rdtp->dynticks_nesting; |
412 | WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0); | 413 | WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0); |
413 | if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) | 414 | if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) |
@@ -435,7 +436,7 @@ void rcu_idle_enter(void) | |||
435 | 436 | ||
436 | local_irq_save(flags); | 437 | local_irq_save(flags); |
437 | rcu_eqs_enter(false); | 438 | rcu_eqs_enter(false); |
438 | rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0); | 439 | rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0); |
439 | local_irq_restore(flags); | 440 | local_irq_restore(flags); |
440 | } | 441 | } |
441 | EXPORT_SYMBOL_GPL(rcu_idle_enter); | 442 | EXPORT_SYMBOL_GPL(rcu_idle_enter); |
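The &__get_cpu_var(x) to this_cpu_ptr(&x) conversions running through this file are behavior-preserving: __get_cpu_var() yields this CPU's instance as an lvalue whose address is then taken, while this_cpu_ptr() produces the pointer directly and is the interface the kernel was converging on. Side-by-side sketch:

	static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);

	struct rcu_dynticks *old_way = &__get_cpu_var(rcu_dynticks);	/* lvalue, then address-of */
	struct rcu_dynticks *new_way = this_cpu_ptr(&rcu_dynticks);	/* pointer directly */

Both forms assume the task cannot migrate between computing the address and using it, which the converted sites arrange via local_irq_save(), preempt_disable(), or NMI context.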
@@ -478,7 +479,7 @@ void rcu_irq_exit(void) | |||
478 | struct rcu_dynticks *rdtp; | 479 | struct rcu_dynticks *rdtp; |
479 | 480 | ||
480 | local_irq_save(flags); | 481 | local_irq_save(flags); |
481 | rdtp = &__get_cpu_var(rcu_dynticks); | 482 | rdtp = this_cpu_ptr(&rcu_dynticks); |
482 | oldval = rdtp->dynticks_nesting; | 483 | oldval = rdtp->dynticks_nesting; |
483 | rdtp->dynticks_nesting--; | 484 | rdtp->dynticks_nesting--; |
484 | WARN_ON_ONCE(rdtp->dynticks_nesting < 0); | 485 | WARN_ON_ONCE(rdtp->dynticks_nesting < 0); |
@@ -508,7 +509,8 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval, | |||
508 | rcu_cleanup_after_idle(smp_processor_id()); | 509 | rcu_cleanup_after_idle(smp_processor_id()); |
509 | trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting); | 510 | trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting); |
510 | if (!user && !is_idle_task(current)) { | 511 | if (!user && !is_idle_task(current)) { |
511 | struct task_struct *idle = idle_task(smp_processor_id()); | 512 | struct task_struct *idle __maybe_unused = |
513 | idle_task(smp_processor_id()); | ||
512 | 514 | ||
513 | trace_rcu_dyntick(TPS("Error on exit: not idle task"), | 515 | trace_rcu_dyntick(TPS("Error on exit: not idle task"), |
514 | oldval, rdtp->dynticks_nesting); | 516 | oldval, rdtp->dynticks_nesting); |
@@ -528,7 +530,7 @@ static void rcu_eqs_exit(bool user) | |||
528 | struct rcu_dynticks *rdtp; | 530 | struct rcu_dynticks *rdtp; |
529 | long long oldval; | 531 | long long oldval; |
530 | 532 | ||
531 | rdtp = &__get_cpu_var(rcu_dynticks); | 533 | rdtp = this_cpu_ptr(&rcu_dynticks); |
532 | oldval = rdtp->dynticks_nesting; | 534 | oldval = rdtp->dynticks_nesting; |
533 | WARN_ON_ONCE(oldval < 0); | 535 | WARN_ON_ONCE(oldval < 0); |
534 | if (oldval & DYNTICK_TASK_NEST_MASK) | 536 | if (oldval & DYNTICK_TASK_NEST_MASK) |
@@ -555,7 +557,7 @@ void rcu_idle_exit(void) | |||
555 | 557 | ||
556 | local_irq_save(flags); | 558 | local_irq_save(flags); |
557 | rcu_eqs_exit(false); | 559 | rcu_eqs_exit(false); |
558 | rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0); | 560 | rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0); |
559 | local_irq_restore(flags); | 561 | local_irq_restore(flags); |
560 | } | 562 | } |
561 | EXPORT_SYMBOL_GPL(rcu_idle_exit); | 563 | EXPORT_SYMBOL_GPL(rcu_idle_exit); |
@@ -599,7 +601,7 @@ void rcu_irq_enter(void) | |||
599 | long long oldval; | 601 | long long oldval; |
600 | 602 | ||
601 | local_irq_save(flags); | 603 | local_irq_save(flags); |
602 | rdtp = &__get_cpu_var(rcu_dynticks); | 604 | rdtp = this_cpu_ptr(&rcu_dynticks); |
603 | oldval = rdtp->dynticks_nesting; | 605 | oldval = rdtp->dynticks_nesting; |
604 | rdtp->dynticks_nesting++; | 606 | rdtp->dynticks_nesting++; |
605 | WARN_ON_ONCE(rdtp->dynticks_nesting == 0); | 607 | WARN_ON_ONCE(rdtp->dynticks_nesting == 0); |
@@ -620,7 +622,7 @@ void rcu_irq_enter(void) | |||
620 | */ | 622 | */ |
621 | void rcu_nmi_enter(void) | 623 | void rcu_nmi_enter(void) |
622 | { | 624 | { |
623 | struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); | 625 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); |
624 | 626 | ||
625 | if (rdtp->dynticks_nmi_nesting == 0 && | 627 | if (rdtp->dynticks_nmi_nesting == 0 && |
626 | (atomic_read(&rdtp->dynticks) & 0x1)) | 628 | (atomic_read(&rdtp->dynticks) & 0x1)) |
@@ -642,7 +644,7 @@ void rcu_nmi_enter(void) | |||
642 | */ | 644 | */ |
643 | void rcu_nmi_exit(void) | 645 | void rcu_nmi_exit(void) |
644 | { | 646 | { |
645 | struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); | 647 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); |
646 | 648 | ||
647 | if (rdtp->dynticks_nmi_nesting == 0 || | 649 | if (rdtp->dynticks_nmi_nesting == 0 || |
648 | --rdtp->dynticks_nmi_nesting != 0) | 650 | --rdtp->dynticks_nmi_nesting != 0) |
@@ -655,21 +657,34 @@ void rcu_nmi_exit(void) | |||
655 | } | 657 | } |
656 | 658 | ||
657 | /** | 659 | /** |
658 | * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle | 660 | * __rcu_is_watching - are RCU read-side critical sections safe? |
661 | * | ||
662 | * Return true if RCU is watching the running CPU, which means that | ||
663 | * this CPU can safely enter RCU read-side critical sections. Unlike | ||
664 | * rcu_is_watching(), the caller of __rcu_is_watching() must have at | ||
665 | * least disabled preemption. | ||
666 | */ | ||
667 | bool __rcu_is_watching(void) | ||
668 | { | ||
669 | return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1; | ||
670 | } | ||
671 | |||
672 | /** | ||
673 | * rcu_is_watching - see if RCU thinks that the current CPU is idle | ||
659 | * | 674 | * |
660 | * If the current CPU is in its idle loop and is neither in an interrupt | 675 | * If the current CPU is in its idle loop and is neither in an interrupt |
661 | * nor an NMI handler, return true. | 676 | * nor an NMI handler, return true. |
662 | */ | 677 | */ |
663 | int rcu_is_cpu_idle(void) | 678 | bool rcu_is_watching(void) |
664 | { | 679 | { |
665 | int ret; | 680 | int ret; |
666 | 681 | ||
667 | preempt_disable(); | 682 | preempt_disable(); |
668 | ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0; | 683 | ret = __rcu_is_watching(); |
669 | preempt_enable(); | 684 | preempt_enable(); |
670 | return ret; | 685 | return ret; |
671 | } | 686 | } |
672 | EXPORT_SYMBOL(rcu_is_cpu_idle); | 687 | EXPORT_SYMBOL_GPL(rcu_is_watching); |
673 | 688 | ||
674 | #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) | 689 | #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) |
675 | 690 | ||
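The split into __rcu_is_watching() and rcu_is_watching() separates the check from its protection: an odd ->dynticks value means RCU is watching this CPU, the double-underscore form reads the per-CPU counter directly and so requires that the caller already prevent migration, and the plain form adds preempt_disable()/preempt_enable() so it is callable from any context. A hedged usage sketch:

	/* From preemptible context (the wrapper handles preemption): */
	WARN_ON_ONCE(!rcu_is_watching());

	/* From code that is already non-preemptible (IRQ, NMI, or
	 * under preempt_disable()), the raw form suffices: */
	WARN_ON_ONCE(!__rcu_is_watching());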
@@ -703,7 +718,7 @@ bool rcu_lockdep_current_cpu_online(void) | |||
703 | if (in_nmi()) | 718 | if (in_nmi()) |
704 | return 1; | 719 | return 1; |
705 | preempt_disable(); | 720 | preempt_disable(); |
706 | rdp = &__get_cpu_var(rcu_sched_data); | 721 | rdp = this_cpu_ptr(&rcu_sched_data); |
707 | rnp = rdp->mynode; | 722 | rnp = rdp->mynode; |
708 | ret = (rdp->grpmask & rnp->qsmaskinit) || | 723 | ret = (rdp->grpmask & rnp->qsmaskinit) || |
709 | !rcu_scheduler_fully_active; | 724 | !rcu_scheduler_fully_active; |
@@ -723,7 +738,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); | |||
723 | */ | 738 | */ |
724 | static int rcu_is_cpu_rrupt_from_idle(void) | 739 | static int rcu_is_cpu_rrupt_from_idle(void) |
725 | { | 740 | { |
726 | return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1; | 741 | return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1; |
727 | } | 742 | } |
728 | 743 | ||
729 | /* | 744 | /* |
@@ -802,8 +817,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, | |||
802 | 817 | ||
803 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 818 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
804 | { | 819 | { |
805 | rsp->gp_start = jiffies; | 820 | unsigned long j = ACCESS_ONCE(jiffies); |
806 | rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check(); | 821 | |
822 | rsp->gp_start = j; | ||
823 | smp_wmb(); /* Record start time before stall time. */ | ||
824 | rsp->jiffies_stall = j + rcu_jiffies_till_stall_check(); | ||
807 | } | 825 | } |
808 | 826 | ||
809 | /* | 827 | /* |
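The new smp_wmb() pairs with the smp_rmb() calls added to check_cpu_stall() in the next hunk: the writer publishes ->gp_start before ->jiffies_stall, readers fetch ->jiffies_stall before ->gp_start, so any reader that sees the new stall deadline also sees the new start time. The generic shape of the pairing (field names illustrative):

	/* Writer: make A visible no later than B. */
	s->a = new_a;
	smp_wmb();			/* pairs with the reader's smp_rmb() */
	s->b = new_b;

	/* Reader: fetch in the opposite order. */
	b = ACCESS_ONCE(s->b);
	smp_rmb();
	a = ACCESS_ONCE(s->a);

If the reader observes new_b, the barriers guarantee it also observes new_a.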
@@ -932,17 +950,48 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
932 | 950 | ||
933 | static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | 951 | static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) |
934 | { | 952 | { |
953 | unsigned long completed; | ||
954 | unsigned long gpnum; | ||
955 | unsigned long gps; | ||
935 | unsigned long j; | 956 | unsigned long j; |
936 | unsigned long js; | 957 | unsigned long js; |
937 | struct rcu_node *rnp; | 958 | struct rcu_node *rnp; |
938 | 959 | ||
939 | if (rcu_cpu_stall_suppress) | 960 | if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp)) |
940 | return; | 961 | return; |
941 | j = ACCESS_ONCE(jiffies); | 962 | j = ACCESS_ONCE(jiffies); |
963 | |||
964 | /* | ||
965 | * Lots of memory barriers to reject false positives. | ||
966 | * | ||
967 | * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall, | ||
968 | * then rsp->gp_start, and finally rsp->completed. These values | ||
969 | * are updated in the opposite order with memory barriers (or | ||
970 | * equivalent) during grace-period initialization and cleanup. | ||
971 | * Now, a false positive can occur if we get a new value of | ||
972 | * rsp->gp_start and an old value of rsp->jiffies_stall. But given | ||
973 | * the memory barriers, the only way that this can happen is if one | ||
974 | * grace period ends and another starts between these two fetches. | ||
975 | * Detect this by comparing rsp->completed with the previous fetch | ||
976 | * from rsp->gpnum. | ||
977 | * | ||
978 | * Given this check, comparisons of jiffies, rsp->jiffies_stall, | ||
979 | * and rsp->gp_start suffice to forestall false positives. | ||
980 | */ | ||
981 | gpnum = ACCESS_ONCE(rsp->gpnum); | ||
982 | smp_rmb(); /* Pick up ->gpnum first... */ | ||
942 | js = ACCESS_ONCE(rsp->jiffies_stall); | 983 | js = ACCESS_ONCE(rsp->jiffies_stall); |
984 | smp_rmb(); /* ...then ->jiffies_stall before the rest... */ | ||
985 | gps = ACCESS_ONCE(rsp->gp_start); | ||
986 | smp_rmb(); /* ...and finally ->gp_start before ->completed. */ | ||
987 | completed = ACCESS_ONCE(rsp->completed); | ||
988 | if (ULONG_CMP_GE(completed, gpnum) || | ||
989 | ULONG_CMP_LT(j, js) || | ||
990 | ULONG_CMP_GE(gps, js)) | ||
991 | return; /* No stall or GP completed since entering function. */ | ||
943 | rnp = rdp->mynode; | 992 | rnp = rdp->mynode; |
944 | if (rcu_gp_in_progress(rsp) && | 993 | if (rcu_gp_in_progress(rsp) && |
945 | (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) { | 994 | (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) { |
946 | 995 | ||
947 | /* We haven't checked in, so go dump stack. */ | 996 | /* We haven't checked in, so go dump stack. */ |
948 | print_cpu_stall(rsp); | 997 | print_cpu_stall(rsp); |
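ULONG_CMP_GE() and ULONG_CMP_LT() are wraparound-tolerant replacements for >= and < on unsigned long counters such as jiffies and grace-period numbers. As defined in include/linux/rcupdate.h of this era (reproduced from memory, so verify against the tree):

	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

	/* Stays correct even if jiffies has wrapped past the deadline: */
	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
		; /* the stall-warning deadline has passed */

The unsigned subtraction keeps the comparison meaningful as long as the two values are within ULONG_MAX/2 of each other.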
@@ -1297,7 +1346,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1297 | } | 1346 | } |
1298 | 1347 | ||
1299 | /* | 1348 | /* |
1300 | * Initialize a new grace period. | 1349 | * Initialize a new grace period. Return 0 if no grace period required. |
1301 | */ | 1350 | */ |
1302 | static int rcu_gp_init(struct rcu_state *rsp) | 1351 | static int rcu_gp_init(struct rcu_state *rsp) |
1303 | { | 1352 | { |
@@ -1306,18 +1355,27 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1306 | 1355 | ||
1307 | rcu_bind_gp_kthread(); | 1356 | rcu_bind_gp_kthread(); |
1308 | raw_spin_lock_irq(&rnp->lock); | 1357 | raw_spin_lock_irq(&rnp->lock); |
1358 | if (rsp->gp_flags == 0) { | ||
1359 | /* Spurious wakeup, tell caller to go back to sleep. */ | ||
1360 | raw_spin_unlock_irq(&rnp->lock); | ||
1361 | return 0; | ||
1362 | } | ||
1309 | rsp->gp_flags = 0; /* Clear all flags: New grace period. */ | 1363 | rsp->gp_flags = 0; /* Clear all flags: New grace period. */ |
1310 | 1364 | ||
1311 | if (rcu_gp_in_progress(rsp)) { | 1365 | if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) { |
1312 | /* Grace period already in progress, don't start another. */ | 1366 | /* |
1367 | * Grace period already in progress, don't start another. | ||
1368 | * Not supposed to be able to happen. | ||
1369 | */ | ||
1313 | raw_spin_unlock_irq(&rnp->lock); | 1370 | raw_spin_unlock_irq(&rnp->lock); |
1314 | return 0; | 1371 | return 0; |
1315 | } | 1372 | } |
1316 | 1373 | ||
1317 | /* Advance to a new grace period and initialize state. */ | 1374 | /* Advance to a new grace period and initialize state. */ |
1375 | record_gp_stall_check_time(rsp); | ||
1376 | smp_wmb(); /* Record GP times before starting GP. */ | ||
1318 | rsp->gpnum++; | 1377 | rsp->gpnum++; |
1319 | trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start")); | 1378 | trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start")); |
1320 | record_gp_stall_check_time(rsp); | ||
1321 | raw_spin_unlock_irq(&rnp->lock); | 1379 | raw_spin_unlock_irq(&rnp->lock); |
1322 | 1380 | ||
1323 | /* Exclude any concurrent CPU-hotplug operations. */ | 1381 | /* Exclude any concurrent CPU-hotplug operations. */ |
@@ -1366,7 +1424,7 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1366 | /* | 1424 | /* |
1367 | * Do one round of quiescent-state forcing. | 1425 | * Do one round of quiescent-state forcing. |
1368 | */ | 1426 | */ |
1369 | int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) | 1427 | static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) |
1370 | { | 1428 | { |
1371 | int fqs_state = fqs_state_in; | 1429 | int fqs_state = fqs_state_in; |
1372 | bool isidle = false; | 1430 | bool isidle = false; |
@@ -1451,8 +1509,12 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) | |||
1451 | rsp->fqs_state = RCU_GP_IDLE; | 1509 | rsp->fqs_state = RCU_GP_IDLE; |
1452 | rdp = this_cpu_ptr(rsp->rda); | 1510 | rdp = this_cpu_ptr(rsp->rda); |
1453 | rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */ | 1511 | rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */ |
1454 | if (cpu_needs_another_gp(rsp, rdp)) | 1512 | if (cpu_needs_another_gp(rsp, rdp)) { |
1455 | rsp->gp_flags = 1; | 1513 | rsp->gp_flags = RCU_GP_FLAG_INIT; |
1514 | trace_rcu_grace_period(rsp->name, | ||
1515 | ACCESS_ONCE(rsp->gpnum), | ||
1516 | TPS("newreq")); | ||
1517 | } | ||
1456 | raw_spin_unlock_irq(&rnp->lock); | 1518 | raw_spin_unlock_irq(&rnp->lock); |
1457 | } | 1519 | } |
1458 | 1520 | ||
@@ -1462,6 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) | |||
1462 | static int __noreturn rcu_gp_kthread(void *arg) | 1524 | static int __noreturn rcu_gp_kthread(void *arg) |
1463 | { | 1525 | { |
1464 | int fqs_state; | 1526 | int fqs_state; |
1527 | int gf; | ||
1465 | unsigned long j; | 1528 | unsigned long j; |
1466 | int ret; | 1529 | int ret; |
1467 | struct rcu_state *rsp = arg; | 1530 | struct rcu_state *rsp = arg; |
@@ -1471,14 +1534,19 @@ static int __noreturn rcu_gp_kthread(void *arg) | |||
1471 | 1534 | ||
1472 | /* Handle grace-period start. */ | 1535 | /* Handle grace-period start. */ |
1473 | for (;;) { | 1536 | for (;;) { |
1537 | trace_rcu_grace_period(rsp->name, | ||
1538 | ACCESS_ONCE(rsp->gpnum), | ||
1539 | TPS("reqwait")); | ||
1474 | wait_event_interruptible(rsp->gp_wq, | 1540 | wait_event_interruptible(rsp->gp_wq, |
1475 | rsp->gp_flags & | 1541 | ACCESS_ONCE(rsp->gp_flags) & |
1476 | RCU_GP_FLAG_INIT); | 1542 | RCU_GP_FLAG_INIT); |
1477 | if ((rsp->gp_flags & RCU_GP_FLAG_INIT) && | 1543 | if (rcu_gp_init(rsp)) |
1478 | rcu_gp_init(rsp)) | ||
1479 | break; | 1544 | break; |
1480 | cond_resched(); | 1545 | cond_resched(); |
1481 | flush_signals(current); | 1546 | flush_signals(current); |
1547 | trace_rcu_grace_period(rsp->name, | ||
1548 | ACCESS_ONCE(rsp->gpnum), | ||
1549 | TPS("reqwaitsig")); | ||
1482 | } | 1550 | } |
1483 | 1551 | ||
1484 | /* Handle quiescent-state forcing. */ | 1552 | /* Handle quiescent-state forcing. */ |
@@ -1488,10 +1556,16 @@ static int __noreturn rcu_gp_kthread(void *arg) | |||
1488 | j = HZ; | 1556 | j = HZ; |
1489 | jiffies_till_first_fqs = HZ; | 1557 | jiffies_till_first_fqs = HZ; |
1490 | } | 1558 | } |
1559 | ret = 0; | ||
1491 | for (;;) { | 1560 | for (;;) { |
1492 | rsp->jiffies_force_qs = jiffies + j; | 1561 | if (!ret) |
1562 | rsp->jiffies_force_qs = jiffies + j; | ||
1563 | trace_rcu_grace_period(rsp->name, | ||
1564 | ACCESS_ONCE(rsp->gpnum), | ||
1565 | TPS("fqswait")); | ||
1493 | ret = wait_event_interruptible_timeout(rsp->gp_wq, | 1566 | ret = wait_event_interruptible_timeout(rsp->gp_wq, |
1494 | (rsp->gp_flags & RCU_GP_FLAG_FQS) || | 1567 | ((gf = ACCESS_ONCE(rsp->gp_flags)) & |
1568 | RCU_GP_FLAG_FQS) || | ||
1495 | (!ACCESS_ONCE(rnp->qsmask) && | 1569 | (!ACCESS_ONCE(rnp->qsmask) && |
1496 | !rcu_preempt_blocked_readers_cgp(rnp)), | 1570 | !rcu_preempt_blocked_readers_cgp(rnp)), |
1497 | j); | 1571 | j); |
@@ -1500,13 +1574,23 @@ static int __noreturn rcu_gp_kthread(void *arg) | |||
1500 | !rcu_preempt_blocked_readers_cgp(rnp)) | 1574 | !rcu_preempt_blocked_readers_cgp(rnp)) |
1501 | break; | 1575 | break; |
1502 | /* If time for quiescent-state forcing, do it. */ | 1576 | /* If time for quiescent-state forcing, do it. */ |
1503 | if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) { | 1577 | if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) || |
1578 | (gf & RCU_GP_FLAG_FQS)) { | ||
1579 | trace_rcu_grace_period(rsp->name, | ||
1580 | ACCESS_ONCE(rsp->gpnum), | ||
1581 | TPS("fqsstart")); | ||
1504 | fqs_state = rcu_gp_fqs(rsp, fqs_state); | 1582 | fqs_state = rcu_gp_fqs(rsp, fqs_state); |
1583 | trace_rcu_grace_period(rsp->name, | ||
1584 | ACCESS_ONCE(rsp->gpnum), | ||
1585 | TPS("fqsend")); | ||
1505 | cond_resched(); | 1586 | cond_resched(); |
1506 | } else { | 1587 | } else { |
1507 | /* Deal with stray signal. */ | 1588 | /* Deal with stray signal. */ |
1508 | cond_resched(); | 1589 | cond_resched(); |
1509 | flush_signals(current); | 1590 | flush_signals(current); |
1591 | trace_rcu_grace_period(rsp->name, | ||
1592 | ACCESS_ONCE(rsp->gpnum), | ||
1593 | TPS("fqswaitsig")); | ||
1510 | } | 1594 | } |
1511 | j = jiffies_till_next_fqs; | 1595 | j = jiffies_till_next_fqs; |
1512 | if (j > HZ) { | 1596 | if (j > HZ) { |
@@ -1554,6 +1638,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, | |||
1554 | return; | 1638 | return; |
1555 | } | 1639 | } |
1556 | rsp->gp_flags = RCU_GP_FLAG_INIT; | 1640 | rsp->gp_flags = RCU_GP_FLAG_INIT; |
1641 | trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum), | ||
1642 | TPS("newreq")); | ||
1557 | 1643 | ||
1558 | /* | 1644 | /* |
1559 | * We can't do wakeups while holding the rnp->lock, as that | 1645 | * We can't do wakeups while holding the rnp->lock, as that |
@@ -2255,7 +2341,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, | |||
2255 | * If called from an extended quiescent state, invoke the RCU | 2341 | * If called from an extended quiescent state, invoke the RCU |
2256 | * core in order to force a re-evaluation of RCU's idleness. | 2342 | * core in order to force a re-evaluation of RCU's idleness. |
2257 | */ | 2343 | */ |
2258 | if (rcu_is_cpu_idle() && cpu_online(smp_processor_id())) | 2344 | if (!rcu_is_watching() && cpu_online(smp_processor_id())) |
2259 | invoke_rcu_core(); | 2345 | invoke_rcu_core(); |
2260 | 2346 | ||
2261 | /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ | 2347 | /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ |
@@ -2725,10 +2811,13 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy) | |||
2725 | 2811 | ||
2726 | for_each_rcu_flavor(rsp) { | 2812 | for_each_rcu_flavor(rsp) { |
2727 | rdp = per_cpu_ptr(rsp->rda, cpu); | 2813 | rdp = per_cpu_ptr(rsp->rda, cpu); |
2728 | if (rdp->qlen != rdp->qlen_lazy) | 2814 | if (!rdp->nxtlist) |
2815 | continue; | ||
2816 | hc = true; | ||
2817 | if (rdp->qlen != rdp->qlen_lazy || !all_lazy) { | ||
2729 | al = false; | 2818 | al = false; |
2730 | if (rdp->nxtlist) | 2819 | break; |
2731 | hc = true; | 2820 | } |
2732 | } | 2821 | } |
2733 | if (all_lazy) | 2822 | if (all_lazy) |
2734 | *all_lazy = al; | 2823 | *all_lazy = al; |
@@ -3216,7 +3305,7 @@ static void __init rcu_init_one(struct rcu_state *rsp, | |||
3216 | 3305 | ||
3217 | /* | 3306 | /* |
3218 | * Compute the rcu_node tree geometry from kernel parameters. This cannot | 3307 | * Compute the rcu_node tree geometry from kernel parameters. This cannot |
3219 | * replace the definitions in rcutree.h because those are needed to size | 3308 | * replace the definitions in tree.h because those are needed to size |
3220 | * the ->node array in the rcu_state structure. | 3309 | * the ->node array in the rcu_state structure. |
3221 | */ | 3310 | */ |
3222 | static void __init rcu_init_geometry(void) | 3311 | static void __init rcu_init_geometry(void) |
@@ -3295,8 +3384,8 @@ void __init rcu_init(void) | |||
3295 | 3384 | ||
3296 | rcu_bootup_announce(); | 3385 | rcu_bootup_announce(); |
3297 | rcu_init_geometry(); | 3386 | rcu_init_geometry(); |
3298 | rcu_init_one(&rcu_sched_state, &rcu_sched_data); | ||
3299 | rcu_init_one(&rcu_bh_state, &rcu_bh_data); | 3387 | rcu_init_one(&rcu_bh_state, &rcu_bh_data); |
3388 | rcu_init_one(&rcu_sched_state, &rcu_sched_data); | ||
3300 | __rcu_init_preempt(); | 3389 | __rcu_init_preempt(); |
3301 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 3390 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
3302 | 3391 | ||
@@ -3311,4 +3400,4 @@ void __init rcu_init(void) | |||
3311 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); | 3400 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); |
3312 | } | 3401 | } |
3313 | 3402 | ||
3314 | #include "rcutree_plugin.h" | 3403 | #include "tree_plugin.h" |
diff --git a/kernel/rcutree.h b/kernel/rcu/tree.h index 5f97eab602cd..52be957c9fe2 100644 --- a/kernel/rcutree.h +++ b/kernel/rcu/tree.h | |||
@@ -104,6 +104,8 @@ struct rcu_dynticks { | |||
104 | /* idle-period nonlazy_posted snapshot. */ | 104 | /* idle-period nonlazy_posted snapshot. */ |
105 | unsigned long last_accelerate; | 105 | unsigned long last_accelerate; |
106 | /* Last jiffy CBs were accelerated. */ | 106 | /* Last jiffy CBs were accelerated. */ |
107 | unsigned long last_advance_all; | ||
108 | /* Last jiffy CBs were all advanced. */ | ||
107 | int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ | 109 | int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ |
108 | #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ | 110 | #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ |
109 | }; | 111 | }; |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcu/tree_plugin.h index 130c97b027f2..3822ac0c4b27 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/gfp.h> | 28 | #include <linux/gfp.h> |
29 | #include <linux/oom.h> | 29 | #include <linux/oom.h> |
30 | #include <linux/smpboot.h> | 30 | #include <linux/smpboot.h> |
31 | #include "time/tick-internal.h" | 31 | #include "../time/tick-internal.h" |
32 | 32 | ||
33 | #define RCU_KTHREAD_PRIO 1 | 33 | #define RCU_KTHREAD_PRIO 1 |
34 | 34 | ||
@@ -96,10 +96,15 @@ static void __init rcu_bootup_announce_oddness(void) | |||
96 | #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */ | 96 | #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */ |
97 | #ifdef CONFIG_RCU_NOCB_CPU_ALL | 97 | #ifdef CONFIG_RCU_NOCB_CPU_ALL |
98 | pr_info("\tOffload RCU callbacks from all CPUs\n"); | 98 | pr_info("\tOffload RCU callbacks from all CPUs\n"); |
99 | cpumask_setall(rcu_nocb_mask); | 99 | cpumask_copy(rcu_nocb_mask, cpu_possible_mask); |
100 | #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */ | 100 | #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */ |
101 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */ | 101 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */ |
102 | if (have_rcu_nocb_mask) { | 102 | if (have_rcu_nocb_mask) { |
103 | if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) { | ||
104 | pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n"); | ||
105 | cpumask_and(rcu_nocb_mask, cpu_possible_mask, | ||
106 | rcu_nocb_mask); | ||
107 | } | ||
103 | cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask); | 108 | cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask); |
104 | pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf); | 109 | pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf); |
105 | if (rcu_nocb_poll) | 110 | if (rcu_nocb_poll) |
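Both cpumask changes in this hunk share one motive: never carry bits for CPUs that cannot exist. cpumask_setall() sets every representable bit, possibly including never-possible CPUs, whereas copying cpu_possible_mask cannot; and a user-supplied rcu_nocbs= mask is now clipped with a one-time notice instead of being trusted. The sanitize-then-use shape in isolation:

	/* Prefer the possible mask over setting every representable bit. */
	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);

	/* Clip boot-line input, warning if it named bogus CPUs. */
	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask))
		cpumask_and(rcu_nocb_mask, cpu_possible_mask, rcu_nocb_mask);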
@@ -660,7 +665,7 @@ static void rcu_preempt_check_callbacks(int cpu) | |||
660 | 665 | ||
661 | static void rcu_preempt_do_callbacks(void) | 666 | static void rcu_preempt_do_callbacks(void) |
662 | { | 667 | { |
663 | rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data)); | 668 | rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); |
664 | } | 669 | } |
665 | 670 | ||
666 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 671 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
@@ -1128,7 +1133,7 @@ void exit_rcu(void) | |||
1128 | 1133 | ||
1129 | #ifdef CONFIG_RCU_BOOST | 1134 | #ifdef CONFIG_RCU_BOOST |
1130 | 1135 | ||
1131 | #include "rtmutex_common.h" | 1136 | #include "../rtmutex_common.h" |
1132 | 1137 | ||
1133 | #ifdef CONFIG_RCU_TRACE | 1138 | #ifdef CONFIG_RCU_TRACE |
1134 | 1139 | ||
@@ -1332,7 +1337,7 @@ static void invoke_rcu_callbacks_kthread(void) | |||
1332 | */ | 1337 | */ |
1333 | static bool rcu_is_callbacks_kthread(void) | 1338 | static bool rcu_is_callbacks_kthread(void) |
1334 | { | 1339 | { |
1335 | return __get_cpu_var(rcu_cpu_kthread_task) == current; | 1340 | return __this_cpu_read(rcu_cpu_kthread_task) == current; |
1336 | } | 1341 | } |
1337 | 1342 | ||
1338 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) | 1343 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) |
@@ -1382,8 +1387,8 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | |||
1382 | 1387 | ||
1383 | static void rcu_kthread_do_work(void) | 1388 | static void rcu_kthread_do_work(void) |
1384 | { | 1389 | { |
1385 | rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); | 1390 | rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); |
1386 | rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); | 1391 | rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); |
1387 | rcu_preempt_do_callbacks(); | 1392 | rcu_preempt_do_callbacks(); |
1388 | } | 1393 | } |
1389 | 1394 | ||
@@ -1402,7 +1407,7 @@ static void rcu_cpu_kthread_park(unsigned int cpu) | |||
1402 | 1407 | ||
1403 | static int rcu_cpu_kthread_should_run(unsigned int cpu) | 1408 | static int rcu_cpu_kthread_should_run(unsigned int cpu) |
1404 | { | 1409 | { |
1405 | return __get_cpu_var(rcu_cpu_has_work); | 1410 | return __this_cpu_read(rcu_cpu_has_work); |
1406 | } | 1411 | } |
1407 | 1412 | ||
1408 | /* | 1413 | /* |
@@ -1412,8 +1417,8 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu) | |||
1412 | */ | 1417 | */ |
1413 | static void rcu_cpu_kthread(unsigned int cpu) | 1418 | static void rcu_cpu_kthread(unsigned int cpu) |
1414 | { | 1419 | { |
1415 | unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status); | 1420 | unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); |
1416 | char work, *workp = &__get_cpu_var(rcu_cpu_has_work); | 1421 | char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); |
1417 | int spincnt; | 1422 | int spincnt; |
1418 | 1423 | ||
1419 | for (spincnt = 0; spincnt < 10; spincnt++) { | 1424 | for (spincnt = 0; spincnt < 10; spincnt++) { |
@@ -1630,17 +1635,23 @@ module_param(rcu_idle_lazy_gp_delay, int, 0644); | |||
1630 | extern int tick_nohz_enabled; | 1635 | extern int tick_nohz_enabled; |
1631 | 1636 | ||
1632 | /* | 1637 | /* |
1633 | * Try to advance callbacks for all flavors of RCU on the current CPU. | 1638 | * Try to advance callbacks for all flavors of RCU on the current CPU, but |
1634 | * Afterwards, if there are any callbacks ready for immediate invocation, | 1639 | * only if it has been a while since the last time we did so. Afterwards, |
1635 | * return true. | 1640 | * if there are any callbacks ready for immediate invocation, return true. |
1636 | */ | 1641 | */ |
1637 | static bool rcu_try_advance_all_cbs(void) | 1642 | static bool rcu_try_advance_all_cbs(void) |
1638 | { | 1643 | { |
1639 | bool cbs_ready = false; | 1644 | bool cbs_ready = false; |
1640 | struct rcu_data *rdp; | 1645 | struct rcu_data *rdp; |
1646 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); | ||
1641 | struct rcu_node *rnp; | 1647 | struct rcu_node *rnp; |
1642 | struct rcu_state *rsp; | 1648 | struct rcu_state *rsp; |
1643 | 1649 | ||
1650 | /* Exit early if we advanced recently. */ | ||
1651 | if (jiffies == rdtp->last_advance_all) | ||
1652 | return false; | ||
1653 | rdtp->last_advance_all = jiffies; | ||
1654 | |||
1644 | for_each_rcu_flavor(rsp) { | 1655 | for_each_rcu_flavor(rsp) { |
1645 | rdp = this_cpu_ptr(rsp->rda); | 1656 | rdp = this_cpu_ptr(rsp->rda); |
1646 | rnp = rdp->mynode; | 1657 | rnp = rdp->mynode; |
@@ -1739,6 +1750,8 @@ static void rcu_prepare_for_idle(int cpu) | |||
1739 | */ | 1750 | */ |
1740 | if (rdtp->all_lazy && | 1751 | if (rdtp->all_lazy && |
1741 | rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) { | 1752 | rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) { |
1753 | rdtp->all_lazy = false; | ||
1754 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; | ||
1742 | invoke_rcu_core(); | 1755 | invoke_rcu_core(); |
1743 | return; | 1756 | return; |
1744 | } | 1757 | } |
@@ -1768,17 +1781,11 @@ static void rcu_prepare_for_idle(int cpu) | |||
1768 | */ | 1781 | */ |
1769 | static void rcu_cleanup_after_idle(int cpu) | 1782 | static void rcu_cleanup_after_idle(int cpu) |
1770 | { | 1783 | { |
1771 | struct rcu_data *rdp; | ||
1772 | struct rcu_state *rsp; | ||
1773 | 1784 | ||
1774 | if (rcu_is_nocb_cpu(cpu)) | 1785 | if (rcu_is_nocb_cpu(cpu)) |
1775 | return; | 1786 | return; |
1776 | rcu_try_advance_all_cbs(); | 1787 | if (rcu_try_advance_all_cbs()) |
1777 | for_each_rcu_flavor(rsp) { | 1788 | invoke_rcu_core(); |
1778 | rdp = per_cpu_ptr(rsp->rda, cpu); | ||
1779 | if (cpu_has_callbacks_ready_to_invoke(rdp)) | ||
1780 | invoke_rcu_core(); | ||
1781 | } | ||
1782 | } | 1789 | } |
1783 | 1790 | ||
1784 | /* | 1791 | /* |
@@ -2108,15 +2115,22 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, | |||
2108 | 2115 | ||
2109 | /* If we are not being polled and there is a kthread, awaken it ... */ | 2116 | /* If we are not being polled and there is a kthread, awaken it ... */ |
2110 | t = ACCESS_ONCE(rdp->nocb_kthread); | 2117 | t = ACCESS_ONCE(rdp->nocb_kthread); |
2111 | if (rcu_nocb_poll | !t) | 2118 | if (rcu_nocb_poll || !t) { |
2119 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, | ||
2120 | TPS("WakeNotPoll")); | ||
2112 | return; | 2121 | return; |
2122 | } | ||
2113 | len = atomic_long_read(&rdp->nocb_q_count); | 2123 | len = atomic_long_read(&rdp->nocb_q_count); |
2114 | if (old_rhpp == &rdp->nocb_head) { | 2124 | if (old_rhpp == &rdp->nocb_head) { |
2115 | wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */ | 2125 | wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */ |
2116 | rdp->qlen_last_fqs_check = 0; | 2126 | rdp->qlen_last_fqs_check = 0; |
2127 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeEmpty")); | ||
2117 | } else if (len > rdp->qlen_last_fqs_check + qhimark) { | 2128 | } else if (len > rdp->qlen_last_fqs_check + qhimark) { |
2118 | wake_up_process(t); /* ... or if many callbacks queued. */ | 2129 | wake_up_process(t); /* ... or if many callbacks queued. */ |
2119 | rdp->qlen_last_fqs_check = LONG_MAX / 2; | 2130 | rdp->qlen_last_fqs_check = LONG_MAX / 2; |
2131 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf")); | ||
2132 | } else { | ||
2133 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot")); | ||
2120 | } | 2134 | } |
2121 | return; | 2135 | return; |
2122 | } | 2136 | } |
@@ -2140,10 +2154,12 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, | |||
2140 | if (__is_kfree_rcu_offset((unsigned long)rhp->func)) | 2154 | if (__is_kfree_rcu_offset((unsigned long)rhp->func)) |
2141 | trace_rcu_kfree_callback(rdp->rsp->name, rhp, | 2155 | trace_rcu_kfree_callback(rdp->rsp->name, rhp, |
2142 | (unsigned long)rhp->func, | 2156 | (unsigned long)rhp->func, |
2143 | rdp->qlen_lazy, rdp->qlen); | 2157 | -atomic_long_read(&rdp->nocb_q_count_lazy), |
2158 | -atomic_long_read(&rdp->nocb_q_count)); | ||
2144 | else | 2159 | else |
2145 | trace_rcu_callback(rdp->rsp->name, rhp, | 2160 | trace_rcu_callback(rdp->rsp->name, rhp, |
2146 | rdp->qlen_lazy, rdp->qlen); | 2161 | -atomic_long_read(&rdp->nocb_q_count_lazy), |
2162 | -atomic_long_read(&rdp->nocb_q_count)); | ||
2147 | return 1; | 2163 | return 1; |
2148 | } | 2164 | } |
2149 | 2165 | ||
@@ -2221,6 +2237,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) | |||
2221 | static int rcu_nocb_kthread(void *arg) | 2237 | static int rcu_nocb_kthread(void *arg) |
2222 | { | 2238 | { |
2223 | int c, cl; | 2239 | int c, cl; |
2240 | bool firsttime = true; | ||
2224 | struct rcu_head *list; | 2241 | struct rcu_head *list; |
2225 | struct rcu_head *next; | 2242 | struct rcu_head *next; |
2226 | struct rcu_head **tail; | 2243 | struct rcu_head **tail; |
@@ -2229,14 +2246,27 @@ static int rcu_nocb_kthread(void *arg) | |||
2229 | /* Each pass through this loop invokes one batch of callbacks */ | 2246 | /* Each pass through this loop invokes one batch of callbacks */ |
2230 | for (;;) { | 2247 | for (;;) { |
2231 | /* If not polling, wait for next batch of callbacks. */ | 2248 | /* If not polling, wait for next batch of callbacks. */ |
2232 | if (!rcu_nocb_poll) | 2249 | if (!rcu_nocb_poll) { |
2250 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, | ||
2251 | TPS("Sleep")); | ||
2233 | wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); | 2252 | wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); |
2253 | } else if (firsttime) { | ||
2254 | firsttime = false; | ||
2255 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, | ||
2256 | TPS("Poll")); | ||
2257 | } | ||
2234 | list = ACCESS_ONCE(rdp->nocb_head); | 2258 | list = ACCESS_ONCE(rdp->nocb_head); |
2235 | if (!list) { | 2259 | if (!list) { |
2260 | if (!rcu_nocb_poll) | ||
2261 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, | ||
2262 | TPS("WokeEmpty")); | ||
2236 | schedule_timeout_interruptible(1); | 2263 | schedule_timeout_interruptible(1); |
2237 | flush_signals(current); | 2264 | flush_signals(current); |
2238 | continue; | 2265 | continue; |
2239 | } | 2266 | } |
2267 | firsttime = true; | ||
2268 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, | ||
2269 | TPS("WokeNonEmpty")); | ||
2240 | 2270 | ||
2241 | /* | 2271 | /* |
2242 | * Extract queued callbacks, update counts, and wait | 2272 | * Extract queued callbacks, update counts, and wait |
@@ -2257,7 +2287,11 @@ static int rcu_nocb_kthread(void *arg) | |||
2257 | next = list->next; | 2287 | next = list->next; |
2258 | /* Wait for enqueuing to complete, if needed. */ | 2288 | /* Wait for enqueuing to complete, if needed. */ |
2259 | while (next == NULL && &list->next != tail) { | 2289 | while (next == NULL && &list->next != tail) { |
2290 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, | ||
2291 | TPS("WaitQueue")); | ||
2260 | schedule_timeout_interruptible(1); | 2292 | schedule_timeout_interruptible(1); |
2293 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, | ||
2294 | TPS("WokeQueue")); | ||
2261 | next = list->next; | 2295 | next = list->next; |
2262 | } | 2296 | } |
2263 | debug_rcu_head_unqueue(list); | 2297 | debug_rcu_head_unqueue(list); |
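Stripped of the new tracepoints, rcu_nocb_kthread() is a wait-or-poll consumer loop: block on the waitqueue until the head pointer goes non-NULL (or, under rcu_nocb_poll, nap a tick at a time), then detach and run the whole batch. A condensed sketch using the names from the hunk:

	for (;;) {
		if (!rcu_nocb_poll)
			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
		list = ACCESS_ONCE(rdp->nocb_head);
		if (!list) {			/* poll miss or spurious wakeup */
			schedule_timeout_interruptible(1);
			flush_signals(current);
			continue;
		}
		/* ... detach the list, wait out racing enqueues, invoke callbacks ... */
	}

The firsttime flag added above only throttles the Poll tracepoint, so that polling mode does not emit one trace event per tick.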
diff --git a/kernel/rcutree_trace.c b/kernel/rcu/tree_trace.c index cf6c17412932..3596797b7e46 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcu/tree_trace.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
45 | 45 | ||
46 | #define RCU_TREE_NONCORE | 46 | #define RCU_TREE_NONCORE |
47 | #include "rcutree.h" | 47 | #include "tree.h" |
48 | 48 | ||
49 | static int r_open(struct inode *inode, struct file *file, | 49 | static int r_open(struct inode *inode, struct file *file, |
50 | const struct seq_operations *op) | 50 | const struct seq_operations *op) |
diff --git a/kernel/rcupdate.c b/kernel/rcu/update.c index b02a339836b4..6cb3dff89e2b 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcu/update.c | |||
@@ -53,6 +53,12 @@ | |||
53 | 53 | ||
54 | #include "rcu.h" | 54 | #include "rcu.h" |
55 | 55 | ||
56 | MODULE_ALIAS("rcupdate"); | ||
57 | #ifdef MODULE_PARAM_PREFIX | ||
58 | #undef MODULE_PARAM_PREFIX | ||
59 | #endif | ||
60 | #define MODULE_PARAM_PREFIX "rcupdate." | ||
61 | |||
56 | module_param(rcu_expedited, int, 0); | 62 | module_param(rcu_expedited, int, 0); |
57 | 63 | ||
58 | #ifdef CONFIG_PREEMPT_RCU | 64 | #ifdef CONFIG_PREEMPT_RCU |
@@ -148,7 +154,7 @@ int rcu_read_lock_bh_held(void) | |||
148 | { | 154 | { |
149 | if (!debug_lockdep_rcu_enabled()) | 155 | if (!debug_lockdep_rcu_enabled()) |
150 | return 1; | 156 | return 1; |
151 | if (rcu_is_cpu_idle()) | 157 | if (!rcu_is_watching()) |
152 | return 0; | 158 | return 0; |
153 | if (!rcu_lockdep_current_cpu_online()) | 159 | if (!rcu_lockdep_current_cpu_online()) |
154 | return 0; | 160 | return 0; |
@@ -298,7 +304,7 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read); | |||
298 | #endif | 304 | #endif |
299 | 305 | ||
300 | int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */ | 306 | int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */ |
301 | int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; | 307 | static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; |
302 | 308 | ||
303 | module_param(rcu_cpu_stall_suppress, int, 0644); | 309 | module_param(rcu_cpu_stall_suppress, int, 0644); |
304 | module_param(rcu_cpu_stall_timeout, int, 0644); | 310 | module_param(rcu_cpu_stall_timeout, int, 0644); |
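With the prefix in place, the stall-warning knobs keep stable user-visible names despite the file move: they should remain settable as rcupdate.rcu_cpu_stall_suppress= and rcupdate.rcu_cpu_stall_timeout= on the kernel command line, and, given the 0644 permissions, writable at run time under /sys/module/rcupdate/parameters/ (the sysfs location is inferred from how module_param() publishes built-in parameters, so verify on a running kernel).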