 -rw-r--r--  Documentation/RCU/stallwarn.txt |  94
 -rw-r--r--  Documentation/RCU/trace.txt     |  35
 -rw-r--r--  include/linux/debugobjects.h    |  11
 -rw-r--r--  include/linux/init_task.h       |   1
 -rw-r--r--  include/linux/rcupdate.h        |  50
 -rw-r--r--  include/linux/rcutiny.h         |  29
 -rw-r--r--  include/linux/rcutree.h         |   6
 -rw-r--r--  include/linux/srcu.h            |   6
 -rw-r--r--  kernel/lockdep.c                |   3
 -rw-r--r--  kernel/rcupdate.c               |  19
 -rw-r--r--  kernel/rcutiny.c                |  35
 -rw-r--r--  kernel/rcutiny_plugin.h         |  39
 -rw-r--r--  kernel/rcutorture.c             |   2
 -rw-r--r--  kernel/rcutree.c                | 131
 -rw-r--r--  kernel/rcutree.h                |   2
 -rw-r--r--  kernel/rcutree_plugin.h         |  69
 -rw-r--r--  kernel/rcutree_trace.c          |   4
 -rw-r--r--  kernel/sched.c                  |   2
 -rw-r--r--  kernel/softirq.c                |   2
 -rw-r--r--  lib/Kconfig.debug               |  14
 -rw-r--r--  lib/debugobjects.c              |  59
21 files changed, 470 insertions, 143 deletions
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 1423d2570d78..44c6dcc93d6d 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
| @@ -3,35 +3,79 @@ Using RCU's CPU Stall Detector | |||
| 3 | The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables | 3 | The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables |
| 4 | RCU's CPU stall detector, which detects conditions that unduly delay | 4 | RCU's CPU stall detector, which detects conditions that unduly delay |
| 5 | RCU grace periods. The stall detector's idea of what constitutes | 5 | RCU grace periods. The stall detector's idea of what constitutes |
| 6 | "unduly delayed" is controlled by a pair of C preprocessor macros: | 6 | "unduly delayed" is controlled by a set of C preprocessor macros: |
| 7 | 7 | ||
| 8 | RCU_SECONDS_TILL_STALL_CHECK | 8 | RCU_SECONDS_TILL_STALL_CHECK |
| 9 | 9 | ||
| 10 | This macro defines the period of time that RCU will wait from | 10 | This macro defines the period of time that RCU will wait from |
| 11 | the beginning of a grace period until it issues an RCU CPU | 11 | the beginning of a grace period until it issues an RCU CPU |
| 12 | stall warning. It is normally ten seconds. | 12 | stall warning. This time period is normally ten seconds. |
| 13 | 13 | ||
| 14 | RCU_SECONDS_TILL_STALL_RECHECK | 14 | RCU_SECONDS_TILL_STALL_RECHECK |
| 15 | 15 | ||
| 16 | This macro defines the period of time that RCU will wait after | 16 | This macro defines the period of time that RCU will wait after |
| 17 | issuing a stall warning until it issues another stall warning. | 17 | issuing a stall warning until it issues another stall warning |
| 18 | It is normally set to thirty seconds. | 18 | for the same stall. This time period is normally set to thirty |
| 19 | seconds. | ||
| 19 | 20 | ||
| 20 | RCU_STALL_RAT_DELAY | 21 | RCU_STALL_RAT_DELAY |
| 21 | 22 | ||
| 22 | The CPU stall detector tries to make the offending CPU rat on itself, | 23 | The CPU stall detector tries to make the offending CPU print its |
| 23 | as this often gives better-quality stack traces. However, if | 24 | own warnings, as this often gives better-quality stack traces. |
| 24 | the offending CPU does not detect its own stall in the number | 25 | However, if the offending CPU does not detect its own stall in |
| 25 | of jiffies specified by RCU_STALL_RAT_DELAY, then other CPUs will | 26 | the number of jiffies specified by RCU_STALL_RAT_DELAY, then |
| 26 | complain. This is normally set to two jiffies. | 27 | some other CPU will complain. This delay is normally set to |
| 28 | two jiffies. | ||
| 27 | 29 | ||
| 28 | The following problems can result in an RCU CPU stall warning: | 30 | When a CPU detects that it is stalling, it will print a message similar |
| 31 | to the following: | ||
| 32 | |||
| 33 | INFO: rcu_sched_state detected stall on CPU 5 (t=2500 jiffies) | ||
| 34 | |||
| 35 | This message indicates that CPU 5 detected that it was causing a stall, | ||
| 36 | and that the stall was affecting RCU-sched. This message will normally be | ||
| 37 | followed by a stack dump of the offending CPU. On TREE_RCU kernel builds, | ||
| 38 | RCU and RCU-sched are implemented by the same underlying mechanism, | ||
| 39 | while on TREE_PREEMPT_RCU kernel builds, RCU is instead implemented | ||
| 40 | by rcu_preempt_state. | ||
| 41 | |||
| 42 | On the other hand, if the offending CPU fails to print out a stall-warning | ||
| 43 | message quickly enough, some other CPU will print a message similar to | ||
| 44 | the following: | ||
| 45 | |||
| 46 | INFO: rcu_bh_state detected stalls on CPUs/tasks: { 3 5 } (detected by 2, 2502 jiffies) | ||
| 47 | |||
| 48 | This message indicates that CPU 2 detected that CPUs 3 and 5 were both | ||
| 49 | causing stalls, and that the stall was affecting RCU-bh. This message | ||
| 50 | will normally be followed by stack dumps for each CPU. Please note that | ||
| 51 | TREE_PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, | ||
| 52 | and that the tasks will be indicated by PID, for example, "P3421". | ||
| 53 | It is even possible for a rcu_preempt_state stall to be caused by both | ||
| 54 | CPUs -and- tasks, in which case the offending CPUs and tasks will all | ||
| 55 | be called out in the list. | ||
| 56 | |||
| 57 | Finally, if the grace period ends just as the stall warning starts | ||
| 58 | printing, there will be a spurious stall-warning message: | ||
| 59 | |||
| 60 | INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies) | ||
| 61 | |||
| 62 | This is rare, but does happen from time to time in real life. | ||
| 63 | |||
| 64 | So your kernel printed an RCU CPU stall warning. The next question is | ||
| 65 | "What caused it?" The following problems can result in RCU CPU stall | ||
| 66 | warnings: | ||
| 29 | 67 | ||
| 30 | o A CPU looping in an RCU read-side critical section. | 68 | o A CPU looping in an RCU read-side critical section. |
| 31 | 69 | ||
| 32 | o A CPU looping with interrupts disabled. | 70 | o A CPU looping with interrupts disabled. This condition can |
| 71 | result in RCU-sched and RCU-bh stalls. | ||
| 33 | 72 | ||
| 34 | o A CPU looping with preemption disabled. | 73 | o A CPU looping with preemption disabled. This condition can |
| 74 | result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh | ||
| 75 | stalls. | ||
| 76 | |||
| 77 | o A CPU looping with bottom halves disabled. This condition can | ||
| 78 | result in RCU-sched and RCU-bh stalls. | ||
| 35 | 79 | ||
| 36 | o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel | 80 | o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel |
| 37 | without invoking schedule(). | 81 | without invoking schedule(). |
| @@ -39,20 +83,24 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel | |||
| 39 | o A bug in the RCU implementation. | 83 | o A bug in the RCU implementation. |
| 40 | 84 | ||
| 41 | o A hardware failure. This is quite unlikely, but has occurred | 85 | o A hardware failure. This is quite unlikely, but has occurred |
| 42 | at least once in a former life. A CPU failed in a running system, | 86 | at least once in real life. A CPU failed in a running system, |
| 43 | becoming unresponsive, but not causing an immediate crash. | 87 | becoming unresponsive, but not causing an immediate crash. |
| 44 | This resulted in a series of RCU CPU stall warnings, eventually | 88 | This resulted in a series of RCU CPU stall warnings, eventually |
| 45 | leading the realization that the CPU had failed. | 89 | leading the realization that the CPU had failed. |
| 46 | 90 | ||
| 47 | The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning. | 91 | The RCU, RCU-sched, and RCU-bh implementations have CPU stall |
| 48 | SRCU does not do so directly, but its calls to synchronize_sched() will | 92 | warning. SRCU does not have its own CPU stall warnings, but its |
| 49 | result in RCU-sched detecting any CPU stalls that might be occurring. | 93 | calls to synchronize_sched() will result in RCU-sched detecting |
| 50 | 94 | RCU-sched-related CPU stalls. Please note that RCU only detects | |
| 51 | To diagnose the cause of the stall, inspect the stack traces. The offending | 95 | CPU stalls when there is a grace period in progress. No grace period, |
| 52 | function will usually be near the top of the stack. If you have a series | 96 | no CPU stall warnings. |
| 53 | of stall warnings from a single extended stall, comparing the stack traces | 97 | |
| 54 | can often help determine where the stall is occurring, which will usually | 98 | To diagnose the cause of the stall, inspect the stack traces. |
| 55 | be in the function nearest the top of the stack that stays the same from | 99 | The offending function will usually be near the top of the stack. |
| 56 | trace to trace. | 100 | If you have a series of stall warnings from a single extended stall, |
| 101 | comparing the stack traces can often help determine where the stall | ||
| 102 | is occurring, which will usually be in the function nearest the top of | ||
| 103 | that portion of the stack which remains the same from trace to trace. | ||
| 104 | If you can reliably trigger the stall, ftrace can be quite helpful. | ||
| 57 | 105 | ||
| 58 | RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE. | 106 | RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE. |
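The list of stall causes above is easiest to appreciate with a concrete (and entirely hypothetical) offender. The sketch below is not part of this patch; it shows a kernel-thread body that spins inside an RCU read-side critical section without ever blocking, which after RCU_SECONDS_TILL_STALL_CHECK (normally ten seconds) would produce an "INFO: ... detected stall" message like the ones quoted above.

	/*
	 * Illustrative only -- not from this patch.  Run via kthread_run(),
	 * this loop never leaves its RCU read-side critical section and
	 * never passes through a quiescent state, so the current grace
	 * period cannot end and the stall detector eventually complains.
	 */
	#include <linux/kthread.h>
	#include <linux/rcupdate.h>

	static int stall_thread(void *unused)
	{
		rcu_read_lock();
		while (!kthread_should_stop())
			cpu_relax();		/* spin; never schedule */
		rcu_read_unlock();
		return 0;
	}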
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 8608fd85e921..efd8cc95c06b 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
| @@ -256,23 +256,23 @@ o Each element of the form "1/1 0:127 ^0" represents one struct | |||
| 256 | The output of "cat rcu/rcu_pending" looks as follows: | 256 | The output of "cat rcu/rcu_pending" looks as follows: |
| 257 | 257 | ||
| 258 | rcu_sched: | 258 | rcu_sched: |
| 259 | 0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741 | 259 | 0 np=255892 qsp=53936 rpq=85 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741 |
| 260 | 1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792 | 260 | 1 np=261224 qsp=54638 rpq=33 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792 |
| 261 | 2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629 | 261 | 2 np=237496 qsp=49664 rpq=23 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629 |
| 262 | 3 np=236249 qsp=48766 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723 | 262 | 3 np=236249 qsp=48766 rpq=98 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723 |
| 263 | 4 np=221310 qsp=46850 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110 | 263 | 4 np=221310 qsp=46850 rpq=7 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110 |
| 264 | 5 np=237332 qsp=48449 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456 | 264 | 5 np=237332 qsp=48449 rpq=9 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456 |
| 265 | 6 np=219995 qsp=46718 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834 | 265 | 6 np=219995 qsp=46718 rpq=12 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834 |
| 266 | 7 np=249893 qsp=49390 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888 | 266 | 7 np=249893 qsp=49390 rpq=42 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888 |
| 267 | rcu_bh: | 267 | rcu_bh: |
| 268 | 0 np=146741 qsp=1419 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314 | 268 | 0 np=146741 qsp=1419 rpq=6 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314 |
| 269 | 1 np=155792 qsp=12597 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180 | 269 | 1 np=155792 qsp=12597 rpq=3 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180 |
| 270 | 2 np=136629 qsp=18680 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936 | 270 | 2 np=136629 qsp=18680 rpq=1 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936 |
| 271 | 3 np=137723 qsp=2843 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863 | 271 | 3 np=137723 qsp=2843 rpq=0 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863 |
| 272 | 4 np=123110 qsp=12433 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671 | 272 | 4 np=123110 qsp=12433 rpq=0 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671 |
| 273 | 5 np=137456 qsp=4210 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235 | 273 | 5 np=137456 qsp=4210 rpq=1 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235 |
| 274 | 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 | 274 | 6 np=120834 qsp=9902 rpq=2 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 |
| 275 | 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 | 275 | 7 np=144888 qsp=26336 rpq=0 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 |
| 276 | 276 | ||
| 277 | As always, this is once again split into "rcu_sched" and "rcu_bh" | 277 | As always, this is once again split into "rcu_sched" and "rcu_bh" |
| 278 | portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional | 278 | portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional |
| @@ -284,6 +284,9 @@ o "np" is the number of times that __rcu_pending() has been invoked | |||
| 284 | o "qsp" is the number of times that the RCU was waiting for a | 284 | o "qsp" is the number of times that the RCU was waiting for a |
| 285 | quiescent state from this CPU. | 285 | quiescent state from this CPU. |
| 286 | 286 | ||
| 287 | o "rpq" is the number of times that the CPU had passed through | ||
| 288 | a quiescent state, but not yet reported it to RCU. | ||
| 289 | |||
| 287 | o "cbr" is the number of times that this CPU had RCU callbacks | 290 | o "cbr" is the number of times that this CPU had RCU callbacks |
| 288 | that had passed through a grace period, and were thus ready | 291 | that had passed through a grace period, and were thus ready |
| 289 | to be invoked. | 292 | to be invoked. |
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 8c243aaa86a7..597692f1fc8d 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
| @@ -20,12 +20,14 @@ struct debug_obj_descr; | |||
| 20 | * struct debug_obj - representaion of an tracked object | 20 | * struct debug_obj - representaion of an tracked object |
| 21 | * @node: hlist node to link the object into the tracker list | 21 | * @node: hlist node to link the object into the tracker list |
| 22 | * @state: tracked object state | 22 | * @state: tracked object state |
| 23 | * @astate: current active state | ||
| 23 | * @object: pointer to the real object | 24 | * @object: pointer to the real object |
| 24 | * @descr: pointer to an object type specific debug description structure | 25 | * @descr: pointer to an object type specific debug description structure |
| 25 | */ | 26 | */ |
| 26 | struct debug_obj { | 27 | struct debug_obj { |
| 27 | struct hlist_node node; | 28 | struct hlist_node node; |
| 28 | enum debug_obj_state state; | 29 | enum debug_obj_state state; |
| 30 | unsigned int astate; | ||
| 29 | void *object; | 31 | void *object; |
| 30 | struct debug_obj_descr *descr; | 32 | struct debug_obj_descr *descr; |
| 31 | }; | 33 | }; |
| @@ -60,6 +62,15 @@ extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); | |||
| 60 | extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); | 62 | extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); |
| 61 | extern void debug_object_free (void *addr, struct debug_obj_descr *descr); | 63 | extern void debug_object_free (void *addr, struct debug_obj_descr *descr); |
| 62 | 64 | ||
| 65 | /* | ||
| 66 | * Active state: | ||
| 67 | * - Set at 0 upon initialization. | ||
| 68 | * - Must return to 0 before deactivation. | ||
| 69 | */ | ||
| 70 | extern void | ||
| 71 | debug_object_active_state(void *addr, struct debug_obj_descr *descr, | ||
| 72 | unsigned int expect, unsigned int next); | ||
| 73 | |||
| 63 | extern void debug_objects_early_init(void); | 74 | extern void debug_objects_early_init(void); |
| 64 | extern void debug_objects_mem_init(void); | 75 | extern void debug_objects_mem_init(void); |
| 65 | #else | 76 | #else |
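The new debug_object_active_state() hook is only declared here. As a rough illustration, the hypothetical snippet below (names and semantics are assumptions, not taken from this patch) shows how a subsystem might use it to assert an object's "active state" before advancing it, on the reading that the call warns when astate differs from expect and otherwise records next.

	/*
	 * Hypothetical sketch: my_obj, my_obj_descr, and the state values
	 * are invented for illustration.  Assumed semantics: complain via
	 * debugobjects if the tracked astate != expect, else set it to next.
	 */
	#include <linux/debugobjects.h>

	#define MY_OBJ_IDLE	0	/* "set at 0 upon initialization" */
	#define MY_OBJ_QUEUED	1

	struct my_obj {
		int payload;
	};

	static struct debug_obj_descr my_obj_descr = {
		.name = "my_obj",
	};

	static void my_obj_queue(struct my_obj *p)
	{
		/* Object must currently be idle; mark it queued. */
		debug_object_active_state(p, &my_obj_descr,
					  MY_OBJ_IDLE, MY_OBJ_QUEUED);
		/* ... hand p off to the asynchronous machinery ... */
	}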
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b1ed1cd8e2a8..7996fc2c9ba9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
| @@ -49,7 +49,6 @@ extern struct group_info init_groups; | |||
| 49 | { .first = &init_task.pids[PIDTYPE_PGID].node }, \ | 49 | { .first = &init_task.pids[PIDTYPE_PGID].node }, \ |
| 50 | { .first = &init_task.pids[PIDTYPE_SID].node }, \ | 50 | { .first = &init_task.pids[PIDTYPE_SID].node }, \ |
| 51 | }, \ | 51 | }, \ |
| 52 | .rcu = RCU_HEAD_INIT, \ | ||
| 53 | .level = 0, \ | 52 | .level = 0, \ |
| 54 | .numbers = { { \ | 53 | .numbers = { { \ |
| 55 | .nr = 0, \ | 54 | .nr = 0, \ |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index db266bbed23f..b653b4aaa8a6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
| @@ -56,8 +56,6 @@ struct rcu_head { | |||
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | /* Exported common interfaces */ | 58 | /* Exported common interfaces */ |
| 59 | extern void synchronize_rcu_bh(void); | ||
| 60 | extern void synchronize_sched(void); | ||
| 61 | extern void rcu_barrier(void); | 59 | extern void rcu_barrier(void); |
| 62 | extern void rcu_barrier_bh(void); | 60 | extern void rcu_barrier_bh(void); |
| 63 | extern void rcu_barrier_sched(void); | 61 | extern void rcu_barrier_sched(void); |
| @@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page); | |||
| 66 | 64 | ||
| 67 | /* Internal to kernel */ | 65 | /* Internal to kernel */ |
| 68 | extern void rcu_init(void); | 66 | extern void rcu_init(void); |
| 69 | extern int rcu_scheduler_active; | ||
| 70 | extern void rcu_scheduler_starting(void); | ||
| 71 | 67 | ||
| 72 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 68 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
| 73 | #include <linux/rcutree.h> | 69 | #include <linux/rcutree.h> |
| @@ -83,6 +79,14 @@ extern void rcu_scheduler_starting(void); | |||
| 83 | (ptr)->next = NULL; (ptr)->func = NULL; \ | 79 | (ptr)->next = NULL; (ptr)->func = NULL; \ |
| 84 | } while (0) | 80 | } while (0) |
| 85 | 81 | ||
| 82 | static inline void init_rcu_head_on_stack(struct rcu_head *head) | ||
| 83 | { | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline void destroy_rcu_head_on_stack(struct rcu_head *head) | ||
| 87 | { | ||
| 88 | } | ||
| 89 | |||
| 86 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 90 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 87 | 91 | ||
| 88 | extern struct lockdep_map rcu_lock_map; | 92 | extern struct lockdep_map rcu_lock_map; |
| @@ -106,12 +110,13 @@ extern int debug_lockdep_rcu_enabled(void); | |||
| 106 | /** | 110 | /** |
| 107 | * rcu_read_lock_held - might we be in RCU read-side critical section? | 111 | * rcu_read_lock_held - might we be in RCU read-side critical section? |
| 108 | * | 112 | * |
| 109 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | 113 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU |
| 110 | * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 114 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
| 111 | * this assumes we are in an RCU read-side critical section unless it can | 115 | * this assumes we are in an RCU read-side critical section unless it can |
| 112 | * prove otherwise. | 116 | * prove otherwise. |
| 113 | * | 117 | * |
| 114 | * Check rcu_scheduler_active to prevent false positives during boot. | 118 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
| 119 | * and while lockdep is disabled. | ||
| 115 | */ | 120 | */ |
| 116 | static inline int rcu_read_lock_held(void) | 121 | static inline int rcu_read_lock_held(void) |
| 117 | { | 122 | { |
| @@ -129,13 +134,15 @@ extern int rcu_read_lock_bh_held(void); | |||
| 129 | /** | 134 | /** |
| 130 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? | 135 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? |
| 131 | * | 136 | * |
| 132 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an | 137 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an |
| 133 | * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 138 | * RCU-sched read-side critical section. In absence of |
| 134 | * this assumes we are in an RCU-sched read-side critical section unless it | 139 | * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side |
| 135 | * can prove otherwise. Note that disabling of preemption (including | 140 | * critical section unless it can prove otherwise. Note that disabling |
| 136 | * disabling irqs) counts as an RCU-sched read-side critical section. | 141 | * of preemption (including disabling irqs) counts as an RCU-sched |
| 142 | * read-side critical section. | ||
| 137 | * | 143 | * |
| 138 | * Check rcu_scheduler_active to prevent false positives during boot. | 144 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
| 145 | * and while lockdep is disabled. | ||
| 139 | */ | 146 | */ |
| 140 | #ifdef CONFIG_PREEMPT | 147 | #ifdef CONFIG_PREEMPT |
| 141 | static inline int rcu_read_lock_sched_held(void) | 148 | static inline int rcu_read_lock_sched_held(void) |
| @@ -177,7 +184,7 @@ static inline int rcu_read_lock_bh_held(void) | |||
| 177 | #ifdef CONFIG_PREEMPT | 184 | #ifdef CONFIG_PREEMPT |
| 178 | static inline int rcu_read_lock_sched_held(void) | 185 | static inline int rcu_read_lock_sched_held(void) |
| 179 | { | 186 | { |
| 180 | return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled(); | 187 | return preempt_count() != 0 || irqs_disabled(); |
| 181 | } | 188 | } |
| 182 | #else /* #ifdef CONFIG_PREEMPT */ | 189 | #else /* #ifdef CONFIG_PREEMPT */ |
| 183 | static inline int rcu_read_lock_sched_held(void) | 190 | static inline int rcu_read_lock_sched_held(void) |
| @@ -192,6 +199,15 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 192 | 199 | ||
| 193 | extern int rcu_my_thread_group_empty(void); | 200 | extern int rcu_my_thread_group_empty(void); |
| 194 | 201 | ||
| 202 | #define __do_rcu_dereference_check(c) \ | ||
| 203 | do { \ | ||
| 204 | static bool __warned; \ | ||
| 205 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | ||
| 206 | __warned = true; \ | ||
| 207 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
| 208 | } \ | ||
| 209 | } while (0) | ||
| 210 | |||
| 195 | /** | 211 | /** |
| 196 | * rcu_dereference_check - rcu_dereference with debug checking | 212 | * rcu_dereference_check - rcu_dereference with debug checking |
| 197 | * @p: The pointer to read, prior to dereferencing | 213 | * @p: The pointer to read, prior to dereferencing |
| @@ -221,8 +237,7 @@ extern int rcu_my_thread_group_empty(void); | |||
| 221 | */ | 237 | */ |
| 222 | #define rcu_dereference_check(p, c) \ | 238 | #define rcu_dereference_check(p, c) \ |
| 223 | ({ \ | 239 | ({ \ |
| 224 | if (debug_lockdep_rcu_enabled() && !(c)) \ | 240 | __do_rcu_dereference_check(c); \ |
| 225 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
| 226 | rcu_dereference_raw(p); \ | 241 | rcu_dereference_raw(p); \ |
| 227 | }) | 242 | }) |
| 228 | 243 | ||
| @@ -239,8 +254,7 @@ extern int rcu_my_thread_group_empty(void); | |||
| 239 | */ | 254 | */ |
| 240 | #define rcu_dereference_protected(p, c) \ | 255 | #define rcu_dereference_protected(p, c) \ |
| 241 | ({ \ | 256 | ({ \ |
| 242 | if (debug_lockdep_rcu_enabled() && !(c)) \ | 257 | __do_rcu_dereference_check(c); \ |
| 243 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
| 244 | (p); \ | 258 | (p); \ |
| 245 | }) | 259 | }) |
| 246 | 260 | ||
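With rcu_dereference_check() and rcu_dereference_protected() now routed through __do_rcu_dereference_check(), a violated condition produces at most one lockdep splat per call site instead of a flood. A minimal usage sketch follows; the foo structure, gbl_foo pointer, and foo_lock are made-up examples, not code from this patch.

	/*
	 * Illustrative only.  Readers hold rcu_read_lock(); updaters are
	 * assumed to hold foo_lock while publishing a new gbl_foo.  Either
	 * condition satisfies rcu_dereference_check(), so lockdep stays
	 * quiet for legitimate accesses and warns (once per call site)
	 * otherwise.
	 */
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct foo {
		int val;
	};

	static DEFINE_SPINLOCK(foo_lock);	/* protects updates to gbl_foo */
	static struct foo *gbl_foo;

	static int foo_get_val(void)
	{
		struct foo *p;
		int ret;

		rcu_read_lock();
		p = rcu_dereference_check(gbl_foo,
					  rcu_read_lock_held() ||
					  lockdep_is_held(&foo_lock));
		ret = p ? p->val : -1;
		rcu_read_unlock();
		return ret;
	}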
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a5195875480a..14e5a76b2c06 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
| @@ -29,6 +29,10 @@ | |||
| 29 | 29 | ||
| 30 | void rcu_sched_qs(int cpu); | 30 | void rcu_sched_qs(int cpu); |
| 31 | void rcu_bh_qs(int cpu); | 31 | void rcu_bh_qs(int cpu); |
| 32 | static inline void rcu_note_context_switch(int cpu) | ||
| 33 | { | ||
| 34 | rcu_sched_qs(cpu); | ||
| 35 | } | ||
| 32 | 36 | ||
| 33 | #define __rcu_read_lock() preempt_disable() | 37 | #define __rcu_read_lock() preempt_disable() |
| 34 | #define __rcu_read_unlock() preempt_enable() | 38 | #define __rcu_read_unlock() preempt_enable() |
| @@ -74,7 +78,17 @@ static inline void rcu_sched_force_quiescent_state(void) | |||
| 74 | { | 78 | { |
| 75 | } | 79 | } |
| 76 | 80 | ||
| 77 | #define synchronize_rcu synchronize_sched | 81 | extern void synchronize_sched(void); |
| 82 | |||
| 83 | static inline void synchronize_rcu(void) | ||
| 84 | { | ||
| 85 | synchronize_sched(); | ||
| 86 | } | ||
| 87 | |||
| 88 | static inline void synchronize_rcu_bh(void) | ||
| 89 | { | ||
| 90 | synchronize_sched(); | ||
| 91 | } | ||
| 78 | 92 | ||
| 79 | static inline void synchronize_rcu_expedited(void) | 93 | static inline void synchronize_rcu_expedited(void) |
| 80 | { | 94 | { |
| @@ -114,4 +128,17 @@ static inline int rcu_preempt_depth(void) | |||
| 114 | return 0; | 128 | return 0; |
| 115 | } | 129 | } |
| 116 | 130 | ||
| 131 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 132 | |||
| 133 | extern int rcu_scheduler_active __read_mostly; | ||
| 134 | extern void rcu_scheduler_starting(void); | ||
| 135 | |||
| 136 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
| 137 | |||
| 138 | static inline void rcu_scheduler_starting(void) | ||
| 139 | { | ||
| 140 | } | ||
| 141 | |||
| 142 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
| 143 | |||
| 117 | #endif /* __LINUX_RCUTINY_H */ | 144 | #endif /* __LINUX_RCUTINY_H */ |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 42cc3a04779e..48282055e83d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
| @@ -34,6 +34,7 @@ struct notifier_block; | |||
| 34 | 34 | ||
| 35 | extern void rcu_sched_qs(int cpu); | 35 | extern void rcu_sched_qs(int cpu); |
| 36 | extern void rcu_bh_qs(int cpu); | 36 | extern void rcu_bh_qs(int cpu); |
| 37 | extern void rcu_note_context_switch(int cpu); | ||
| 37 | extern int rcu_needs_cpu(int cpu); | 38 | extern int rcu_needs_cpu(int cpu); |
| 38 | extern int rcu_expedited_torture_stats(char *page); | 39 | extern int rcu_expedited_torture_stats(char *page); |
| 39 | 40 | ||
| @@ -86,6 +87,8 @@ static inline void __rcu_read_unlock_bh(void) | |||
| 86 | 87 | ||
| 87 | extern void call_rcu_sched(struct rcu_head *head, | 88 | extern void call_rcu_sched(struct rcu_head *head, |
| 88 | void (*func)(struct rcu_head *rcu)); | 89 | void (*func)(struct rcu_head *rcu)); |
| 90 | extern void synchronize_rcu_bh(void); | ||
| 91 | extern void synchronize_sched(void); | ||
| 89 | extern void synchronize_rcu_expedited(void); | 92 | extern void synchronize_rcu_expedited(void); |
| 90 | 93 | ||
| 91 | static inline void synchronize_rcu_bh_expedited(void) | 94 | static inline void synchronize_rcu_bh_expedited(void) |
| @@ -120,4 +123,7 @@ static inline int rcu_blocking_is_gp(void) | |||
| 120 | return num_online_cpus() == 1; | 123 | return num_online_cpus() == 1; |
| 121 | } | 124 | } |
| 122 | 125 | ||
| 126 | extern void rcu_scheduler_starting(void); | ||
| 127 | extern int rcu_scheduler_active __read_mostly; | ||
| 128 | |||
| 123 | #endif /* __LINUX_RCUTREE_H */ | 129 | #endif /* __LINUX_RCUTREE_H */ |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4d5ecb222af9..4d5d2f546dbf 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
| @@ -27,6 +27,8 @@ | |||
| 27 | #ifndef _LINUX_SRCU_H | 27 | #ifndef _LINUX_SRCU_H |
| 28 | #define _LINUX_SRCU_H | 28 | #define _LINUX_SRCU_H |
| 29 | 29 | ||
| 30 | #include <linux/mutex.h> | ||
| 31 | |||
| 30 | struct srcu_struct_array { | 32 | struct srcu_struct_array { |
| 31 | int c[2]; | 33 | int c[2]; |
| 32 | }; | 34 | }; |
| @@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_struct *sp); | |||
| 84 | /** | 86 | /** |
| 85 | * srcu_read_lock_held - might we be in SRCU read-side critical section? | 87 | * srcu_read_lock_held - might we be in SRCU read-side critical section? |
| 86 | * | 88 | * |
| 87 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | 89 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU |
| 88 | * an SRCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 90 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
| 89 | * this assumes we are in an SRCU read-side critical section unless it can | 91 | * this assumes we are in an SRCU read-side critical section unless it can |
| 90 | * prove otherwise. | 92 | * prove otherwise. |
| 91 | */ | 93 | */ |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 51080807dc8c..4349e9793419 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
| @@ -3806,8 +3806,11 @@ void lockdep_rcu_dereference(const char *file, const int line) | |||
| 3806 | { | 3806 | { |
| 3807 | struct task_struct *curr = current; | 3807 | struct task_struct *curr = current; |
| 3808 | 3808 | ||
| 3809 | #ifndef CONFIG_PROVE_RCU_REPEATEDLY | ||
| 3809 | if (!debug_locks_off()) | 3810 | if (!debug_locks_off()) |
| 3810 | return; | 3811 | return; |
| 3812 | #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */ | ||
| 3813 | /* Note: the following can be executed concurrently, so be careful. */ | ||
| 3811 | printk("\n===================================================\n"); | 3814 | printk("\n===================================================\n"); |
| 3812 | printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n"); | 3815 | printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n"); |
| 3813 | printk( "---------------------------------------------------\n"); | 3816 | printk( "---------------------------------------------------\n"); |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 49d808e833b0..72a8dc9567f5 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
| @@ -44,7 +44,6 @@ | |||
| 44 | #include <linux/cpu.h> | 44 | #include <linux/cpu.h> |
| 45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
| 46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
| 47 | #include <linux/kernel_stat.h> | ||
| 48 | #include <linux/hardirq.h> | 47 | #include <linux/hardirq.h> |
| 49 | 48 | ||
| 50 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| @@ -64,9 +63,6 @@ struct lockdep_map rcu_sched_lock_map = | |||
| 64 | EXPORT_SYMBOL_GPL(rcu_sched_lock_map); | 63 | EXPORT_SYMBOL_GPL(rcu_sched_lock_map); |
| 65 | #endif | 64 | #endif |
| 66 | 65 | ||
| 67 | int rcu_scheduler_active __read_mostly; | ||
| 68 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | ||
| 69 | |||
| 70 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 66 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 71 | 67 | ||
| 72 | int debug_lockdep_rcu_enabled(void) | 68 | int debug_lockdep_rcu_enabled(void) |
| @@ -97,21 +93,6 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); | |||
| 97 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 93 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 98 | 94 | ||
| 99 | /* | 95 | /* |
| 100 | * This function is invoked towards the end of the scheduler's initialization | ||
| 101 | * process. Before this is called, the idle task might contain | ||
| 102 | * RCU read-side critical sections (during which time, this idle | ||
| 103 | * task is booting the system). After this function is called, the | ||
| 104 | * idle tasks are prohibited from containing RCU read-side critical | ||
| 105 | * sections. | ||
| 106 | */ | ||
| 107 | void rcu_scheduler_starting(void) | ||
| 108 | { | ||
| 109 | WARN_ON(num_online_cpus() != 1); | ||
| 110 | WARN_ON(nr_context_switches() > 0); | ||
| 111 | rcu_scheduler_active = 1; | ||
| 112 | } | ||
| 113 | |||
| 114 | /* | ||
| 115 | * Awaken the corresponding synchronize_rcu() instance now that a | 96 | * Awaken the corresponding synchronize_rcu() instance now that a |
| 116 | * grace period has elapsed. | 97 | * grace period has elapsed. |
| 117 | */ | 98 | */ |
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9f6d9ff2572c..38729d3cd236 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
| @@ -44,9 +44,9 @@ struct rcu_ctrlblk { | |||
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | /* Definition for rcupdate control block. */ | 46 | /* Definition for rcupdate control block. */ |
| 47 | static struct rcu_ctrlblk rcu_ctrlblk = { | 47 | static struct rcu_ctrlblk rcu_sched_ctrlblk = { |
| 48 | .donetail = &rcu_ctrlblk.rcucblist, | 48 | .donetail = &rcu_sched_ctrlblk.rcucblist, |
| 49 | .curtail = &rcu_ctrlblk.rcucblist, | 49 | .curtail = &rcu_sched_ctrlblk.rcucblist, |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| 52 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | 52 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { |
| @@ -54,6 +54,11 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = { | |||
| 54 | .curtail = &rcu_bh_ctrlblk.rcucblist, | 54 | .curtail = &rcu_bh_ctrlblk.rcucblist, |
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 58 | int rcu_scheduler_active __read_mostly; | ||
| 59 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | ||
| 60 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
| 61 | |||
| 57 | #ifdef CONFIG_NO_HZ | 62 | #ifdef CONFIG_NO_HZ |
| 58 | 63 | ||
| 59 | static long rcu_dynticks_nesting = 1; | 64 | static long rcu_dynticks_nesting = 1; |
| @@ -108,7 +113,8 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) | |||
| 108 | */ | 113 | */ |
| 109 | void rcu_sched_qs(int cpu) | 114 | void rcu_sched_qs(int cpu) |
| 110 | { | 115 | { |
| 111 | if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk)) | 116 | if (rcu_qsctr_help(&rcu_sched_ctrlblk) + |
| 117 | rcu_qsctr_help(&rcu_bh_ctrlblk)) | ||
| 112 | raise_softirq(RCU_SOFTIRQ); | 118 | raise_softirq(RCU_SOFTIRQ); |
| 113 | } | 119 | } |
| 114 | 120 | ||
| @@ -173,7 +179,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | |||
| 173 | */ | 179 | */ |
| 174 | static void rcu_process_callbacks(struct softirq_action *unused) | 180 | static void rcu_process_callbacks(struct softirq_action *unused) |
| 175 | { | 181 | { |
| 176 | __rcu_process_callbacks(&rcu_ctrlblk); | 182 | __rcu_process_callbacks(&rcu_sched_ctrlblk); |
| 177 | __rcu_process_callbacks(&rcu_bh_ctrlblk); | 183 | __rcu_process_callbacks(&rcu_bh_ctrlblk); |
| 178 | } | 184 | } |
| 179 | 185 | ||
| @@ -187,7 +193,8 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
| 187 | * | 193 | * |
| 188 | * Cool, huh? (Due to Josh Triplett.) | 194 | * Cool, huh? (Due to Josh Triplett.) |
| 189 | * | 195 | * |
| 190 | * But we want to make this a static inline later. | 196 | * But we want to make this a static inline later. The cond_resched() |
| 197 | * currently makes this problematic. | ||
| 191 | */ | 198 | */ |
| 192 | void synchronize_sched(void) | 199 | void synchronize_sched(void) |
| 193 | { | 200 | { |
| @@ -195,12 +202,6 @@ void synchronize_sched(void) | |||
| 195 | } | 202 | } |
| 196 | EXPORT_SYMBOL_GPL(synchronize_sched); | 203 | EXPORT_SYMBOL_GPL(synchronize_sched); |
| 197 | 204 | ||
| 198 | void synchronize_rcu_bh(void) | ||
| 199 | { | ||
| 200 | synchronize_sched(); | ||
| 201 | } | ||
| 202 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
| 203 | |||
| 204 | /* | 205 | /* |
| 205 | * Helper function for call_rcu() and call_rcu_bh(). | 206 | * Helper function for call_rcu() and call_rcu_bh(). |
| 206 | */ | 207 | */ |
| @@ -226,7 +227,7 @@ static void __call_rcu(struct rcu_head *head, | |||
| 226 | */ | 227 | */ |
| 227 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | 228 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
| 228 | { | 229 | { |
| 229 | __call_rcu(head, func, &rcu_ctrlblk); | 230 | __call_rcu(head, func, &rcu_sched_ctrlblk); |
| 230 | } | 231 | } |
| 231 | EXPORT_SYMBOL_GPL(call_rcu); | 232 | EXPORT_SYMBOL_GPL(call_rcu); |
| 232 | 233 | ||
| @@ -244,11 +245,13 @@ void rcu_barrier(void) | |||
| 244 | { | 245 | { |
| 245 | struct rcu_synchronize rcu; | 246 | struct rcu_synchronize rcu; |
| 246 | 247 | ||
| 248 | init_rcu_head_on_stack(&rcu.head); | ||
| 247 | init_completion(&rcu.completion); | 249 | init_completion(&rcu.completion); |
| 248 | /* Will wake me after RCU finished. */ | 250 | /* Will wake me after RCU finished. */ |
| 249 | call_rcu(&rcu.head, wakeme_after_rcu); | 251 | call_rcu(&rcu.head, wakeme_after_rcu); |
| 250 | /* Wait for it. */ | 252 | /* Wait for it. */ |
| 251 | wait_for_completion(&rcu.completion); | 253 | wait_for_completion(&rcu.completion); |
| 254 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 252 | } | 255 | } |
| 253 | EXPORT_SYMBOL_GPL(rcu_barrier); | 256 | EXPORT_SYMBOL_GPL(rcu_barrier); |
| 254 | 257 | ||
| @@ -256,11 +259,13 @@ void rcu_barrier_bh(void) | |||
| 256 | { | 259 | { |
| 257 | struct rcu_synchronize rcu; | 260 | struct rcu_synchronize rcu; |
| 258 | 261 | ||
| 262 | init_rcu_head_on_stack(&rcu.head); | ||
| 259 | init_completion(&rcu.completion); | 263 | init_completion(&rcu.completion); |
| 260 | /* Will wake me after RCU finished. */ | 264 | /* Will wake me after RCU finished. */ |
| 261 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | 265 | call_rcu_bh(&rcu.head, wakeme_after_rcu); |
| 262 | /* Wait for it. */ | 266 | /* Wait for it. */ |
| 263 | wait_for_completion(&rcu.completion); | 267 | wait_for_completion(&rcu.completion); |
| 268 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 264 | } | 269 | } |
| 265 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | 270 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); |
| 266 | 271 | ||
| @@ -268,11 +273,13 @@ void rcu_barrier_sched(void) | |||
| 268 | { | 273 | { |
| 269 | struct rcu_synchronize rcu; | 274 | struct rcu_synchronize rcu; |
| 270 | 275 | ||
| 276 | init_rcu_head_on_stack(&rcu.head); | ||
| 271 | init_completion(&rcu.completion); | 277 | init_completion(&rcu.completion); |
| 272 | /* Will wake me after RCU finished. */ | 278 | /* Will wake me after RCU finished. */ |
| 273 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | 279 | call_rcu_sched(&rcu.head, wakeme_after_rcu); |
| 274 | /* Wait for it. */ | 280 | /* Wait for it. */ |
| 275 | wait_for_completion(&rcu.completion); | 281 | wait_for_completion(&rcu.completion); |
| 282 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 276 | } | 283 | } |
| 277 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | 284 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); |
| 278 | 285 | ||
| @@ -280,3 +287,5 @@ void __init rcu_init(void) | |||
| 280 | { | 287 | { |
| 281 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 288 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
| 282 | } | 289 | } |
| 290 | |||
| 291 | #include "rcutiny_plugin.h" | ||
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
new file mode 100644
index 000000000000..d223a92bc742
--- /dev/null
+++ b/kernel/rcutiny_plugin.h
| @@ -0,0 +1,39 @@ | |||
| 1 | /* | ||
| 2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | ||
| 3 | * Internal non-public definitions that provide either classic | ||
| 4 | * or preemptable semantics. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 19 | * | ||
| 20 | * Copyright IBM Corporation, 2009 | ||
| 21 | * | ||
| 22 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 26 | |||
| 27 | #include <linux/kernel_stat.h> | ||
| 28 | |||
| 29 | /* | ||
| 30 | * During boot, we forgive RCU lockdep issues. After this function is | ||
| 31 | * invoked, we start taking RCU lockdep issues seriously. | ||
| 32 | */ | ||
| 33 | void rcu_scheduler_starting(void) | ||
| 34 | { | ||
| 35 | WARN_ON(nr_context_switches() > 0); | ||
| 36 | rcu_scheduler_active = 1; | ||
| 37 | } | ||
| 38 | |||
| 39 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 58df55bf83ed..077defb34571 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
| @@ -464,9 +464,11 @@ static void rcu_bh_torture_synchronize(void) | |||
| 464 | { | 464 | { |
| 465 | struct rcu_bh_torture_synchronize rcu; | 465 | struct rcu_bh_torture_synchronize rcu; |
| 466 | 466 | ||
| 467 | init_rcu_head_on_stack(&rcu.head); | ||
| 467 | init_completion(&rcu.completion); | 468 | init_completion(&rcu.completion); |
| 468 | call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb); | 469 | call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb); |
| 469 | wait_for_completion(&rcu.completion); | 470 | wait_for_completion(&rcu.completion); |
| 471 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 470 | } | 472 | } |
| 471 | 473 | ||
| 472 | static struct rcu_torture_ops rcu_bh_ops = { | 474 | static struct rcu_torture_ops rcu_bh_ops = { |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3ec8160fc75f..d4437345706f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
| @@ -46,6 +46,7 @@ | |||
| 46 | #include <linux/cpu.h> | 46 | #include <linux/cpu.h> |
| 47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
| 48 | #include <linux/time.h> | 48 | #include <linux/time.h> |
| 49 | #include <linux/kernel_stat.h> | ||
| 49 | 50 | ||
| 50 | #include "rcutree.h" | 51 | #include "rcutree.h" |
| 51 | 52 | ||
| @@ -53,8 +54,8 @@ | |||
| 53 | 54 | ||
| 54 | static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | 55 | static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; |
| 55 | 56 | ||
| 56 | #define RCU_STATE_INITIALIZER(name) { \ | 57 | #define RCU_STATE_INITIALIZER(structname) { \ |
| 57 | .level = { &name.node[0] }, \ | 58 | .level = { &structname.node[0] }, \ |
| 58 | .levelcnt = { \ | 59 | .levelcnt = { \ |
| 59 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ | 60 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ |
| 60 | NUM_RCU_LVL_1, \ | 61 | NUM_RCU_LVL_1, \ |
| @@ -65,13 +66,14 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | |||
| 65 | .signaled = RCU_GP_IDLE, \ | 66 | .signaled = RCU_GP_IDLE, \ |
| 66 | .gpnum = -300, \ | 67 | .gpnum = -300, \ |
| 67 | .completed = -300, \ | 68 | .completed = -300, \ |
| 68 | .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&name.onofflock), \ | 69 | .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \ |
| 69 | .orphan_cbs_list = NULL, \ | 70 | .orphan_cbs_list = NULL, \ |
| 70 | .orphan_cbs_tail = &name.orphan_cbs_list, \ | 71 | .orphan_cbs_tail = &structname.orphan_cbs_list, \ |
| 71 | .orphan_qlen = 0, \ | 72 | .orphan_qlen = 0, \ |
| 72 | .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&name.fqslock), \ | 73 | .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \ |
| 73 | .n_force_qs = 0, \ | 74 | .n_force_qs = 0, \ |
| 74 | .n_force_qs_ngp = 0, \ | 75 | .n_force_qs_ngp = 0, \ |
| 76 | .name = #structname, \ | ||
| 75 | } | 77 | } |
| 76 | 78 | ||
| 77 | struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); | 79 | struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); |
| @@ -80,6 +82,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | |||
| 80 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 82 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
| 81 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 83 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
| 82 | 84 | ||
| 85 | int rcu_scheduler_active __read_mostly; | ||
| 86 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | ||
| 87 | |||
| 83 | /* | 88 | /* |
| 84 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | 89 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s |
| 85 | * permit this function to be invoked without holding the root rcu_node | 90 | * permit this function to be invoked without holding the root rcu_node |
| @@ -97,25 +102,32 @@ static int rcu_gp_in_progress(struct rcu_state *rsp) | |||
| 97 | */ | 102 | */ |
| 98 | void rcu_sched_qs(int cpu) | 103 | void rcu_sched_qs(int cpu) |
| 99 | { | 104 | { |
| 100 | struct rcu_data *rdp; | 105 | struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); |
| 101 | 106 | ||
| 102 | rdp = &per_cpu(rcu_sched_data, cpu); | ||
| 103 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | 107 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
| 104 | barrier(); | 108 | barrier(); |
| 105 | rdp->passed_quiesc = 1; | 109 | rdp->passed_quiesc = 1; |
| 106 | rcu_preempt_note_context_switch(cpu); | ||
| 107 | } | 110 | } |
| 108 | 111 | ||
| 109 | void rcu_bh_qs(int cpu) | 112 | void rcu_bh_qs(int cpu) |
| 110 | { | 113 | { |
| 111 | struct rcu_data *rdp; | 114 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); |
| 112 | 115 | ||
| 113 | rdp = &per_cpu(rcu_bh_data, cpu); | ||
| 114 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | 116 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
| 115 | barrier(); | 117 | barrier(); |
| 116 | rdp->passed_quiesc = 1; | 118 | rdp->passed_quiesc = 1; |
| 117 | } | 119 | } |
| 118 | 120 | ||
| 121 | /* | ||
| 122 | * Note a context switch. This is a quiescent state for RCU-sched, | ||
| 123 | * and requires special handling for preemptible RCU. | ||
| 124 | */ | ||
| 125 | void rcu_note_context_switch(int cpu) | ||
| 126 | { | ||
| 127 | rcu_sched_qs(cpu); | ||
| 128 | rcu_preempt_note_context_switch(cpu); | ||
| 129 | } | ||
| 130 | |||
| 119 | #ifdef CONFIG_NO_HZ | 131 | #ifdef CONFIG_NO_HZ |
| 120 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | 132 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { |
| 121 | .dynticks_nesting = 1, | 133 | .dynticks_nesting = 1, |
| @@ -438,6 +450,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
| 438 | 450 | ||
| 439 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 451 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
| 440 | 452 | ||
| 453 | int rcu_cpu_stall_panicking __read_mostly; | ||
| 454 | |||
| 441 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 455 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
| 442 | { | 456 | { |
| 443 | rsp->gp_start = jiffies; | 457 | rsp->gp_start = jiffies; |
| @@ -470,7 +484,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
| 470 | 484 | ||
| 471 | /* OK, time to rat on our buddy... */ | 485 | /* OK, time to rat on our buddy... */ |
| 472 | 486 | ||
| 473 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); | 487 | printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", |
| 488 | rsp->name); | ||
| 474 | rcu_for_each_leaf_node(rsp, rnp) { | 489 | rcu_for_each_leaf_node(rsp, rnp) { |
| 475 | raw_spin_lock_irqsave(&rnp->lock, flags); | 490 | raw_spin_lock_irqsave(&rnp->lock, flags); |
| 476 | rcu_print_task_stall(rnp); | 491 | rcu_print_task_stall(rnp); |
| @@ -481,7 +496,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
| 481 | if (rnp->qsmask & (1UL << cpu)) | 496 | if (rnp->qsmask & (1UL << cpu)) |
| 482 | printk(" %d", rnp->grplo + cpu); | 497 | printk(" %d", rnp->grplo + cpu); |
| 483 | } | 498 | } |
| 484 | printk(" (detected by %d, t=%ld jiffies)\n", | 499 | printk("} (detected by %d, t=%ld jiffies)\n", |
| 485 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); | 500 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); |
| 486 | trigger_all_cpu_backtrace(); | 501 | trigger_all_cpu_backtrace(); |
| 487 | 502 | ||
| @@ -497,8 +512,8 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
| 497 | unsigned long flags; | 512 | unsigned long flags; |
| 498 | struct rcu_node *rnp = rcu_get_root(rsp); | 513 | struct rcu_node *rnp = rcu_get_root(rsp); |
| 499 | 514 | ||
| 500 | printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", | 515 | printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", |
| 501 | smp_processor_id(), jiffies - rsp->gp_start); | 516 | rsp->name, smp_processor_id(), jiffies - rsp->gp_start); |
| 502 | trigger_all_cpu_backtrace(); | 517 | trigger_all_cpu_backtrace(); |
| 503 | 518 | ||
| 504 | raw_spin_lock_irqsave(&rnp->lock, flags); | 519 | raw_spin_lock_irqsave(&rnp->lock, flags); |
| @@ -515,6 +530,8 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 515 | long delta; | 530 | long delta; |
| 516 | struct rcu_node *rnp; | 531 | struct rcu_node *rnp; |
| 517 | 532 | ||
| 533 | if (rcu_cpu_stall_panicking) | ||
| 534 | return; | ||
| 518 | delta = jiffies - rsp->jiffies_stall; | 535 | delta = jiffies - rsp->jiffies_stall; |
| 519 | rnp = rdp->mynode; | 536 | rnp = rdp->mynode; |
| 520 | if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { | 537 | if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { |
| @@ -529,6 +546,21 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 529 | } | 546 | } |
| 530 | } | 547 | } |
| 531 | 548 | ||
| 549 | static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) | ||
| 550 | { | ||
| 551 | rcu_cpu_stall_panicking = 1; | ||
| 552 | return NOTIFY_DONE; | ||
| 553 | } | ||
| 554 | |||
| 555 | static struct notifier_block rcu_panic_block = { | ||
| 556 | .notifier_call = rcu_panic, | ||
| 557 | }; | ||
| 558 | |||
| 559 | static void __init check_cpu_stall_init(void) | ||
| 560 | { | ||
| 561 | atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); | ||
| 562 | } | ||
| 563 | |||
| 532 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 564 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
| 533 | 565 | ||
| 534 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 566 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
| @@ -539,6 +571,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 539 | { | 571 | { |
| 540 | } | 572 | } |
| 541 | 573 | ||
| 574 | static void __init check_cpu_stall_init(void) | ||
| 575 | { | ||
| 576 | } | ||
| 577 | |||
| 542 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 578 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
| 543 | 579 | ||
| 544 | /* | 580 | /* |
| @@ -1125,8 +1161,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 1125 | */ | 1161 | */ |
| 1126 | void rcu_check_callbacks(int cpu, int user) | 1162 | void rcu_check_callbacks(int cpu, int user) |
| 1127 | { | 1163 | { |
| 1128 | if (!rcu_pending(cpu)) | ||
| 1129 | return; /* if nothing for RCU to do. */ | ||
| 1130 | if (user || | 1164 | if (user || |
| 1131 | (idle_cpu(cpu) && rcu_scheduler_active && | 1165 | (idle_cpu(cpu) && rcu_scheduler_active && |
| 1132 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | 1166 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { |
| @@ -1158,7 +1192,8 @@ void rcu_check_callbacks(int cpu, int user) | |||
| 1158 | rcu_bh_qs(cpu); | 1192 | rcu_bh_qs(cpu); |
| 1159 | } | 1193 | } |
| 1160 | rcu_preempt_check_callbacks(cpu); | 1194 | rcu_preempt_check_callbacks(cpu); |
| 1161 | raise_softirq(RCU_SOFTIRQ); | 1195 | if (rcu_pending(cpu)) |
| 1196 | raise_softirq(RCU_SOFTIRQ); | ||
| 1162 | } | 1197 | } |
| 1163 | 1198 | ||
| 1164 | #ifdef CONFIG_SMP | 1199 | #ifdef CONFIG_SMP |
| @@ -1236,11 +1271,11 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
| 1236 | break; /* grace period idle or initializing, ignore. */ | 1271 | break; /* grace period idle or initializing, ignore. */ |
| 1237 | 1272 | ||
| 1238 | case RCU_SAVE_DYNTICK: | 1273 | case RCU_SAVE_DYNTICK: |
| 1239 | |||
| 1240 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
| 1241 | if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) | 1274 | if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) |
| 1242 | break; /* So gcc recognizes the dead code. */ | 1275 | break; /* So gcc recognizes the dead code. */ |
| 1243 | 1276 | ||
| 1277 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
| 1278 | |||
| 1244 | /* Record dyntick-idle state. */ | 1279 | /* Record dyntick-idle state. */ |
| 1245 | force_qs_rnp(rsp, dyntick_save_progress_counter); | 1280 | force_qs_rnp(rsp, dyntick_save_progress_counter); |
| 1246 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ | 1281 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
| @@ -1449,11 +1484,13 @@ void synchronize_sched(void) | |||
| 1449 | if (rcu_blocking_is_gp()) | 1484 | if (rcu_blocking_is_gp()) |
| 1450 | return; | 1485 | return; |
| 1451 | 1486 | ||
| 1487 | init_rcu_head_on_stack(&rcu.head); | ||
| 1452 | init_completion(&rcu.completion); | 1488 | init_completion(&rcu.completion); |
| 1453 | /* Will wake me after RCU finished. */ | 1489 | /* Will wake me after RCU finished. */ |
| 1454 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | 1490 | call_rcu_sched(&rcu.head, wakeme_after_rcu); |
| 1455 | /* Wait for it. */ | 1491 | /* Wait for it. */ |
| 1456 | wait_for_completion(&rcu.completion); | 1492 | wait_for_completion(&rcu.completion); |
| 1493 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 1457 | } | 1494 | } |
| 1458 | EXPORT_SYMBOL_GPL(synchronize_sched); | 1495 | EXPORT_SYMBOL_GPL(synchronize_sched); |
| 1459 | 1496 | ||
| @@ -1473,11 +1510,13 @@ void synchronize_rcu_bh(void) | |||
| 1473 | if (rcu_blocking_is_gp()) | 1510 | if (rcu_blocking_is_gp()) |
| 1474 | return; | 1511 | return; |
| 1475 | 1512 | ||
| 1513 | init_rcu_head_on_stack(&rcu.head); | ||
| 1476 | init_completion(&rcu.completion); | 1514 | init_completion(&rcu.completion); |
| 1477 | /* Will wake me after RCU finished. */ | 1515 | /* Will wake me after RCU finished. */ |
| 1478 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | 1516 | call_rcu_bh(&rcu.head, wakeme_after_rcu); |
| 1479 | /* Wait for it. */ | 1517 | /* Wait for it. */ |
| 1480 | wait_for_completion(&rcu.completion); | 1518 | wait_for_completion(&rcu.completion); |
| 1519 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 1481 | } | 1520 | } |
| 1482 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | 1521 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
| 1483 | 1522 | ||
| @@ -1498,8 +1537,20 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 1498 | check_cpu_stall(rsp, rdp); | 1537 | check_cpu_stall(rsp, rdp); |
| 1499 | 1538 | ||
| 1500 | /* Is the RCU core waiting for a quiescent state from this CPU? */ | 1539 | /* Is the RCU core waiting for a quiescent state from this CPU? */ |
| 1501 | if (rdp->qs_pending) { | 1540 | if (rdp->qs_pending && !rdp->passed_quiesc) { |
| 1541 | |||
| 1542 | /* | ||
| 1543 | * If force_quiescent_state() coming soon and this CPU | ||
| 1544 | * needs a quiescent state, and this is either RCU-sched | ||
| 1545 | * or RCU-bh, force a local reschedule. | ||
| 1546 | */ | ||
| 1502 | rdp->n_rp_qs_pending++; | 1547 | rdp->n_rp_qs_pending++; |
| 1548 | if (!rdp->preemptable && | ||
| 1549 | ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, | ||
| 1550 | jiffies)) | ||
| 1551 | set_need_resched(); | ||
| 1552 | } else if (rdp->qs_pending && rdp->passed_quiesc) { | ||
| 1553 | rdp->n_rp_report_qs++; | ||
| 1503 | return 1; | 1554 | return 1; |
| 1504 | } | 1555 | } |
| 1505 | 1556 | ||
| @@ -1767,6 +1818,21 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
| 1767 | } | 1818 | } |
| 1768 | 1819 | ||
| 1769 | /* | 1820 | /* |
| 1821 | * This function is invoked towards the end of the scheduler's initialization | ||
| 1822 | * process. Before this is called, the idle task might contain | ||
| 1823 | * RCU read-side critical sections (during which time, this idle | ||
| 1824 | * task is booting the system). After this function is called, the | ||
| 1825 | * idle tasks are prohibited from containing RCU read-side critical | ||
| 1826 | * sections. This function also enables RCU lockdep checking. | ||
| 1827 | */ | ||
| 1828 | void rcu_scheduler_starting(void) | ||
| 1829 | { | ||
| 1830 | WARN_ON(num_online_cpus() != 1); | ||
| 1831 | WARN_ON(nr_context_switches() > 0); | ||
| 1832 | rcu_scheduler_active = 1; | ||
| 1833 | } | ||
| 1834 | |||
| 1835 | /* | ||
| 1770 | * Compute the per-level fanout, either using the exact fanout specified | 1836 | * Compute the per-level fanout, either using the exact fanout specified |
| 1771 | * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. | 1837 | * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. |
| 1772 | */ | 1838 | */ |
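rcu_scheduler_starting() records that the scheduler is live; before that point only one task has ever run, so every grace period is vacuously complete. The consumer of rcu_scheduler_active is the early-boot fast path in the synchronize_*() primitives, visible in the synchronize_rcu() hunk later in this patch and compressed into this sketch:

	/* Sketch: early-boot short-circuit gated by rcu_scheduler_active. */
	void synchronize_rcu(void)
	{
		if (!rcu_scheduler_active)
			return;	/* one task, no context switches yet: nothing to wait for */

		/* ...otherwise queue wakeme_after_rcu() and wait, as shown below. */
	}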
| @@ -1849,6 +1915,14 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
| 1849 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); | 1915 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); |
| 1850 | } | 1916 | } |
| 1851 | } | 1917 | } |
| 1918 | |||
| 1919 | rnp = rsp->level[NUM_RCU_LVLS - 1]; | ||
| 1920 | for_each_possible_cpu(i) { | ||
| 1921 | while (i > rnp->grphi) | ||
| 1922 | rnp++; | ||
| 1923 | rsp->rda[i]->mynode = rnp; | ||
| 1924 | rcu_boot_init_percpu_data(i, rsp); | ||
| 1925 | } | ||
| 1852 | } | 1926 | } |
| 1853 | 1927 | ||
| 1854 | /* | 1928 | /* |
| @@ -1859,19 +1933,11 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
| 1859 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ | 1933 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ |
| 1860 | do { \ | 1934 | do { \ |
| 1861 | int i; \ | 1935 | int i; \ |
| 1862 | int j; \ | ||
| 1863 | struct rcu_node *rnp; \ | ||
| 1864 | \ | 1936 | \ |
| 1865 | rcu_init_one(rsp); \ | ||
| 1866 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ | ||
| 1867 | j = 0; \ | ||
| 1868 | for_each_possible_cpu(i) { \ | 1937 | for_each_possible_cpu(i) { \ |
| 1869 | if (i > rnp[j].grphi) \ | ||
| 1870 | j++; \ | ||
| 1871 | per_cpu(rcu_data, i).mynode = &rnp[j]; \ | ||
| 1872 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ | 1938 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ |
| 1873 | rcu_boot_init_percpu_data(i, rsp); \ | ||
| 1874 | } \ | 1939 | } \ |
| 1940 | rcu_init_one(rsp); \ | ||
| 1875 | } while (0) | 1941 | } while (0) |
| 1876 | 1942 | ||
| 1877 | void __init rcu_init(void) | 1943 | void __init rcu_init(void) |
| @@ -1879,12 +1945,6 @@ void __init rcu_init(void) | |||
| 1879 | int cpu; | 1945 | int cpu; |
| 1880 | 1946 | ||
| 1881 | rcu_bootup_announce(); | 1947 | rcu_bootup_announce(); |
| 1882 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 1883 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | ||
| 1884 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 1885 | #if NUM_RCU_LVL_4 != 0 | ||
| 1886 | printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n"); | ||
| 1887 | #endif /* #if NUM_RCU_LVL_4 != 0 */ | ||
| 1888 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); | 1948 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); |
| 1889 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); | 1949 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); |
| 1890 | __rcu_init_preempt(); | 1950 | __rcu_init_preempt(); |
| @@ -1898,6 +1958,7 @@ void __init rcu_init(void) | |||
| 1898 | cpu_notifier(rcu_cpu_notify, 0); | 1958 | cpu_notifier(rcu_cpu_notify, 0); |
| 1899 | for_each_online_cpu(cpu) | 1959 | for_each_online_cpu(cpu) |
| 1900 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); | 1960 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); |
| 1961 | check_cpu_stall_init(); | ||
| 1901 | } | 1962 | } |
| 1902 | 1963 | ||
| 1903 | #include "rcutree_plugin.h" | 1964 | #include "rcutree_plugin.h" |
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 4a525a30e08e..14c040b18ed0 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
| @@ -223,6 +223,7 @@ struct rcu_data { | |||
| 223 | /* 5) __rcu_pending() statistics. */ | 223 | /* 5) __rcu_pending() statistics. */ |
| 224 | unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ | 224 | unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ |
| 225 | unsigned long n_rp_qs_pending; | 225 | unsigned long n_rp_qs_pending; |
| 226 | unsigned long n_rp_report_qs; | ||
| 226 | unsigned long n_rp_cb_ready; | 227 | unsigned long n_rp_cb_ready; |
| 227 | unsigned long n_rp_cpu_needs_gp; | 228 | unsigned long n_rp_cpu_needs_gp; |
| 228 | unsigned long n_rp_gp_completed; | 229 | unsigned long n_rp_gp_completed; |
| @@ -326,6 +327,7 @@ struct rcu_state { | |||
| 326 | unsigned long jiffies_stall; /* Time at which to check */ | 327 | unsigned long jiffies_stall; /* Time at which to check */ |
| 327 | /* for CPU stalls. */ | 328 | /* for CPU stalls. */ |
| 328 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 329 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
| 330 | char *name; /* Name of structure. */ | ||
| 329 | }; | 331 | }; |
| 330 | 332 | ||
| 331 | /* Return values for rcu_preempt_offline_tasks(). */ | 333 | /* Return values for rcu_preempt_offline_tasks(). */ |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 79b53bda8943..0e4f420245d9 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
| @@ -26,6 +26,45 @@ | |||
| 26 | 26 | ||
| 27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
| 28 | 28 | ||
| 29 | /* | ||
| 30 | * Check the RCU kernel configuration parameters and print informative | ||
| 31 | * messages about anything out of the ordinary. If you like #ifdef, you | ||
| 32 | * will love this function. | ||
| 33 | */ | ||
| 34 | static void __init rcu_bootup_announce_oddness(void) | ||
| 35 | { | ||
| 36 | #ifdef CONFIG_RCU_TRACE | ||
| 37 | printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n"); | ||
| 38 | #endif | ||
| 39 | #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32) | ||
| 40 | printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n", | ||
| 41 | CONFIG_RCU_FANOUT); | ||
| 42 | #endif | ||
| 43 | #ifdef CONFIG_RCU_FANOUT_EXACT | ||
| 44 | printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n"); | ||
| 45 | #endif | ||
| 46 | #ifdef CONFIG_RCU_FAST_NO_HZ | ||
| 47 | printk(KERN_INFO | ||
| 48 | "\tRCU dyntick-idle grace-period acceleration is enabled.\n"); | ||
| 49 | #endif | ||
| 50 | #ifdef CONFIG_PROVE_RCU | ||
| 51 | printk(KERN_INFO "\tRCU lockdep checking is enabled.\n"); | ||
| 52 | #endif | ||
| 53 | #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE | ||
| 54 | printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); | ||
| 55 | #endif | ||
| 56 | #ifndef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 57 | printk(KERN_INFO | ||
| 58 | "\tRCU-based detection of stalled CPUs is disabled.\n"); | ||
| 59 | #endif | ||
| 60 | #ifndef CONFIG_RCU_CPU_STALL_VERBOSE | ||
| 61 | printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); | ||
| 62 | #endif | ||
| 63 | #if NUM_RCU_LVL_4 != 0 | ||
| 64 | printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n"); | ||
| 65 | #endif | ||
| 66 | } | ||
| 67 | |||
| 29 | #ifdef CONFIG_TREE_PREEMPT_RCU | 68 | #ifdef CONFIG_TREE_PREEMPT_RCU |
| 30 | 69 | ||
| 31 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); | 70 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); |
| @@ -38,8 +77,8 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp); | |||
| 38 | */ | 77 | */ |
| 39 | static void __init rcu_bootup_announce(void) | 78 | static void __init rcu_bootup_announce(void) |
| 40 | { | 79 | { |
| 41 | printk(KERN_INFO | 80 | printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n"); |
| 42 | "Experimental preemptable hierarchical RCU implementation.\n"); | 81 | rcu_bootup_announce_oddness(); |
| 43 | } | 82 | } |
| 44 | 83 | ||
| 45 | /* | 84 | /* |
| @@ -75,13 +114,19 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | |||
| 75 | * that this just means that the task currently running on the CPU is | 114 | * that this just means that the task currently running on the CPU is |
| 76 | * not in a quiescent state. There might be any number of tasks blocked | 115 | * not in a quiescent state. There might be any number of tasks blocked |
| 77 | * while in an RCU read-side critical section. | 116 | * while in an RCU read-side critical section. |
| 117 | * | ||
| 118 | * Unlike the other rcu_*_qs() functions, callers to this function | ||
| 119 | * must disable irqs in order to protect the assignment to | ||
| 120 | * ->rcu_read_unlock_special. | ||
| 78 | */ | 121 | */ |
| 79 | static void rcu_preempt_qs(int cpu) | 122 | static void rcu_preempt_qs(int cpu) |
| 80 | { | 123 | { |
| 81 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | 124 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); |
| 125 | |||
| 82 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | 126 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
| 83 | barrier(); | 127 | barrier(); |
| 84 | rdp->passed_quiesc = 1; | 128 | rdp->passed_quiesc = 1; |
| 129 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | ||
| 85 | } | 130 | } |
| 86 | 131 | ||
| 87 | /* | 132 | /* |
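Because rcu_preempt_qs() now clears RCU_READ_UNLOCK_NEED_QS itself, every caller must hold off interrupts so the read-modify-write of current->rcu_read_unlock_special cannot race with an interrupt handler touching the same word. The calling convention that the later hunks switch to looks like this sketch:

	/* Sketch: the caller supplies the irq-disabled region. */
	static void note_preempt_quiescent_state(int cpu)
	{
		unsigned long flags;

		local_irq_save(flags);
		rcu_preempt_qs(cpu);	/* updates ->rcu_read_unlock_special safely */
		local_irq_restore(flags);
	}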
| @@ -144,9 +189,8 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
| 144 | * grace period, then the fact that the task has been enqueued | 189 | * grace period, then the fact that the task has been enqueued |
| 145 | * means that we continue to block the current grace period. | 190 | * means that we continue to block the current grace period. |
| 146 | */ | 191 | */ |
| 147 | rcu_preempt_qs(cpu); | ||
| 148 | local_irq_save(flags); | 192 | local_irq_save(flags); |
| 149 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | 193 | rcu_preempt_qs(cpu); |
| 150 | local_irq_restore(flags); | 194 | local_irq_restore(flags); |
| 151 | } | 195 | } |
| 152 | 196 | ||
| @@ -236,7 +280,6 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
| 236 | */ | 280 | */ |
| 237 | special = t->rcu_read_unlock_special; | 281 | special = t->rcu_read_unlock_special; |
| 238 | if (special & RCU_READ_UNLOCK_NEED_QS) { | 282 | if (special & RCU_READ_UNLOCK_NEED_QS) { |
| 239 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | ||
| 240 | rcu_preempt_qs(smp_processor_id()); | 283 | rcu_preempt_qs(smp_processor_id()); |
| 241 | } | 284 | } |
| 242 | 285 | ||
| @@ -473,7 +516,6 @@ static void rcu_preempt_check_callbacks(int cpu) | |||
| 473 | struct task_struct *t = current; | 516 | struct task_struct *t = current; |
| 474 | 517 | ||
| 475 | if (t->rcu_read_lock_nesting == 0) { | 518 | if (t->rcu_read_lock_nesting == 0) { |
| 476 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | ||
| 477 | rcu_preempt_qs(cpu); | 519 | rcu_preempt_qs(cpu); |
| 478 | return; | 520 | return; |
| 479 | } | 521 | } |
| @@ -515,11 +557,13 @@ void synchronize_rcu(void) | |||
| 515 | if (!rcu_scheduler_active) | 557 | if (!rcu_scheduler_active) |
| 516 | return; | 558 | return; |
| 517 | 559 | ||
| 560 | init_rcu_head_on_stack(&rcu.head); | ||
| 518 | init_completion(&rcu.completion); | 561 | init_completion(&rcu.completion); |
| 519 | /* Will wake me after RCU finished. */ | 562 | /* Will wake me after RCU finished. */ |
| 520 | call_rcu(&rcu.head, wakeme_after_rcu); | 563 | call_rcu(&rcu.head, wakeme_after_rcu); |
| 521 | /* Wait for it. */ | 564 | /* Wait for it. */ |
| 522 | wait_for_completion(&rcu.completion); | 565 | wait_for_completion(&rcu.completion); |
| 566 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 523 | } | 567 | } |
| 524 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 568 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
| 525 | 569 | ||
| @@ -754,6 +798,7 @@ void exit_rcu(void) | |||
| 754 | static void __init rcu_bootup_announce(void) | 798 | static void __init rcu_bootup_announce(void) |
| 755 | { | 799 | { |
| 756 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | 800 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); |
| 801 | rcu_bootup_announce_oddness(); | ||
| 757 | } | 802 | } |
| 758 | 803 | ||
| 759 | /* | 804 | /* |
| @@ -1008,6 +1053,8 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); | |||
| 1008 | int rcu_needs_cpu(int cpu) | 1053 | int rcu_needs_cpu(int cpu) |
| 1009 | { | 1054 | { |
| 1010 | int c = 0; | 1055 | int c = 0; |
| 1056 | int snap; | ||
| 1057 | int snap_nmi; | ||
| 1011 | int thatcpu; | 1058 | int thatcpu; |
| 1012 | 1059 | ||
| 1013 | /* Check for being in the holdoff period. */ | 1060 | /* Check for being in the holdoff period. */ |
| @@ -1015,12 +1062,18 @@ int rcu_needs_cpu(int cpu) | |||
| 1015 | return rcu_needs_cpu_quick_check(cpu); | 1062 | return rcu_needs_cpu_quick_check(cpu); |
| 1016 | 1063 | ||
| 1017 | /* Don't bother unless we are the last non-dyntick-idle CPU. */ | 1064 | /* Don't bother unless we are the last non-dyntick-idle CPU. */ |
| 1018 | for_each_cpu_not(thatcpu, nohz_cpu_mask) | 1065 | for_each_online_cpu(thatcpu) { |
| 1019 | if (thatcpu != cpu) { | 1066 | if (thatcpu == cpu) |
| 1067 | continue; | ||
| 1068 | snap = per_cpu(rcu_dynticks, thatcpu).dynticks; | ||
| 1069 | snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi; | ||
| 1070 | smp_mb(); /* Order sampling of snap with end of grace period. */ | ||
| 1071 | if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) { | ||
| 1020 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 1072 | per_cpu(rcu_dyntick_drain, cpu) = 0; |
| 1021 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | 1073 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; |
| 1022 | return rcu_needs_cpu_quick_check(cpu); | 1074 | return rcu_needs_cpu_quick_check(cpu); |
| 1023 | } | 1075 | } |
| 1076 | } | ||
| 1024 | 1077 | ||
| 1025 | /* Check and update the rcu_dyntick_drain sequencing. */ | 1078 | /* Check and update the rcu_dyntick_drain sequencing. */ |
| 1026 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 1079 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { |
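The rewritten loop samples every other online CPU's dynticks counters directly instead of consulting nohz_cpu_mask. The counters increment on each idle transition, so an odd value means that CPU is currently outside dyntick-idle (or handling an NMI); in that case this CPU is not the last one awake and can defer the callback-draining work. A sketch of the predicate, using the per-CPU rcu_dynticks fields sampled above:

	/* Sketch: is "thatcpu" currently non-idle from RCU's point of view? */
	static int cpu_is_nonidle(int thatcpu)
	{
		int snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
		int snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;

		smp_mb();	/* order the samples against later grace-period checks */
		return ((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0);
	}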
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index d45db2e35d27..36c95b45738e 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
| @@ -241,11 +241,13 @@ static const struct file_operations rcugp_fops = { | |||
| 241 | static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp) | 241 | static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp) |
| 242 | { | 242 | { |
| 243 | seq_printf(m, "%3d%cnp=%ld " | 243 | seq_printf(m, "%3d%cnp=%ld " |
| 244 | "qsp=%ld cbr=%ld cng=%ld gpc=%ld gps=%ld nf=%ld nn=%ld\n", | 244 | "qsp=%ld rpq=%ld cbr=%ld cng=%ld " |
| 245 | "gpc=%ld gps=%ld nf=%ld nn=%ld\n", | ||
| 245 | rdp->cpu, | 246 | rdp->cpu, |
| 246 | cpu_is_offline(rdp->cpu) ? '!' : ' ', | 247 | cpu_is_offline(rdp->cpu) ? '!' : ' ', |
| 247 | rdp->n_rcu_pending, | 248 | rdp->n_rcu_pending, |
| 248 | rdp->n_rp_qs_pending, | 249 | rdp->n_rp_qs_pending, |
| 250 | rdp->n_rp_report_qs, | ||
| 249 | rdp->n_rp_cb_ready, | 251 | rdp->n_rp_cb_ready, |
| 250 | rdp->n_rp_cpu_needs_gp, | 252 | rdp->n_rp_cpu_needs_gp, |
| 251 | rdp->n_rp_gp_completed, | 253 | rdp->n_rp_gp_completed, |
diff --git a/kernel/sched.c b/kernel/sched.c index 3c2a54f70ffe..d8a213ccdc3b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -3706,7 +3706,7 @@ need_resched: | |||
| 3706 | preempt_disable(); | 3706 | preempt_disable(); |
| 3707 | cpu = smp_processor_id(); | 3707 | cpu = smp_processor_id(); |
| 3708 | rq = cpu_rq(cpu); | 3708 | rq = cpu_rq(cpu); |
| 3709 | rcu_sched_qs(cpu); | 3709 | rcu_note_context_switch(cpu); |
| 3710 | prev = rq->curr; | 3710 | prev = rq->curr; |
| 3711 | switch_count = &prev->nivcsw; | 3711 | switch_count = &prev->nivcsw; |
| 3712 | 3712 | ||
diff --git a/kernel/softirq.c b/kernel/softirq.c index 7c1a67ef0274..0db913a5c60f 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -716,7 +716,7 @@ static int run_ksoftirqd(void * __bind_cpu) | |||
| 716 | preempt_enable_no_resched(); | 716 | preempt_enable_no_resched(); |
| 717 | cond_resched(); | 717 | cond_resched(); |
| 718 | preempt_disable(); | 718 | preempt_disable(); |
| 719 | rcu_sched_qs((long)__bind_cpu); | 719 | rcu_note_context_switch((long)__bind_cpu); |
| 720 | } | 720 | } |
| 721 | preempt_enable(); | 721 | preempt_enable(); |
| 722 | set_current_state(TASK_INTERRUPTIBLE); | 722 | set_current_state(TASK_INTERRUPTIBLE); |
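Both the scheduler and ksoftirqd now call rcu_note_context_switch() rather than rcu_sched_qs() directly, so one hook can report the quiescent state to RCU-sched and also let preemptible RCU account for the context switch. A plausible sketch of that wrapper (the actual body lives in kernel/rcutree.c and is an assumption here):

	/* Sketch: one context-switch hook fanning out to both RCU flavors. */
	void rcu_note_context_switch(int cpu)
	{
		rcu_sched_qs(cpu);			/* quiescent state for RCU-sched */
		rcu_preempt_note_context_switch(cpu);	/* bookkeeping for TREE_PREEMPT_RCU */
	}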
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 935248bdbc47..930a9e5eae08 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -512,6 +512,18 @@ config PROVE_RCU | |||
| 512 | 512 | ||
| 513 | Say N if you are unsure. | 513 | Say N if you are unsure. |
| 514 | 514 | ||
| 515 | config PROVE_RCU_REPEATEDLY | ||
| 516 | bool "RCU debugging: don't disable PROVE_RCU on first splat" | ||
| 517 | depends on PROVE_RCU | ||
| 518 | default n | ||
| 519 | help | ||
| 520 | By itself, PROVE_RCU will disable checking upon issuing the | ||
| 521 | first warning (or "splat"). This feature prevents such | ||
| 522 | disabling, allowing multiple RCU-lockdep warnings to be printed | ||
| 523 | on a single reboot. | ||
| 524 | |||
| 525 | Say N if you are unsure. | ||
| 526 | |||
| 515 | config LOCKDEP | 527 | config LOCKDEP |
| 516 | bool | 528 | bool |
| 517 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 529 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
| @@ -793,7 +805,7 @@ config RCU_CPU_STALL_DETECTOR | |||
| 793 | config RCU_CPU_STALL_VERBOSE | 805 | config RCU_CPU_STALL_VERBOSE |
| 794 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" | 806 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" |
| 795 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU | 807 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU |
| 796 | default n | 808 | default y |
| 797 | help | 809 | help |
| 798 | This option causes RCU to printk detailed per-task information | 810 | This option causes RCU to printk detailed per-task information |
| 799 | for any tasks that are stalling the current RCU grace period. | 811 | for any tasks that are stalling the current RCU grace period. |
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index bf007a43c053..deebcc57d4e6 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c | |||
| @@ -141,6 +141,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
| 141 | obj->object = addr; | 141 | obj->object = addr; |
| 142 | obj->descr = descr; | 142 | obj->descr = descr; |
| 143 | obj->state = ODEBUG_STATE_NONE; | 143 | obj->state = ODEBUG_STATE_NONE; |
| 144 | obj->astate = 0; | ||
| 144 | hlist_del(&obj->node); | 145 | hlist_del(&obj->node); |
| 145 | 146 | ||
| 146 | hlist_add_head(&obj->node, &b->list); | 147 | hlist_add_head(&obj->node, &b->list); |
| @@ -252,8 +253,10 @@ static void debug_print_object(struct debug_obj *obj, char *msg) | |||
| 252 | 253 | ||
| 253 | if (limit < 5 && obj->descr != descr_test) { | 254 | if (limit < 5 && obj->descr != descr_test) { |
| 254 | limit++; | 255 | limit++; |
| 255 | WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, | 256 | WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) " |
| 256 | obj_states[obj->state], obj->descr->name); | 257 | "object type: %s\n", |
| 258 | msg, obj_states[obj->state], obj->astate, | ||
| 259 | obj->descr->name); | ||
| 257 | } | 260 | } |
| 258 | debug_objects_warnings++; | 261 | debug_objects_warnings++; |
| 259 | } | 262 | } |
| @@ -447,7 +450,10 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) | |||
| 447 | case ODEBUG_STATE_INIT: | 450 | case ODEBUG_STATE_INIT: |
| 448 | case ODEBUG_STATE_INACTIVE: | 451 | case ODEBUG_STATE_INACTIVE: |
| 449 | case ODEBUG_STATE_ACTIVE: | 452 | case ODEBUG_STATE_ACTIVE: |
| 450 | obj->state = ODEBUG_STATE_INACTIVE; | 453 | if (!obj->astate) |
| 454 | obj->state = ODEBUG_STATE_INACTIVE; | ||
| 455 | else | ||
| 456 | debug_print_object(obj, "deactivate"); | ||
| 451 | break; | 457 | break; |
| 452 | 458 | ||
| 453 | case ODEBUG_STATE_DESTROYED: | 459 | case ODEBUG_STATE_DESTROYED: |
| @@ -553,6 +559,53 @@ out_unlock: | |||
| 553 | raw_spin_unlock_irqrestore(&db->lock, flags); | 559 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 554 | } | 560 | } |
| 555 | 561 | ||
| 562 | /** | ||
| 563 | * debug_object_active_state - debug checks object usage state machine | ||
| 564 | * @addr: address of the object | ||
| 565 | * @descr: pointer to an object specific debug description structure | ||
| 566 | * @expect: expected state | ||
| 567 | * @next: state to move to if expected state is found | ||
| 568 | */ | ||
| 569 | void | ||
| 570 | debug_object_active_state(void *addr, struct debug_obj_descr *descr, | ||
| 571 | unsigned int expect, unsigned int next) | ||
| 572 | { | ||
| 573 | struct debug_bucket *db; | ||
| 574 | struct debug_obj *obj; | ||
| 575 | unsigned long flags; | ||
| 576 | |||
| 577 | if (!debug_objects_enabled) | ||
| 578 | return; | ||
| 579 | |||
| 580 | db = get_bucket((unsigned long) addr); | ||
| 581 | |||
| 582 | raw_spin_lock_irqsave(&db->lock, flags); | ||
| 583 | |||
| 584 | obj = lookup_object(addr, db); | ||
| 585 | if (obj) { | ||
| 586 | switch (obj->state) { | ||
| 587 | case ODEBUG_STATE_ACTIVE: | ||
| 588 | if (obj->astate == expect) | ||
| 589 | obj->astate = next; | ||
| 590 | else | ||
| 591 | debug_print_object(obj, "active_state"); | ||
| 592 | break; | ||
| 593 | |||
| 594 | default: | ||
| 595 | debug_print_object(obj, "active_state"); | ||
| 596 | break; | ||
| 597 | } | ||
| 598 | } else { | ||
| 599 | struct debug_obj o = { .object = addr, | ||
| 600 | .state = ODEBUG_STATE_NOTAVAILABLE, | ||
| 601 | .descr = descr }; | ||
| 602 | |||
| 603 | debug_print_object(&o, "active_state"); | ||
| 604 | } | ||
| 605 | |||
| 606 | raw_spin_unlock_irqrestore(&db->lock, flags); | ||
| 607 | } | ||
| 608 | |||
| 556 | #ifdef CONFIG_DEBUG_OBJECTS_FREE | 609 | #ifdef CONFIG_DEBUG_OBJECTS_FREE |
| 557 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) | 610 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) |
| 558 | { | 611 | { |
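debug_object_active_state() layers a small per-object state machine (obj->astate) on top of ODEBUG_STATE_ACTIVE, and debug_object_deactivate() above now refuses to deactivate an object whose astate is nonzero. As a hedged illustration only, with made-up state names and a hypothetical my_object type rather than anything from this patch, a subsystem that must not see an object torn down while its callback runs could do:

	/* Hypothetical active-state values for this sketch. */
	#define MY_OBJ_IDLE			0	/* astate 0 also permits deactivation */
	#define MY_OBJ_CALLBACK_RUNNING		1

	static struct debug_obj_descr my_debug_descr = {
		.name = "my_object",
	};

	static void run_callback(struct my_object *obj)
	{
		/* Warn unless the object is ACTIVE with astate == MY_OBJ_IDLE. */
		debug_object_active_state(obj, &my_debug_descr,
					  MY_OBJ_IDLE, MY_OBJ_CALLBACK_RUNNING);
		do_the_callback(obj);			/* hypothetical work */
		debug_object_active_state(obj, &my_debug_descr,
					  MY_OBJ_CALLBACK_RUNNING, MY_OBJ_IDLE);
	}

Any attempt to deactivate or free the object between the two transitions then triggers the "(active state %u)" warning added to debug_print_object() above.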
