author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-06-17 18:53:19 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-09-29 00:38:12 -0400
commit     29c00b4a1d9e277786120032aa8364631820d863 (patch)
tree       23bf0c8cdb5268ef92ef9ed007639705e922b1b0 /kernel
parent     9d68197c05201d8edc70d58bd1d5dad05d8455e8 (diff)
rcu: Add event-tracing for RCU callback invocation
There was recently some controversy about the overhead of invoking RCU
callbacks. Add TRACE_EVENT()s to obtain fine-grained timings for the
start and stop of a batch of callbacks and also for each callback invoked.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
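
The TRACE_EVENT() definitions themselves live in include/trace/events/rcu.h, which falls outside this kernel/-only diffstat. As a rough sketch of what such definitions look like — the field names and format strings below are assumptions inferred from the call sites in the diff, not the actual header — the batch-start/batch-end pair might be declared along these lines:

```c
/* Illustrative sketch only; not the real include/trace/events/rcu.h. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu

#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RCU_H

#include <linux/tracepoint.h>

/* Fired just before a batch of callbacks is invoked. */
TRACE_EVENT(rcu_batch_start,

	TP_PROTO(long qlen, int blimit),

	TP_ARGS(qlen, blimit),

	TP_STRUCT__entry(
		__field(long, qlen)
		__field(int, blimit)
	),

	TP_fast_assign(
		__entry->qlen = qlen;
		__entry->blimit = blimit;
	),

	TP_printk("CBs=%ld bl=%d", __entry->qlen, __entry->blimit)
);

/* Fired after the batch completes, recording how many callbacks ran. */
TRACE_EVENT(rcu_batch_end,

	TP_PROTO(int callbacks_invoked),

	TP_ARGS(callbacks_invoked),

	TP_STRUCT__entry(
		__field(int, callbacks_invoked)
	),

	TP_fast_assign(
		__entry->callbacks_invoked = callbacks_invoked;
	),

	TP_printk("CBs-invoked=%d", __entry->callbacks_invoked)
);

#endif /* _TRACE_RCU_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>
```

With CREATE_TRACE_POINTS defined in exactly one translation unit (kernel/rcupdate.c in this patch), including the header both declares the trace_rcu_*() calls used below and emits their definitions.
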
Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/rcu.h      | 79
 -rw-r--r--  kernel/rcupdate.c |  5
 -rw-r--r--  kernel/rcutiny.c  | 26
 -rw-r--r--  kernel/rcutree.c  | 15

 4 files changed, 121 insertions, 4 deletions
diff --git a/kernel/rcu.h b/kernel/rcu.h
new file mode 100644
index 000000000000..7bc16436aba0
--- /dev/null
+++ b/kernel/rcu.h
@@ -0,0 +1,79 @@
+/*
+ * Read-Copy Update definitions shared among RCU implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2011
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __LINUX_RCU_H
+#define __LINUX_RCU_H
+
+/*
+ * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
+ * by call_rcu() and rcu callback execution, and are therefore not part of the
+ * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
+ */
+
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+# define STATE_RCU_HEAD_READY	0
+# define STATE_RCU_HEAD_QUEUED	1
+
+extern struct debug_obj_descr rcuhead_debug_descr;
+
+static inline void debug_rcu_head_queue(struct rcu_head *head)
+{
+	WARN_ON_ONCE((unsigned long)head & 0x3);
+	debug_object_activate(head, &rcuhead_debug_descr);
+	debug_object_active_state(head, &rcuhead_debug_descr,
+				  STATE_RCU_HEAD_READY,
+				  STATE_RCU_HEAD_QUEUED);
+}
+
+static inline void debug_rcu_head_unqueue(struct rcu_head *head)
+{
+	debug_object_active_state(head, &rcuhead_debug_descr,
+				  STATE_RCU_HEAD_QUEUED,
+				  STATE_RCU_HEAD_READY);
+	debug_object_deactivate(head, &rcuhead_debug_descr);
+}
+#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void debug_rcu_head_queue(struct rcu_head *head)
+{
+}
+
+static inline void debug_rcu_head_unqueue(struct rcu_head *head)
+{
+}
+#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+extern void kfree(const void *);
+
+static inline void __rcu_reclaim(struct rcu_head *head)
+{
+	unsigned long offset = (unsigned long)head->func;
+
+	if (__is_kfree_rcu_offset(offset)) {
+		trace_rcu_invoke_kfree_callback(head, offset);
+		kfree((void *)head - offset);
+	} else {
+		trace_rcu_invoke_callback(head);
+		head->func(head);
+	}
+}
+
+#endif /* __LINUX_RCU_H */
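
One note on __rcu_reclaim() above: head->func normally holds the callback function passed to call_rcu(), but kfree_rcu() instead stores the offset of the rcu_head within its enclosing structure, which __is_kfree_rcu_offset() can distinguish because such small offsets cannot be valid function addresses. That is why the new tracepoints split into trace_rcu_invoke_callback() and trace_rcu_invoke_kfree_callback(). A minimal caller-side sketch, assuming the existing call_rcu()/kfree_rcu() API and a hypothetical struct foo:

```c
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rh;	/* embedded rcu_head used for deferred free */
};

/* Function-pointer style: __rcu_reclaim() fires trace_rcu_invoke_callback(). */
static void foo_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rh));
}

static void foo_release(struct foo *fp)
{
	call_rcu(&fp->rh, foo_free_cb);
}

/*
 * Offset style: kfree_rcu() records offsetof(struct foo, rh) in rh.func, so
 * __rcu_reclaim() fires trace_rcu_invoke_kfree_callback() and kfree()s the
 * enclosing object directly, with no callback function to invoke.
 */
static void foo_release_kfree(struct foo *fp)
{
	kfree_rcu(fp, rh);
}
```
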
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 09b3b1b54e02..ca0d23b6b3e8 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -46,6 +46,11 @@
 #include <linux/module.h>
 #include <linux/hardirq.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
 struct lockdep_map rcu_lock_map =
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index f544e343256a..19453ba1392e 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -37,6 +37,25 @@
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
 
+#ifdef CONFIG_RCU_TRACE
+
+#include <trace/events/rcu.h>
+
+#else /* #ifdef CONFIG_RCU_TRACE */
+
+/* No by-default tracing in TINY_RCU: Keep TINY_RCU tiny! */
+static void trace_rcu_invoke_kfree_callback(struct rcu_head *rhp,
+					    unsigned long offset)
+{
+}
+static void trace_rcu_invoke_callback(struct rcu_head *head)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+#include "rcu.h"
+
 /* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
 static struct task_struct *rcu_kthread_task;
 static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
@@ -161,11 +180,15 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	RCU_TRACE(int cb_count = 0);
 
 	/* If no RCU callbacks ready to invoke, just return. */
-	if (&rcp->rcucblist == rcp->donetail)
+	if (&rcp->rcucblist == rcp->donetail) {
+		RCU_TRACE(trace_rcu_batch_start(0, -1));
+		RCU_TRACE(trace_rcu_batch_end(0));
 		return;
+	}
 
 	/* Move the ready-to-invoke callbacks to a local list. */
 	local_irq_save(flags);
+	RCU_TRACE(trace_rcu_batch_start(0, -1));
 	list = rcp->rcucblist;
 	rcp->rcucblist = *rcp->donetail;
 	*rcp->donetail = NULL;
@@ -187,6 +210,7 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 		RCU_TRACE(cb_count++);
 	}
 	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
+	RCU_TRACE(trace_rcu_batch_end(cb_count));
 }
 
 /*
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index a7c6bce1af83..45dcc2036a1e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -52,6 +52,9 @@
 #include <linux/prefetch.h>
 
 #include "rcutree.h"
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
 
 /* Data structures. */
 
@@ -1190,17 +1193,22 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
-	int count;
+	int bl, count;
 
 	/* If no callbacks are ready, just return.*/
-	if (!cpu_has_callbacks_ready_to_invoke(rdp))
+	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
+		trace_rcu_batch_start(0, 0);
+		trace_rcu_batch_end(0);
 		return;
+	}
 
 	/*
 	 * Extract the list of ready callbacks, disabling to prevent
 	 * races with call_rcu() from interrupt handlers.
 	 */
 	local_irq_save(flags);
+	bl = rdp->blimit;
+	trace_rcu_batch_start(rdp->qlen, bl);
 	list = rdp->nxtlist;
 	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
 	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
@@ -1218,11 +1226,12 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 		debug_rcu_head_unqueue(list);
 		__rcu_reclaim(list);
 		list = next;
-		if (++count >= rdp->blimit)
+		if (++count >= bl)
 			break;
 	}
 
 	local_irq_save(flags);
+	trace_rcu_batch_end(count);
 
 	/* Update count, and requeue any remaining callbacks. */
 	rdp->qlen -= count;