-rw-r--r--   include/trace/events/rcu.h | 73
-rw-r--r--   kernel/rcutree.c           | 16
2 files changed, 68 insertions, 21 deletions
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index db3f6e9e63e6..ab458eb689fb 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -7,29 +7,58 @@
 #include <linux/tracepoint.h>
 
 /*
- * Tracepoint for calling rcu_do_batch, performed to start callback invocation:
+ * Tracepoint for start/end markers used for utilization calculations.
+ * By convention, the string is of the following forms:
+ *
+ * "Start <activity>" -- Mark the start of the specified activity,
+ *			 such as "context switch".  Nesting is permitted.
+ * "End <activity>" -- Mark the end of the specified activity.
+ */
+TRACE_EVENT(rcu_utilization,
+
+	TP_PROTO(char *s),
+
+	TP_ARGS(s),
+
+	TP_STRUCT__entry(
+		__field(char *, s)
+	),
+
+	TP_fast_assign(
+		__entry->s = s;
+	),
+
+	TP_printk("%s", __entry->s)
+);
+
+/*
+ * Tracepoint for marking the beginning of rcu_do_batch, performed to start
+ * RCU callback invocation.  The first argument is the total number of
+ * callbacks (including those that are not yet ready to be invoked),
+ * and the second argument is the current RCU-callback batch limit.
  */
 TRACE_EVENT(rcu_batch_start,
 
-	TP_PROTO(long callbacks_ready, int blimit),
+	TP_PROTO(long qlen, int blimit),
 
-	TP_ARGS(callbacks_ready, blimit),
+	TP_ARGS(qlen, blimit),
 
 	TP_STRUCT__entry(
-		__field( long, callbacks_ready )
-		__field( int, blimit )
+		__field(long, qlen)
+		__field(int, blimit)
 	),
 
 	TP_fast_assign(
-		__entry->callbacks_ready = callbacks_ready;
+		__entry->qlen = qlen;
 		__entry->blimit = blimit;
 	),
 
-	TP_printk("CBs=%ld bl=%d", __entry->callbacks_ready, __entry->blimit)
+	TP_printk("CBs=%ld bl=%d", __entry->qlen, __entry->blimit)
 );
 
 /*
- * Tracepoint for the invocation of a single RCU callback
+ * Tracepoint for the invocation of a single RCU callback function.
+ * The argument is a pointer to the RCU callback itself.
  */
 TRACE_EVENT(rcu_invoke_callback,
 
@@ -38,20 +67,23 @@ TRACE_EVENT(rcu_invoke_callback,
 	TP_ARGS(rhp),
 
 	TP_STRUCT__entry(
-		__field( void *, rhp )
-		__field( void *, func )
+		__field(void *, rhp)
+		__field(void *, func)
 	),
 
 	TP_fast_assign(
 		__entry->rhp = rhp;
 		__entry->func = rhp->func;
 	),
 
 	TP_printk("rhp=%p func=%pf", __entry->rhp, __entry->func)
 );
 
 /*
- * Tracepoint for the invocation of a single RCU kfree callback
+ * Tracepoint for the invocation of a single RCU callback of the special
+ * kfree() form.  The first argument is a pointer to the RCU callback
+ * and the second argument is the offset of the callback within the
+ * enclosing RCU-protected data structure.
  */
 TRACE_EVENT(rcu_invoke_kfree_callback,
 
@@ -60,12 +92,12 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
 	TP_ARGS(rhp, offset),
 
 	TP_STRUCT__entry(
-		__field(void *, rhp )
-		__field(unsigned long, offset )
+		__field(void *, rhp)
+		__field(unsigned long, offset)
 	),
 
 	TP_fast_assign(
 		__entry->rhp = rhp;
 		__entry->offset = offset;
 	),
 
@@ -73,7 +105,8 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
 );
 
 /*
- * Tracepoint for leaving rcu_do_batch, performed after callback invocation:
+ * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
+ * invoked.  The first argument is the number of callbacks actually invoked.
  */
 TRACE_EVENT(rcu_batch_end,
 
@@ -82,11 +115,11 @@ TRACE_EVENT(rcu_batch_end,
 	TP_ARGS(callbacks_invoked),
 
 	TP_STRUCT__entry(
-		__field( int, callbacks_invoked )
+		__field(int, callbacks_invoked)
 	),
 
 	TP_fast_assign(
 		__entry->callbacks_invoked = callbacks_invoked;
 	),
 
 	TP_printk("CBs-invoked=%d", __entry->callbacks_invoked)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 45dcc2036a1e..2a9643bd6ae9 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -184,8 +184,10 @@ void rcu_bh_qs(int cpu)
  */
 void rcu_note_context_switch(int cpu)
 {
+	trace_rcu_utilization("Start context switch");
 	rcu_sched_qs(cpu);
 	rcu_preempt_note_context_switch(cpu);
+	trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
@@ -1275,6 +1277,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
+	trace_rcu_utilization("Start scheduler-tick");
 	if (user ||
 	    (idle_cpu(cpu) && rcu_scheduler_active &&
 	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -1308,6 +1311,7 @@ void rcu_check_callbacks(int cpu, int user)
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
 		invoke_rcu_core();
+	trace_rcu_utilization("End scheduler-tick");
 }
 
 #ifdef CONFIG_SMP
@@ -1369,10 +1373,14 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	if (!rcu_gp_in_progress(rsp))
+	trace_rcu_utilization("Start fqs");
+	if (!rcu_gp_in_progress(rsp)) {
+		trace_rcu_utilization("End fqs");
 		return;  /* No grace period in progress, nothing to force. */
+	}
 	if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
 		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
+		trace_rcu_utilization("End fqs");
 		return;	/* Someone else is already on the job. */
 	}
 	if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
@@ -1421,11 +1429,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 		raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
 		rsp->fqs_need_gp = 0;
 		rcu_start_gp(rsp, flags);  /* releases rnp->lock */
+		trace_rcu_utilization("End fqs");
 		return;
 	}
 	raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
 unlock_fqs_ret:
 	raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
+	trace_rcu_utilization("End fqs");
 }
 
 #else /* #ifdef CONFIG_SMP */
@@ -1481,6 +1491,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
+	trace_rcu_utilization("Start RCU core");
 	__rcu_process_callbacks(&rcu_sched_state,
 				&__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
@@ -1488,6 +1499,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 
 	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
 	rcu_needs_cpu_flush();
+	trace_rcu_utilization("End RCU core");
 }
 
 /*
@@ -1910,6 +1922,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;
 
+	trace_rcu_utilization("Start CPU hotplug");
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
@@ -1945,6 +1958,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	default:
 		break;
 	}
+	trace_rcu_utilization("End CPU hotplug");
 	return NOTIFY_OK;
 }
 
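Once this patch is applied, the rcu_utilization markers can be collected through the normal ftrace event interface. The following rough C sketch enables the event and streams its output; the debugfs mount point and event path are assumptions about a typical configuration (run as root and adjust paths for your system):

	#include <stdio.h>
	#include <stdlib.h>

	/*
	 * Sketch: enable the rcu_utilization event via the ftrace debugfs
	 * interface and tail the resulting trace.  Paths assume debugfs is
	 * mounted at /sys/kernel/debug; adjust for your system.
	 */
	#define TRACING "/sys/kernel/debug/tracing"

	static void write_file(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			exit(1);
		}
		fputs(val, f);
		fclose(f);
	}

	int main(void)
	{
		char line[512];
		FILE *pipe;

		write_file(TRACING "/events/rcu/rcu_utilization/enable", "1");

		/* trace_pipe blocks until events arrive, then streams them */
		pipe = fopen(TRACING "/trace_pipe", "r");
		if (!pipe) {
			perror("trace_pipe");
			exit(1);
		}
		while (fgets(line, sizeof(line), pipe))
			fputs(line, stdout);	/* e.g. "rcu_utilization: Start context switch" */
		return 0;
	}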