Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcutree.c		87
-rw-r--r--	kernel/rcutree_plugin.h	32
2 files changed, 69 insertions, 50 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 97994a329d80..338f1d1c1c66 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -53,18 +53,36 @@
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
 #include <linux/random.h>
+#include <linux/ftrace_event.h>
 
 #include "rcutree.h"
 #include <trace/events/rcu.h>
 
 #include "rcu.h"
 
+/*
+ * Strings used in tracepoints need to be exported via the
+ * tracing system such that tools like perf and trace-cmd can
+ * translate the string address pointers to actual text.
+ */
+#define TPS(x)	tracepoint_string(x)
+
 /* Data structures. */
 
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
 
+/*
+ * In order to export the rcu_state name to the tracing tools, it
+ * needs to be added in the __tracepoint_string section.
+ * This requires defining a separate variable tp_<sname>_varname
+ * that points to the string being used, and this will allow
+ * the tracing userspace tools to be able to decipher the string
+ * address to the matching string.
+ */
 #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
+static char sname##_varname[] = #sname; \
+static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \
 struct rcu_state sname##_state = { \
 	.level = { &sname##_state.node[0] }, \
 	.call = cr, \
@@ -76,7 +94,7 @@ struct rcu_state sname##_state = { \
 	.orphan_donetail = &sname##_state.orphan_donelist, \
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
 	.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
-	.name = #sname, \
+	.name = sname##_varname, \
 	.abbr = sabbr, \
 }; \
 DEFINE_PER_CPU(struct rcu_data, sname##_data)
@@ -176,7 +194,7 @@ void rcu_sched_qs(int cpu)
 	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
 
 	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
+		trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
 	rdp->passed_quiesce = 1;
 }
 
@@ -185,7 +203,7 @@ void rcu_bh_qs(int cpu)
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
 
 	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
+		trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
 	rdp->passed_quiesce = 1;
 }
 
@@ -196,10 +214,10 @@ void rcu_bh_qs(int cpu)
  */
 void rcu_note_context_switch(int cpu)
 {
-	trace_rcu_utilization("Start context switch");
+	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_sched_qs(cpu);
 	rcu_preempt_note_context_switch(cpu);
-	trace_rcu_utilization("End context switch");
+	trace_rcu_utilization(TPS("End context switch"));
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
@@ -343,11 +361,11 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 				 bool user)
 {
-	trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
+	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
-		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
+		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
 		ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
@@ -477,7 +495,7 @@ void rcu_irq_exit(void)
 	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
 	if (rdtp->dynticks_nesting)
-		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
+		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
 	else
 		rcu_eqs_enter_common(rdtp, oldval, true);
 	local_irq_restore(flags);
@@ -499,11 +517,11 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 	smp_mb__after_atomic_inc();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
-	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
+	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
-		trace_rcu_dyntick("Error on exit: not idle task",
+		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
 				  oldval, rdtp->dynticks_nesting);
 		ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -618,7 +636,7 @@ void rcu_irq_enter(void)
 	rdtp->dynticks_nesting++;
 	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
 	if (oldval)
-		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
+		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
 	else
 		rcu_eqs_exit_common(rdtp, oldval, true);
 	local_irq_restore(flags);
@@ -773,7 +791,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * of the current RCU grace period.
 	 */
 	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
+		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		rdp->dynticks_fqs++;
 		return 1;
 	}
@@ -793,7 +811,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 		return 0;  /* Grace period is not old enough. */
 	barrier();
 	if (cpu_is_offline(rdp->cpu)) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
+		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
 		rdp->offline_fqs++;
 		return 1;
 	}
@@ -1056,9 +1074,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
 	 * grace period is already marked as needed, return to the caller.
 	 */
 	c = rcu_cbs_completed(rdp->rsp, rnp);
-	trace_rcu_future_gp(rnp, rdp, c, "Startleaf");
+	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
 	if (rnp->need_future_gp[c & 0x1]) {
-		trace_rcu_future_gp(rnp, rdp, c, "Prestartleaf");
+		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
 		return c;
 	}
 
@@ -1072,7 +1090,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
 	if (rnp->gpnum != rnp->completed ||
 	    ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) {
 		rnp->need_future_gp[c & 0x1]++;
-		trace_rcu_future_gp(rnp, rdp, c, "Startedleaf");
+		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
 		return c;
 	}
 
@@ -1100,7 +1118,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
 	 * recorded, trace and leave.
 	 */
 	if (rnp_root->need_future_gp[c & 0x1]) {
-		trace_rcu_future_gp(rnp, rdp, c, "Prestartedroot");
+		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
 		goto unlock_out;
 	}
 
@@ -1109,9 +1127,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
 
 	/* If a grace period is not already in progress, start one. */
 	if (rnp_root->gpnum != rnp_root->completed) {
-		trace_rcu_future_gp(rnp, rdp, c, "Startedleafroot");
+		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
 	} else {
-		trace_rcu_future_gp(rnp, rdp, c, "Startedroot");
+		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
 		rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
 	}
 unlock_out:
@@ -1135,7 +1153,8 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 	rcu_nocb_gp_cleanup(rsp, rnp);
 	rnp->need_future_gp[c & 0x1] = 0;
 	needmore = rnp->need_future_gp[(c + 1) & 0x1];
-	trace_rcu_future_gp(rnp, rdp, c, needmore ? "CleanupMore" : "Cleanup");
+	trace_rcu_future_gp(rnp, rdp, c,
+			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
 	return needmore;
 }
 
1141 1160
@@ -1203,9 +1222,9 @@ static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	/* Trace depending on how much we were able to accelerate. */
 	if (!*rdp->nxttail[RCU_WAIT_TAIL])
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB");
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
 	else
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB");
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
 }
 
 /*
@@ -1271,7 +1290,7 @@ static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struc
 
 		/* Remember that we saw this grace-period completion. */
 		rdp->completed = rnp->completed;
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
 	}
 
 	if (rdp->gpnum != rnp->gpnum) {
@@ -1281,7 +1300,7 @@ static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struc
 		 * go looking for one.
 		 */
 		rdp->gpnum = rnp->gpnum;
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
 		rdp->passed_quiesce = 0;
 		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
 		zero_cpu_stall_ticks(rdp);
@@ -1324,7 +1343,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 
 	/* Advance to a new grace period and initialize state. */
 	rsp->gpnum++;
-	trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
+	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
 	record_gp_stall_check_time(rsp);
 	raw_spin_unlock_irq(&rnp->lock);
 
@@ -1446,7 +1465,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rcu_nocb_gp_set(rnp, nocb);
 
 	rsp->completed = rsp->gpnum; /* Declare grace period done. */
-	trace_rcu_grace_period(rsp->name, rsp->completed, "end");
+	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
 	rsp->fqs_state = RCU_GP_IDLE;
 	rdp = this_cpu_ptr(rsp->rda);
 	rcu_advance_cbs(rsp, rnp, rdp);  /* Reduce false positives below. */
@@ -1855,7 +1874,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 	RCU_TRACE(mask = rdp->grpmask);
 	trace_rcu_grace_period(rsp->name,
 			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
-			       "cpuofl");
+			       TPS("cpuofl"));
 }
 
 /*
@@ -2042,7 +2061,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
-	trace_rcu_utilization("Start scheduler-tick");
+	trace_rcu_utilization(TPS("Start scheduler-tick"));
 	increment_cpu_stall_ticks();
 	if (user || rcu_is_cpu_rrupt_from_idle()) {
 
@@ -2075,7 +2094,7 @@ void rcu_check_callbacks(int cpu, int user)
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
 		invoke_rcu_core();
-	trace_rcu_utilization("End scheduler-tick");
+	trace_rcu_utilization(TPS("End scheduler-tick"));
 }
 
 /*
@@ -2206,10 +2225,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 
 	if (cpu_is_offline(smp_processor_id()))
 		return;
-	trace_rcu_utilization("Start RCU core");
+	trace_rcu_utilization(TPS("Start RCU core"));
 	for_each_rcu_flavor(rsp)
 		__rcu_process_callbacks(rsp);
-	trace_rcu_utilization("End RCU core");
+	trace_rcu_utilization(TPS("End RCU core"));
 }
 
 /*
@@ -2950,7 +2969,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 			rdp->completed = rnp->completed;
 			rdp->passed_quiesce = 0;
 			rdp->qs_pending = 0;
-			trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
+			trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
 		}
 		raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
 		rnp = rnp->parent;
@@ -2980,7 +2999,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
 	struct rcu_node *rnp = rdp->mynode;
 	struct rcu_state *rsp;
 
-	trace_rcu_utilization("Start CPU hotplug");
+	trace_rcu_utilization(TPS("Start CPU hotplug"));
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
@@ -3009,7 +3028,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
 	default:
 		break;
 	}
-	trace_rcu_utilization("End CPU hotplug");
+	trace_rcu_utilization(TPS("End CPU hotplug"));
 	return NOTIFY_OK;
 }
 
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 6976a7dde874..dff86f53ee09 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -167,7 +167,7 @@ static void rcu_preempt_qs(int cpu)
 	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
 
 	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
 	rdp->passed_quiesce = 1;
 	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
@@ -386,7 +386,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		np = rcu_next_node_entry(t, rnp);
 		list_del_init(&t->rcu_node_entry);
 		t->rcu_blocked_node = NULL;
-		trace_rcu_unlock_preempted_task("rcu_preempt",
+		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
 						rnp->gpnum, t->pid);
 		if (&t->rcu_node_entry == rnp->gp_tasks)
 			rnp->gp_tasks = np;
@@ -410,7 +410,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		 */
 		empty_exp_now = !rcu_preempted_readers_exp(rnp);
 		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
-			trace_rcu_quiescent_state_report("preempt_rcu",
+			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
 							 rnp->gpnum,
 							 0, rnp->qsmask,
 							 rnp->level,
@@ -1248,12 +1248,12 @@ static int rcu_boost_kthread(void *arg)
 	int spincnt = 0;
 	int more2boost;
 
-	trace_rcu_utilization("Start boost kthread@init");
+	trace_rcu_utilization(TPS("Start boost kthread@init"));
 	for (;;) {
 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
-		trace_rcu_utilization("End boost kthread@rcu_wait");
+		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
 		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
-		trace_rcu_utilization("Start boost kthread@rcu_wait");
+		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
 		more2boost = rcu_boost(rnp);
 		if (more2boost)
@@ -1262,14 +1262,14 @@ static int rcu_boost_kthread(void *arg)
 			spincnt = 0;
 		if (spincnt > 10) {
 			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
-			trace_rcu_utilization("End boost kthread@rcu_yield");
+			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
 			schedule_timeout_interruptible(2);
-			trace_rcu_utilization("Start boost kthread@rcu_yield");
+			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
 			spincnt = 0;
 		}
 	}
 	/* NOTREACHED */
-	trace_rcu_utilization("End boost kthread@notreached");
+	trace_rcu_utilization(TPS("End boost kthread@notreached"));
 	return 0;
 }
 
@@ -1417,7 +1417,7 @@ static void rcu_cpu_kthread(unsigned int cpu)
 	int spincnt;
 
 	for (spincnt = 0; spincnt < 10; spincnt++) {
-		trace_rcu_utilization("Start CPU kthread@rcu_wait");
+		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
 		local_bh_disable();
 		*statusp = RCU_KTHREAD_RUNNING;
 		this_cpu_inc(rcu_cpu_kthread_loops);
@@ -1429,15 +1429,15 @@ static void rcu_cpu_kthread(unsigned int cpu)
 		rcu_kthread_do_work();
 		local_bh_enable();
 		if (*workp == 0) {
-			trace_rcu_utilization("End CPU kthread@rcu_wait");
+			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
 			*statusp = RCU_KTHREAD_WAITING;
 			return;
 		}
 	}
 	*statusp = RCU_KTHREAD_YIELDING;
-	trace_rcu_utilization("Start CPU kthread@rcu_yield");
+	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
 	schedule_timeout_interruptible(2);
-	trace_rcu_utilization("End CPU kthread@rcu_yield");
+	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
 	*statusp = RCU_KTHREAD_WAITING;
 }
 
@@ -2200,7 +2200,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	 * Wait for the grace period. Do so interruptibly to avoid messing
 	 * up the load average.
 	 */
-	trace_rcu_future_gp(rnp, rdp, c, "StartWait");
+	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
 	for (;;) {
 		wait_event_interruptible(
 			rnp->nocb_gp_wq[c & 0x1],
@@ -2208,9 +2208,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		if (likely(d))
 			break;
 		flush_signals(current);
-		trace_rcu_future_gp(rnp, rdp, c, "ResumeWait");
+		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
 	}
-	trace_rcu_future_gp(rnp, rdp, c, "EndWait");
+	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }
 
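
Illustration (not part of the patch): a minimal, hand-written sketch of what the reworked RCU_STATE_INITIALIZER() generates, assuming the usual rcu_sched invocation with 's' and call_rcu_sched as the sabbr and cr arguments (those arguments are not shown in this diff and appear here only for illustration). The extra tp_<sname>_varname pointer is placed in the __tracepoint_string section, which is what lets perf and trace-cmd translate the string address emitted by a tracepoint back into its text, exactly as the comments added above describe.

/* Hypothetical expansion of RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched): */
static char rcu_sched_varname[] = "rcu_sched";
/* Exported via the __tracepoint_string section so tracing tools can resolve the address. */
static const char *tp_rcu_sched_varname __used __tracepoint_string = rcu_sched_varname;
struct rcu_state rcu_sched_state = {
	.level = { &rcu_sched_state.node[0] },
	.call = call_rcu_sched,
	/* ... remaining initializers are unchanged by this patch ... */
	.name = rcu_sched_varname,	/* previously the literal "rcu_sched" via #sname */
	.abbr = 's',
};
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);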