author     Paul E. McKenney <paul.mckenney@linaro.org>    2012-01-06 17:11:30 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-02-21 12:03:25 -0500
commit     486e259340fc4c60474f2c14703e3b3634bb58ca (patch)
tree       70a58702194588fa0773463523f72c682785d040 /kernel
parent     0bb7b59d6e2b8440cd7097097dd4bbfc4d76ed07 (diff)
rcu: Avoid waking up CPUs having only kfree_rcu() callbacks
When CONFIG_RCU_FAST_NO_HZ is enabled, RCU will allow a given CPU to enter
dyntick-idle mode even if it still has RCU callbacks queued.  RCU avoids
system hangs in this case by scheduling a timer for several jiffies in the
future.  However, if all of the callbacks on that CPU are from kfree_rcu(),
there is no reason to wake the CPU up, as it is not a problem to defer
freeing of memory.

This commit therefore tracks the number of callbacks on a given CPU that are
from kfree_rcu(), and avoids scheduling the timer if all of a given CPU's
callbacks are from kfree_rcu().

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
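The decision above hinges on one comparison: all of a CPU's callbacks are lazy
when its total callback count equals its kfree_rcu() count, and only a non-lazy
callback justifies arming the wakeup timer.  Below is a minimal user-space
sketch of that accounting; the names cpu_cbs and has_nonlazy_callbacks() are
invented stand-ins for the kernel's rcu_data and rcu_cpu_has_nonlazy_callbacks().

/* Minimal user-space sketch of the lazy-callback accounting described above.
 * cpu_cbs stands in for the kernel's per-CPU struct rcu_data; only the two
 * counters this patch cares about are modeled. */
#include <stdbool.h>
#include <stdio.h>

struct cpu_cbs {
        long qlen;      /* total queued callbacks, including lazy ones */
        long qlen_lazy; /* callbacks that only free memory (kfree_rcu) */
};

/* Mirrors __rcu_cpu_has_nonlazy_callbacks(): something other than a
 * kfree_rcu() callback is queued iff the two counters differ. */
static bool has_nonlazy_callbacks(const struct cpu_cbs *c)
{
        return c->qlen != c->qlen_lazy;
}

int main(void)
{
        struct cpu_cbs only_kfree = { .qlen = 5, .qlen_lazy = 5 };
        struct cpu_cbs mixed      = { .qlen = 5, .qlen_lazy = 3 };

        /* Only the second CPU would have its dyntick-idle timer armed. */
        printf("only kfree_rcu(): arm timer? %s\n",
               has_nonlazy_callbacks(&only_kfree) ? "yes" : "no");
        printf("mixed callbacks:  arm timer? %s\n",
               has_nonlazy_callbacks(&mixed) ? "yes" : "no");
        return 0;
}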
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcu.h              4
-rw-r--r--  kernel/rcutiny.c          4
-rw-r--r--  kernel/rcutree.c         29
-rw-r--r--  kernel/rcutree.h          3
-rw-r--r--  kernel/rcutree_plugin.h  79
-rw-r--r--  kernel/rcutree_trace.c    8
6 files changed, 105 insertions(+), 22 deletions(-)
diff --git a/kernel/rcu.h b/kernel/rcu.h
index aa88baab5f78..a074b0b43fc2 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -76,16 +76,18 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
 
 extern void kfree(const void *);
 
-static inline void __rcu_reclaim(char *rn, struct rcu_head *head)
+static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)
 {
         unsigned long offset = (unsigned long)head->func;
 
         if (__is_kfree_rcu_offset(offset)) {
                 RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
                 kfree((void *)head - offset);
+                return 1;
         } else {
                 RCU_TRACE(trace_rcu_invoke_callback(rn, head));
                 head->func(head);
+                return 0;
         }
 }
 
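The reason __rcu_reclaim() can report laziness at all is that kfree_rcu() never
stores a real function pointer in head->func: it stores the byte offset of the
rcu_head within the enclosing structure, and __is_kfree_rcu_offset() treats any
sufficiently small value as such an offset (in kernels of this era the test is
simply offset < 4096).  The following is a self-contained user-space sketch of
that encoding; the names are invented, and the func slot is widened to an
integer type to keep the casts tame.

/* Sketch of the kfree_rcu() offset encoding that the reworked
 * __rcu_reclaim() above relies on.  Names are illustrative only. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rcu_head_demo {
        uintptr_t func; /* real callback address, or a small struct offset */
};

struct foo {
        int payload;
        struct rcu_head_demo rh;
};

/* Small values cannot be code addresses; treat them as "offset of the
 * rcu_head inside the object to be freed" (the kernel uses < 4096). */
static bool is_kfree_offset(uintptr_t v)
{
        return v < 4096;
}

/* Mirrors the new bool-returning __rcu_reclaim(): true means the callback
 * was lazy (memory freed, no code executed). */
static bool reclaim(struct rcu_head_demo *head)
{
        if (is_kfree_offset(head->func)) {
                free((char *)head - head->func);
                return true;
        }
        ((void (*)(struct rcu_head_demo *))head->func)(head);
        return false;
}

static void real_cb(struct rcu_head_demo *head)
{
        /* A non-lazy callback: does work beyond freeing memory. */
        printf("real callback ran\n");
        free((char *)head - offsetof(struct foo, rh));
}

int main(void)
{
        struct foo *a = malloc(sizeof(*a));
        struct foo *b = malloc(sizeof(*b));

        a->rh.func = offsetof(struct foo, rh); /* kfree_rcu() style */
        b->rh.func = (uintptr_t)real_cb;       /* call_rcu() style */

        printf("a lazy? %d\n", reclaim(&a->rh)); /* 1 */
        printf("b lazy? %d\n", reclaim(&b->rh)); /* 0 */
        return 0;
}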
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 8e00d461911e..4eb34fcc2a75 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -258,7 +258,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 
         /* If no RCU callbacks ready to invoke, just return. */
         if (&rcp->rcucblist == rcp->donetail) {
-                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
+                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
                 RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
                                               ACCESS_ONCE(rcp->rcucblist),
                                               need_resched(),
@@ -269,7 +269,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 
         /* Move the ready-to-invoke callbacks to a local list. */
         local_irq_save(flags);
-        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
+        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
         list = rcp->rcucblist;
         rcp->rcucblist = *rcp->donetail;
         *rcp->donetail = NULL;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 570f7530f4b3..acf2d67ad2f4 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1261,6 +1261,7 @@ static void rcu_send_cbs_to_online(struct rcu_state *rsp)
 
         *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
         receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+        receive_rdp->qlen_lazy += rdp->qlen_lazy;
         receive_rdp->qlen += rdp->qlen;
         receive_rdp->n_cbs_adopted += rdp->qlen;
         rdp->n_cbs_orphaned += rdp->qlen;
@@ -1268,6 +1269,7 @@ static void rcu_send_cbs_to_online(struct rcu_state *rsp)
         rdp->nxtlist = NULL;
         for (i = 0; i < RCU_NEXT_SIZE; i++)
                 rdp->nxttail[i] = &rdp->nxtlist;
+        rdp->qlen_lazy = 0;
         rdp->qlen = 0;
 }
 
@@ -1368,11 +1370,11 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
         unsigned long flags;
         struct rcu_head *next, *list, **tail;
-        int bl, count;
+        int bl, count, count_lazy;
 
         /* If no callbacks are ready, just return.*/
         if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
-                trace_rcu_batch_start(rsp->name, 0, 0);
+                trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
                 trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
                                     need_resched(), is_idle_task(current),
                                     rcu_is_callbacks_kthread());
@@ -1385,7 +1387,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
          */
         local_irq_save(flags);
         bl = rdp->blimit;
-        trace_rcu_batch_start(rsp->name, rdp->qlen, bl);
+        trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
         list = rdp->nxtlist;
         rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
         *rdp->nxttail[RCU_DONE_TAIL] = NULL;
@@ -1396,12 +1398,13 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
         local_irq_restore(flags);
 
         /* Invoke callbacks. */
-        count = 0;
+        count = count_lazy = 0;
         while (list) {
                 next = list->next;
                 prefetch(next);
                 debug_rcu_head_unqueue(list);
-                __rcu_reclaim(rsp->name, list);
+                if (__rcu_reclaim(rsp->name, list))
+                        count_lazy++;
                 list = next;
                 /* Stop only if limit reached and CPU has something to do. */
                 if (++count >= bl &&
@@ -1416,6 +1419,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
                             rcu_is_callbacks_kthread());
 
         /* Update count, and requeue any remaining callbacks. */
+        rdp->qlen_lazy -= count_lazy;
         rdp->qlen -= count;
         rdp->n_cbs_invoked += count;
         if (list != NULL) {
@@ -1702,7 +1706,7 @@ static void invoke_rcu_core(void)
 
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
-           struct rcu_state *rsp)
+           struct rcu_state *rsp, bool lazy)
 {
         unsigned long flags;
         struct rcu_data *rdp;
@@ -1727,12 +1731,14 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
         *rdp->nxttail[RCU_NEXT_TAIL] = head;
         rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
         rdp->qlen++;
+        if (lazy)
+                rdp->qlen_lazy++;
 
         if (__is_kfree_rcu_offset((unsigned long)func))
                 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
-                                         rdp->qlen);
+                                         rdp->qlen_lazy, rdp->qlen);
         else
-                trace_rcu_callback(rsp->name, head, rdp->qlen);
+                trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
 
         /* If interrupts were disabled, don't dive into RCU core. */
         if (irqs_disabled_flags(flags)) {
@@ -1779,16 +1785,16 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
  */
 void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-        __call_rcu(head, func, &rcu_sched_state);
+        __call_rcu(head, func, &rcu_sched_state, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
 /*
- * Queue an RCU for invocation after a quicker grace period.
+ * Queue an RCU callback for invocation after a quicker grace period.
  */
 void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-        __call_rcu(head, func, &rcu_bh_state);
+        __call_rcu(head, func, &rcu_bh_state, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
 
@@ -2036,6 +2042,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
         rdp->nxtlist = NULL;
         for (i = 0; i < RCU_NEXT_SIZE; i++)
                 rdp->nxttail[i] = &rdp->nxtlist;
+        rdp->qlen_lazy = 0;
         rdp->qlen = 0;
         rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
         WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index fddff92d6676..af2af3cc5e65 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -265,7 +265,8 @@ struct rcu_data {
          */
         struct rcu_head *nxtlist;
         struct rcu_head **nxttail[RCU_NEXT_SIZE];
-        long qlen;                      /* # of queued callbacks */
+        long qlen_lazy;                 /* # of lazy queued callbacks */
+        long qlen;                      /* # of queued callbacks, incl lazy */
         long qlen_last_fqs_check;
                                         /* qlen at last check for QS forcing */
         unsigned long n_cbs_invoked;    /* count of RCU cbs invoked. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3680b6b35bf3..7adf232bb66b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -671,10 +671,24 @@ static void rcu_preempt_do_callbacks(void)
  */
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-        __call_rcu(head, func, &rcu_preempt_state);
+        __call_rcu(head, func, &rcu_preempt_state, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
+/*
+ * Queue an RCU callback for lazy invocation after a grace period.
+ * This will likely be later named something like "call_rcu_lazy()",
+ * but this change will require some way of tagging the lazy RCU
+ * callbacks in the list of pending callbacks.  Until then, this
+ * function may only be called from __kfree_rcu().
+ */
+void kfree_call_rcu(struct rcu_head *head,
+                    void (*func)(struct rcu_head *rcu))
+{
+        __call_rcu(head, func, &rcu_preempt_state, 1);
+}
+EXPORT_SYMBOL_GPL(kfree_call_rcu);
+
 /**
  * synchronize_rcu - wait until a grace period has elapsed.
  *
@@ -1065,6 +1079,22 @@ static void rcu_preempt_process_callbacks(void)
 }
 
 /*
+ * Queue an RCU callback for lazy invocation after a grace period.
+ * This will likely be later named something like "call_rcu_lazy()",
+ * but this change will require some way of tagging the lazy RCU
+ * callbacks in the list of pending callbacks.  Until then, this
+ * function may only be called from __kfree_rcu().
+ *
+ * Because there is no preemptible RCU, we use RCU-sched instead.
+ */
+void kfree_call_rcu(struct rcu_head *head,
+                    void (*func)(struct rcu_head *rcu))
+{
+        __call_rcu(head, func, &rcu_sched_state, 1);
+}
+EXPORT_SYMBOL_GPL(kfree_call_rcu);
+
+/*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
  * But because preemptible RCU does not exist, map to rcu-sched.
  */
@@ -2052,6 +2082,48 @@ int rcu_needs_cpu(int cpu)
 }
 
 /*
+ * Does the specified flavor of RCU have non-lazy callbacks pending on
+ * the specified CPU?  Both RCU flavor and CPU are specified by the
+ * rcu_data structure.
+ */
+static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
+{
+        return rdp->qlen != rdp->qlen_lazy;
+}
+
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+/*
+ * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
+ * is no RCU-preempt in the kernel.)
+ */
+static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
+{
+        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
+
+        return __rcu_cpu_has_nonlazy_callbacks(rdp);
+}
+
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
+{
+        return 0;
+}
+
+#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+/*
+ * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
+ */
+static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
+{
+        return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
+               __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
+               rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
+}
+
+/*
  * Timer handler used to force CPU to start pushing its remaining RCU
  * callbacks in the case where it entered dyntick-idle mode with callbacks
  * pending.  The hander doesn't really need to do anything because the
@@ -2149,8 +2221,9 @@ static void rcu_prepare_for_idle(int cpu)
                 trace_rcu_prep_idle("Dyntick with callbacks");
                 per_cpu(rcu_dyntick_drain, cpu) = 0;
                 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-                hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-                              rcu_idle_gp_wait, HRTIMER_MODE_REL);
+                if (rcu_cpu_has_nonlazy_callbacks(cpu))
+                        hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
+                                      rcu_idle_gp_wait, HRTIMER_MODE_REL);
                 return; /* Nothing more to do immediately. */
         } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                 /* We have hit the limit, so time to give up. */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 654cfe67f0d1..db0987c1e1bd 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -73,8 +73,8 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                    rdp->dynticks->dynticks_nmi_nesting,
                    rdp->dynticks_fqs);
         seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
-        seq_printf(m, " ql=%ld qs=%c%c%c%c",
-                   rdp->qlen,
+        seq_printf(m, " ql=%ld/%ld qs=%c%c%c%c",
+                   rdp->qlen_lazy, rdp->qlen,
                    ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
                         rdp->nxttail[RCU_NEXT_TAIL]],
                    ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
@@ -145,7 +145,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
                    rdp->dynticks->dynticks_nmi_nesting,
                    rdp->dynticks_fqs);
         seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
-        seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
+        seq_printf(m, ",%ld,%ld,\"%c%c%c%c\"", rdp->qlen_lazy, rdp->qlen,
                    ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
                         rdp->nxttail[RCU_NEXT_TAIL]],
                    ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
@@ -168,7 +168,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
         seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
         seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
-        seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
+        seq_puts(m, "\"of\",\"ri\",\"qll\",\"ql\",\"qs\"");
 #ifdef CONFIG_RCU_BOOST
         seq_puts(m, "\"kt\",\"ktl\"");
 #endif /* #ifdef CONFIG_RCU_BOOST */
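For completeness, callers never invoke kfree_call_rcu() directly: they keep
using the existing kfree_rcu() wrapper, which encodes the rcu_head offset as
the callback "function", and presumably the include/ side of this commit (not
shown here because the diffstat is limited to kernel/) routes __kfree_rcu() to
the new kfree_call_rcu().  A kernel-style usage sketch follows; demo_node and
demo_delete() are invented for illustration.

/* Illustrative usage (not from this patch): a structure freed with
 * kfree_rcu() now counts as a lazy callback, so a CPU whose pending work
 * is all of this form is left undisturbed in dyntick-idle. */
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct demo_node {
        int key;
        struct rcu_head rh;
};

static void demo_delete(struct demo_node *p)
{
        /* After unlinking p from its RCU-protected structure, defer the
         * kfree(); no wakeup timer is armed for this callback alone. */
        kfree_rcu(p, rh);
}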