-rw-r--r--  kernel/rcutree.c         | 59
-rw-r--r--  kernel/rcutree.h         |  8
-rw-r--r--  kernel/rcutree_plugin.h  | 41
-rw-r--r--  kernel/rcutree_trace.c   | 32
4 files changed, 99 insertions, 41 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ae5c9ea68662..429d4949f0eb 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+#ifdef CONFIG_RCU_BOOST
+
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
@@ -98,9 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
-static void invoke_rcu_cpu_kthread(void);
-static void __invoke_rcu_cpu_kthread(void);
+static void invoke_rcu_core(void);
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
 #define RCU_KTHREAD_PRIO 1	/* RT priority for per-CPU kthreads. */
 
@@ -1089,6 +1093,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	int need_report = 0;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp;
+#ifdef CONFIG_RCU_BOOST
 	struct task_struct *t;
 
 	/* Stop the CPU's kthread. */
@@ -1097,6 +1102,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
 		kthread_stop(t);
 	}
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 	/* Exclude any attempts to start a new grace period. */
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
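
The hunk above confines the CPU-offline teardown of the per-CPU kthread to boosted builds. As a minimal sketch of the teardown pattern itself, paraphrasing the lines shown (the wrapper name example_stop_cpu_kthread is illustrative, not a kernel symbol):

#include <linux/kthread.h>
#include <linux/percpu.h>

/* On CPU offline: detach the per-CPU kthread pointer, then stop the thread. */
static void example_stop_cpu_kthread(int cpu)
{
	struct task_struct *t = per_cpu(rcu_cpu_kthread_task, cpu);

	if (t != NULL) {
		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
		kthread_stop(t);	/* blocks until the kthread has exited */
	}
}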
@@ -1232,7 +1238,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
-		invoke_rcu_cpu_kthread();
+		invoke_rcu_core();
 }
 
 /*
@@ -1278,7 +1284,7 @@ void rcu_check_callbacks(int cpu, int user)
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
-		invoke_rcu_cpu_kthread();
+		invoke_rcu_core();
 }
 
 #ifdef CONFIG_SMP
@@ -1444,9 +1450,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* If there are callbacks ready, invoke them. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
-		__invoke_rcu_cpu_kthread();
+		invoke_rcu_callbacks(rsp, rdp);
 }
 
+#ifdef CONFIG_RCU_BOOST
+
 static void rcu_kthread_do_work(void)
 {
 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
@@ -1454,6 +1462,8 @@ static void rcu_kthread_do_work(void)
 	rcu_preempt_do_callbacks();
 }
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Do softirq processing for the current CPU.
  */
@@ -1474,25 +1484,22 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
  * cannot disappear out from under us.
  */
-static void __invoke_rcu_cpu_kthread(void)
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__this_cpu_write(rcu_cpu_has_work, 1);
-	if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-		local_irq_restore(flags);
+	if (likely(!rsp->boost)) {
+		rcu_do_batch(rsp, rdp);
 		return;
 	}
-	wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
-	local_irq_restore(flags);
+	invoke_rcu_callbacks_kthread();
 }
 
-static void invoke_rcu_cpu_kthread(void)
+static void invoke_rcu_core(void)
 {
 	raise_softirq(RCU_SOFTIRQ);
 }
 
+#ifdef CONFIG_RCU_BOOST
+
 /*
  * Wake up the specified per-rcu_node-structure kthread.
  * Because the per-rcu_node kthreads are immortal, we don't need
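
After this hunk, all callback invocation funnels through one runtime dispatch on rsp->boost. A sketch of the resulting call flow, paraphrased from the functions above (annotations added; not a drop-in patch):

/*
 * invoke_rcu_core()                      -- raise_softirq(RCU_SOFTIRQ)
 *   rcu_process_callbacks()              -- RCU_SOFTIRQ handler, per flavor:
 *     __rcu_process_callbacks(rsp, rdp)
 *       invoke_rcu_callbacks(rsp, rdp)
 *         rcu_do_batch(rsp, rdp)             -- !rsp->boost: run in softirq
 *         invoke_rcu_callbacks_kthread()     -- rsp->boost: wake per-CPU kthread
 *
 * rsp->boost is set only when a boost kthread is spawned, so a
 * CONFIG_RCU_BOOST=n kernel always takes the direct softirq path.
 */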
@@ -1818,6 +1825,18 @@ static int __init rcu_spawn_kthreads(void)
 }
 early_initcall(rcu_spawn_kthreads);
 
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+}
+
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	   struct rcu_state *rsp)
@@ -2224,6 +2243,8 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
 	rcu_preempt_init_percpu_data(cpu);
 }
 
+#ifdef CONFIG_RCU_BOOST
+
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
@@ -2237,6 +2258,14 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	}
 }
 
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Handle CPU online/offline notification events.
  */
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 0fed6b934d2a..434288c7ad88 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -369,6 +369,7 @@ struct rcu_state {
 						/*  period because */
 						/*  force_quiescent_state() */
 						/*  was running. */
+	u8	boost;				/* Subject to priority boost. */
 	unsigned long gpnum;			/* Current gp number. */
 	unsigned long completed;		/* # of last completed gp. */
 
@@ -439,7 +440,6 @@ static void rcu_preempt_offline_cpu(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(int cpu);
 static void rcu_preempt_process_callbacks(void);
-static void rcu_preempt_do_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
@@ -451,11 +451,15 @@ static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+static void invoke_rcu_callbacks_kthread(void);
+#ifdef CONFIG_RCU_BOOST
+static void rcu_preempt_do_callbacks(void);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
 					  cpumask_var_t cm);
-static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp,
 						 int rnp_index);
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 38d09c5f2b41..2772386c0421 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -602,11 +602,15 @@ static void rcu_preempt_process_callbacks(void)
 			 &__get_cpu_var(rcu_preempt_data));
 }
 
+#ifdef CONFIG_RCU_BOOST
+
 static void rcu_preempt_do_callbacks(void)
 {
 	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
 }
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Queue a preemptible-RCU callback for invocation after a grace period.
  */
@@ -1002,10 +1006,6 @@ static void rcu_preempt_process_callbacks(void)
 {
 }
 
-static void rcu_preempt_do_callbacks(void)
-{
-}
-
 /*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
  * But because preemptible RCU does not exist, map to rcu-sched.
@@ -1258,6 +1258,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 }
 
 /*
+ * Wake up the per-CPU kthread to invoke RCU callbacks.
+ */
+static void invoke_rcu_callbacks_kthread(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__this_cpu_write(rcu_cpu_has_work, 1);
+	if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
+		local_irq_restore(flags);
+		return;
+	}
+	wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+	local_irq_restore(flags);
+}
+
+/*
  * Set the affinity of the boost kthread.  The CPU-hotplug locks are
  * held, so no one should be messing with the existence of the boost
  * kthread.
@@ -1297,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
 	if (&rcu_preempt_state != rsp)
 		return 0;
+	rsp->boost = 1;
 	if (rnp->boost_kthread_task != NULL)
 		return 0;
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1319,22 +1337,15 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-					  cpumask_var_t cm)
+static void invoke_rcu_callbacks_kthread(void)
 {
+	WARN_ON_ONCE(1);
 }
 
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp,
-						 int rnp_index)
-{
-	return 0;
-}
-
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP
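
Note the asymmetry in the stubs above: rcu_preempt_boost_start_gp() is a plain no-op, while invoke_rcu_callbacks_kthread() warns, because it should be unreachable here; invoke_rcu_callbacks() calls it only when rsp->boost is set, and rsp->boost is set only by rcu_spawn_one_boost_kthread(), which this configuration compiles out. A generic sketch of the defensive-stub idiom (hypothetical name):

/* Stub for a path that should be dead in this config: scream once if hit. */
static void my_boost_only_path(void)
{
	WARN_ON_ONCE(1);
}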
@@ -1509,7 +1520,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
- * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked
+ * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
  * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
@@ -1560,7 +1571,7 @@ int rcu_needs_cpu(int cpu)
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
 	if (c)
-		invoke_rcu_cpu_kthread();
+		invoke_rcu_core();
 	return c;
 }
 
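
invoke_rcu_callbacks_kthread(), added in the first rcutree_plugin.h hunk above, follows a common flag-then-wake idiom: with interrupts disabled, publish per-CPU work, then wake the worker if it exists. A standalone sketch under illustrative names (my_worker_task and my_has_work are not kernel symbols):

#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, my_worker_task);
static DEFINE_PER_CPU(char, my_has_work);

static void wake_my_worker(void)
{
	unsigned long flags;

	local_irq_save(flags);			/* pin this CPU; worker can't vanish */
	__this_cpu_write(my_has_work, 1);	/* publish work before the wakeup */
	if (__this_cpu_read(my_worker_task) != NULL)
		wake_up_process(__this_cpu_read(my_worker_task));
	local_irq_restore(flags);
}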
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9678cc3650f5..4e144876dc68 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,8 @@
 #define RCU_TREE_NONCORE
 #include "rcutree.h"
 
+#ifdef CONFIG_RCU_BOOST
+
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
 	return "SRWOY"[kthread_status];
 }
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 		   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
-	seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld",
+	seq_printf(m, " ql=%ld qs=%c%c%c%c",
 		   rdp->qlen,
 		   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
 			rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 			rdp->nxttail[RCU_NEXT_READY_TAIL]],
 		   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
 			rdp->nxttail[RCU_WAIT_TAIL]],
-		   ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+		   ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+	seq_printf(m, " kt=%d/%c/%d ktl=%x",
 		   per_cpu(rcu_cpu_has_work, rdp->cpu),
 		   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
 					  rdp->cpu)),
 		   per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
-		   per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff,
-		   rdp->blimit);
+		   per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+	seq_printf(m, " b=%ld", rdp->blimit);
 	seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
 		   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 		   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
-	seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen,
+	seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
 		   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
 			rdp->nxttail[RCU_NEXT_TAIL]],
 		   ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
 			rdp->nxttail[RCU_NEXT_READY_TAIL]],
 		   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
 			rdp->nxttail[RCU_WAIT_TAIL]],
-		   ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+		   ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+	seq_printf(m, ",%d,\"%c\"",
 		   per_cpu(rcu_cpu_has_work, rdp->cpu),
 		   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
-					  rdp->cpu)),
-		   rdp->blimit);
+					  rdp->cpu)));
+#endif /* #ifdef CONFIG_RCU_BOOST */
+	seq_printf(m, ",%ld", rdp->blimit);
 	seq_printf(m, ",%lu,%lu,%lu\n",
 		   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 #ifdef CONFIG_NO_HZ
 	seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
-	seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
+	seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
+#ifdef CONFIG_RCU_BOOST
+	seq_puts(m, "\"kt\",\"ktl\"");
+#endif /* #ifdef CONFIG_RCU_BOOST */
+	seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	seq_puts(m, "\"rcu_preempt:\"\n");
 	PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
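
Splitting the single seq_printf()/seq_puts() calls into config-guarded pieces keeps the common columns unconditional while the kthread columns appear only in CONFIG_RCU_BOOST builds, in both the human-readable and CSV output plus the CSV header. A self-contained sketch of the same technique (demo_stats and its fields are hypothetical, not the rcutree trace structures):

#include <linux/seq_file.h>

struct demo_stats {
	long qlen;
	long blimit;
#ifdef CONFIG_RCU_BOOST
	int kt_status;
#endif
};

static void demo_print_stats(struct seq_file *m, struct demo_stats *sp)
{
	seq_printf(m, " ql=%ld", sp->qlen);		/* always emitted */
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, " kt=%d", sp->kt_status);		/* boost-only column */
#endif /* #ifdef CONFIG_RCU_BOOST */
	seq_printf(m, " b=%ld", sp->blimit);		/* always emitted */
}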