 -rw-r--r--  Documentation/RCU/trace.txt |  36
 -rw-r--r--  kernel/rcutree.c            | 113
 -rw-r--r--  kernel/rcutree.h            |   1
 -rw-r--r--  kernel/rcutree_plugin.h     |   2
 -rw-r--r--  kernel/rcutree_trace.c      |   6
 5 files changed, 87 insertions(+), 71 deletions(-)
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 49587abfc2f7..f6f15ce39903 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -33,23 +33,23 @@ rcu/rcuboost:
 The output of "cat rcu/rcudata" looks as follows:
 
 rcu_sched:
-  0 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=545/1/0 df=50 of=0 ri=0 ql=163 qs=NRW. kt=0/W/0 ktl=ebc3 b=10 ci=153737 co=0 ca=0
-  1 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=967/1/0 df=58 of=0 ri=0 ql=634 qs=NRW. kt=0/W/1 ktl=58c b=10 ci=191037 co=0 ca=0
-  2 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1081/1/0 df=175 of=0 ri=0 ql=74 qs=N.W. kt=0/W/2 ktl=da94 b=10 ci=75991 co=0 ca=0
-  3 c=20942 g=20943 pq=1 pgp=20942 qp=1 dt=1846/0/0 df=404 of=0 ri=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=72261 co=0 ca=0
-  4 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=369/1/0 df=83 of=0 ri=0 ql=48 qs=N.W. kt=0/W/4 ktl=e0e7 b=10 ci=128365 co=0 ca=0
-  5 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=381/1/0 df=64 of=0 ri=0 ql=169 qs=NRW. kt=0/W/5 ktl=fb2f b=10 ci=164360 co=0 ca=0
-  6 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1037/1/0 df=183 of=0 ri=0 ql=62 qs=N.W. kt=0/W/6 ktl=d2ad b=10 ci=65663 co=0 ca=0
-  7 c=20897 g=20897 pq=1 pgp=20896 qp=0 dt=1572/0/0 df=382 of=0 ri=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=75006 co=0 ca=0
+  0 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=545/1/0 df=50 of=0 ql=163 qs=NRW. kt=0/W/0 ktl=ebc3 b=10 ci=153737 co=0 ca=0
+  1 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=967/1/0 df=58 of=0 ql=634 qs=NRW. kt=0/W/1 ktl=58c b=10 ci=191037 co=0 ca=0
+  2 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1081/1/0 df=175 of=0 ql=74 qs=N.W. kt=0/W/2 ktl=da94 b=10 ci=75991 co=0 ca=0
+  3 c=20942 g=20943 pq=1 pgp=20942 qp=1 dt=1846/0/0 df=404 of=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=72261 co=0 ca=0
+  4 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=369/1/0 df=83 of=0 ql=48 qs=N.W. kt=0/W/4 ktl=e0e7 b=10 ci=128365 co=0 ca=0
+  5 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=381/1/0 df=64 of=0 ql=169 qs=NRW. kt=0/W/5 ktl=fb2f b=10 ci=164360 co=0 ca=0
+  6 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1037/1/0 df=183 of=0 ql=62 qs=N.W. kt=0/W/6 ktl=d2ad b=10 ci=65663 co=0 ca=0
+  7 c=20897 g=20897 pq=1 pgp=20896 qp=0 dt=1572/0/0 df=382 of=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=75006 co=0 ca=0
 rcu_bh:
-  0 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=545/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/0 ktl=ebc3 b=10 ci=0 co=0 ca=0
-  1 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=967/1/0 df=3 of=0 ri=1 ql=0 qs=.... kt=0/W/1 ktl=58c b=10 ci=151 co=0 ca=0
-  2 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1081/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/2 ktl=da94 b=10 ci=0 co=0 ca=0
-  3 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1846/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=0 co=0 ca=0
-  4 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=369/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/4 ktl=e0e7 b=10 ci=0 co=0 ca=0
-  5 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=381/1/0 df=4 of=0 ri=1 ql=0 qs=.... kt=0/W/5 ktl=fb2f b=10 ci=0 co=0 ca=0
-  6 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1037/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/6 ktl=d2ad b=10 ci=0 co=0 ca=0
-  7 c=1474 g=1474 pq=1 pgp=1473 qp=0 dt=1572/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=0 co=0 ca=0
+  0 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=545/1/0 df=6 of=0 ql=0 qs=.... kt=0/W/0 ktl=ebc3 b=10 ci=0 co=0 ca=0
+  1 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=967/1/0 df=3 of=0 ql=0 qs=.... kt=0/W/1 ktl=58c b=10 ci=151 co=0 ca=0
+  2 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1081/1/0 df=6 of=0 ql=0 qs=.... kt=0/W/2 ktl=da94 b=10 ci=0 co=0 ca=0
+  3 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1846/0/0 df=8 of=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=0 co=0 ca=0
+  4 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=369/1/0 df=6 of=0 ql=0 qs=.... kt=0/W/4 ktl=e0e7 b=10 ci=0 co=0 ca=0
+  5 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=381/1/0 df=4 of=0 ql=0 qs=.... kt=0/W/5 ktl=fb2f b=10 ci=0 co=0 ca=0
+  6 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1037/1/0 df=6 of=0 ql=0 qs=.... kt=0/W/6 ktl=d2ad b=10 ci=0 co=0 ca=0
+  7 c=1474 g=1474 pq=1 pgp=1473 qp=0 dt=1572/0/0 df=8 of=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=0 co=0 ca=0
 
 The first section lists the rcu_data structures for rcu_sched, the second
 for rcu_bh.  Note that CONFIG_TREE_PREEMPT_RCU kernels will have an
@@ -119,10 +119,6 @@ o "of" is the number of times that some other CPU has forced a
 	CPU is offline when it is really alive and kicking) is a fatal
 	error, so it makes sense to err conservatively.
 
-o	"ri" is the number of times that RCU has seen fit to send a
-	reschedule IPI to this CPU in order to get it to report a
-	quiescent state.
-
 o	"ql" is the number of RCU callbacks currently residing on
 	this CPU.  This is the total number of callbacks, regardless
 	of what state they are in (new, waiting for grace period to
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 05470d4caba3..708469a06860 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -320,25 +320,18 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 {
         /*
-         * If the CPU is offline, it is in a quiescent state.  We can
-         * trust its state not to change because interrupts are disabled.
+         * If the CPU is offline for more than a jiffy, it is in a quiescent
+         * state.  We can trust its state not to change because interrupts
+         * are disabled.  The reason for the jiffy's worth of slack is to
+         * handle CPUs initializing on the way up and finding their way
+         * to the idle loop on the way down.
          */
-        if (cpu_is_offline(rdp->cpu)) {
+        if (cpu_is_offline(rdp->cpu) &&
+            ULONG_CMP_LT(rdp->rsp->gp_start + 2, jiffies)) {
                 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
                 rdp->offline_fqs++;
                 return 1;
         }
-
-        /*
-         * The CPU is online, so send it a reschedule IPI.  This forces
-         * it through the scheduler, and (inefficiently) also handles cases
-         * where idle loops fail to inform RCU about the CPU being idle.
-         */
-        if (rdp->cpu != smp_processor_id())
-                smp_send_reschedule(rdp->cpu);
-        else
-                set_need_resched();
-        rdp->resched_ipi++;
         return 0;
 }
 
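A note on the new test: ULONG_CMP_LT() is RCU's wraparound-safe unsigned comparison (the kernel defines it in terms of ULONG_MAX / 2, essentially as sketched below), so trusting cpu_is_offline() only once the grace period is a couple of jiffies old keeps working even if the jiffies counter wraps right after rsp->gp_start is recorded. A minimal stand-alone illustration in ordinary user-space C, not kernel code:

    #include <limits.h>
    #include <stdio.h>

    /* Wraparound-safe "a is before b" for free-running counters;
     * mirrors the kernel's ULONG_CMP_LT() definition. */
    #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

    int main(void)
    {
            unsigned long gp_start = ULONG_MAX - 3; /* GP began just before the wrap */
            unsigned long now = 1;                  /* jiffies has wrapped past zero */

            /* A naive comparison misfires across the wrap... */
            printf("naive:   %d\n", gp_start + 2 < now);              /* prints 0 */
            /* ...while the modular form still sees "now" as later. */
            printf("modular: %d\n", ULONG_CMP_LT(gp_start + 2, now)); /* prints 1 */
            return 0;
    }

The "+ 2" supplies the jiffy's worth of slack described in the comment: a half-onlined or half-offlined CPU gets a brief window before force_quiescent_state() takes cpu_is_offline() at face value.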
@@ -601,19 +594,33 @@ EXPORT_SYMBOL(rcu_is_cpu_idle);
  * this task being preempted, its old CPU being taken offline, resuming
  * on some other CPU, then determining that its old CPU is now offline.
  * It is OK to use RCU on an offline processor during initial boot, hence
- * the check for rcu_scheduler_fully_active.
+ * the check for rcu_scheduler_fully_active.  Note also that it is OK
+ * for a CPU coming online to use RCU for one jiffy prior to marking itself
+ * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
+ * offline to continue to use RCU for one jiffy after marking itself
+ * offline in the cpu_online_mask.  This leniency is necessary given the
+ * non-atomic nature of the online and offline processing, for example,
+ * the fact that a CPU enters the scheduler after completing the CPU_DYING
+ * notifiers.
+ *
+ * This is also why RCU internally marks CPUs online during the
+ * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
  *
  * Disable checking if in an NMI handler because we cannot safely report
  * errors from NMI handlers anyway.
  */
 bool rcu_lockdep_current_cpu_online(void)
 {
+        struct rcu_data *rdp;
+        struct rcu_node *rnp;
         bool ret;
 
         if (in_nmi())
                 return 1;
         preempt_disable();
-        ret = cpu_online(smp_processor_id()) ||
+        rdp = &__get_cpu_var(rcu_sched_data);
+        rnp = rdp->mynode;
+        ret = (rdp->grpmask & rnp->qsmaskinit) ||
               !rcu_scheduler_fully_active;
         preempt_enable();
         return ret;
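The rewritten check consults RCU's own bookkeeping instead of cpu_online_mask: a CPU counts as online to RCU while its bit (rdp->grpmask) is set in its leaf rcu_node structure's ->qsmaskinit, which RCU sets during CPU_UP_PREPARE and clears during CPU_DEAD, giving exactly the leniency described in the header comment. A stand-alone sketch of that bitmask idiom; the struct and field names merely echo kernel/rcutree.h and are not the kernel's definitions:

    #include <stdio.h>

    struct rcu_node_sketch {
            unsigned long qsmaskinit;       /* one bit per CPU below this node */
    };

    struct rcu_data_sketch {
            unsigned long grpmask;          /* this CPU's bit in ->qsmaskinit */
            struct rcu_node_sketch *mynode; /* leaf node covering this CPU */
    };

    /* Nonzero iff RCU still counts this CPU for grace-period purposes. */
    static int cpu_online_to_rcu(const struct rcu_data_sketch *rdp)
    {
            return (rdp->grpmask & rdp->mynode->qsmaskinit) != 0;
    }

    int main(void)
    {
            struct rcu_node_sketch rnp = { .qsmaskinit = 0 };
            struct rcu_data_sketch rdp = { .grpmask = 1UL << 3, .mynode = &rnp };

            rnp.qsmaskinit |= rdp.grpmask;  /* CPU_UP_PREPARE: online to RCU */
            printf("online to RCU: %d\n", cpu_online_to_rcu(&rdp));
            rnp.qsmaskinit &= ~rdp.grpmask; /* CPU_DEAD: offline to RCU */
            printf("online to RCU: %d\n", cpu_online_to_rcu(&rdp));
            return 0;
    }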
@@ -1308,14 +1315,12 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
-        unsigned long flags;
         int i;
         unsigned long mask;
-        int need_report;
         int receive_cpu = cpumask_any(cpu_online_mask);
         struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
         struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);
-        struct rcu_node *rnp = rdp->mynode; /* For dying CPU. */
+        RCU_TRACE(struct rcu_node *rnp = rdp->mynode); /* For dying CPU. */
 
         /* First, adjust the counts. */
         if (rdp->nxtlist != NULL) {
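The RCU_TRACE() wrapper matters here because rnp is now used only by the tracing call later in the function; wrapping the declaration compiles it away, unused-variable warning and all, when event tracing is configured out. The kernel's RCU internals define the macro along the following lines; this is a stand-alone rendition, not the kernel source:

    #include <stdio.h>

    /* Comment out the next line to watch the RCU_TRACE() statements vanish. */
    #define CONFIG_RCU_TRACE

    #ifdef CONFIG_RCU_TRACE
    #define RCU_TRACE(stmt) stmt
    #else
    #define RCU_TRACE(stmt)
    #endif

    int main(void)
    {
            RCU_TRACE(int traced_only = 42);  /* exists only when tracing is on */
            RCU_TRACE(printf("tracing built in: %d\n", traced_only));
            return 0;
    }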
@@ -1381,32 +1386,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
                        "cpuofl");
         rcu_report_qs_rdp(smp_processor_id(), rsp, rdp, rsp->gpnum);
         /* Note that rcu_report_qs_rdp() might call trace_rcu_grace_period(). */
-
-        /*
-         * Remove the dying CPU from the bitmasks in the rcu_node
-         * hierarchy.  Because we are in stop_machine() context, we
-         * automatically exclude ->onofflock critical sections.
-         */
-        do {
-                raw_spin_lock_irqsave(&rnp->lock, flags);
-                rnp->qsmaskinit &= ~mask;
-                if (rnp->qsmaskinit != 0) {
-                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                        break;
-                }
-                if (rnp == rdp->mynode) {
-                        need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
-                        if (need_report & RCU_OFL_TASKS_NORM_GP)
-                                rcu_report_unblock_qs_rnp(rnp, flags);
-                        else
-                                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                        if (need_report & RCU_OFL_TASKS_EXP_GP)
-                                rcu_report_exp_rnp(rsp, rnp, true);
-                } else
-                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                mask = rnp->grpmask;
-                rnp = rnp->parent;
-        } while (rnp != NULL);
 }
 
 /*
@@ -1417,11 +1396,53 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  */
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
+        unsigned long flags;
+        unsigned long mask;
+        int need_report = 0;
         struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-        struct rcu_node *rnp = rdp->mynode;
+        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rnp. */
 
+        /* Adjust any no-longer-needed kthreads. */
         rcu_stop_cpu_kthread(cpu);
         rcu_node_kthread_setaffinity(rnp, -1);
+
+        /* Remove the dying CPU from the bitmasks in the rcu_node hierarchy. */
+
+        /* Exclude any attempts to start a new grace period. */
+        raw_spin_lock_irqsave(&rsp->onofflock, flags);
+
+        /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
+        mask = rdp->grpmask;  /* rnp->grplo is constant. */
+        do {
+                raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
+                rnp->qsmaskinit &= ~mask;
+                if (rnp->qsmaskinit != 0) {
+                        if (rnp != rdp->mynode)
+                                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                        break;
+                }
+                if (rnp == rdp->mynode)
+                        need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
+                else
+                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                mask = rnp->grpmask;
+                rnp = rnp->parent;
+        } while (rnp != NULL);
+
+        /*
+         * We still hold the leaf rcu_node structure lock here, and
+         * irqs are still disabled.  The reason for this subterfuge is
+         * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
+         * held leads to deadlock.
+         */
+        raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+        rnp = rdp->mynode;
+        if (need_report & RCU_OFL_TASKS_NORM_GP)
+                rcu_report_unblock_qs_rnp(rnp, flags);
+        else
+                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+        if (need_report & RCU_OFL_TASKS_EXP_GP)
+                rcu_report_exp_rnp(rsp, rnp, true);
 }
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
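This do/while loop is the heart of the move: it now runs from CPU_DEAD context under ->onofflock rather than from stop_machine(), clearing the outgoing CPU's bit in its leaf rcu_node structure and walking rootward, clearing each emptied node's bit in its parent and stopping at the first node that still has members. A stand-alone sketch of that upward propagation on a toy two-level tree (no locking; the names merely echo the kernel's):

    #include <stdio.h>
    #include <stddef.h>

    struct node {
            unsigned long qsmaskinit; /* bits for the CPUs/nodes below */
            unsigned long grpmask;    /* this node's bit in its parent */
            struct node *parent;      /* NULL at the root */
    };

    static void clear_cpu(struct node *leaf, unsigned long cpu_bit)
    {
            unsigned long mask = cpu_bit;
            struct node *rnp = leaf;

            do {
                    rnp->qsmaskinit &= ~mask;
                    if (rnp->qsmaskinit != 0)
                            break;          /* node still has members: stop */
                    mask = rnp->grpmask;    /* node drained: clear it upstairs */
                    rnp = rnp->parent;
            } while (rnp != NULL);
    }

    int main(void)
    {
            struct node root = { .qsmaskinit = 0x3 };  /* two leaves */
            struct node leaf = { .qsmaskinit = 0x1,    /* one CPU left */
                                 .grpmask = 0x1, .parent = &root };

            clear_cpu(&leaf, 0x1);  /* the leaf's last CPU goes offline */
            printf("leaf=%#lx root=%#lx\n", leaf.qsmaskinit, root.qsmaskinit);
            return 0;               /* prints leaf=0 root=0x2 */
    }

The "subterfuge" comment is the other half of the story: the leaf's lock is deliberately kept held across the ->onofflock release so that rcu_report_unblock_qs_rnp() is never invoked while ->onofflock is held, which would deadlock.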
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index e2ac8ee415bb..cdd1be0a4072 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -289,7 +289,6 @@ struct rcu_data {
         /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
         unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
         unsigned long offline_fqs;      /* Kicked due to being offline. */
-        unsigned long resched_ipi;      /* Sent a resched IPI. */
 
         /* 5) __rcu_pending() statistics. */
         unsigned long n_rcu_pending;    /* rcu_pending() calls since boot. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 5e25ee327ccb..07f880445d8d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -610,7 +610,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
          * absolutely necessary, but this is a good performance/complexity
          * tradeoff.
          */
-        if (rcu_preempt_blocked_readers_cgp(rnp))
+        if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
                 retval |= RCU_OFL_TASKS_NORM_GP;
         if (rcu_preempted_readers_exp(rnp))
                 retval |= RCU_OFL_TASKS_EXP_GP;
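For context, rcu_preempt_offline_tasks() reports its findings as OR-able bits that rcu_cleanup_dead_cpu() above tests via need_report, and the new rnp->qsmask == 0 condition sets the normal-GP bit only when the blocked readers are the sole remaining obstacle to the current grace period on this node. A stand-alone sketch of the flag idiom; the flag values below are assumptions for illustration, see the RCU_OFL_TASKS_* definitions in kernel/rcutree.h for the real ones:

    #include <stdio.h>

    /* Hypothetical stand-ins for RCU_OFL_TASKS_NORM_GP/_EXP_GP. */
    #define OFL_TASKS_NORM_GP 0x1 /* blocked readers block a normal GP */
    #define OFL_TASKS_EXP_GP  0x2 /* blocked readers block an expedited GP */

    static int offline_tasks_report(int blocks_norm_gp, int blocks_exp_gp)
    {
            int retval = 0;

            if (blocks_norm_gp)
                    retval |= OFL_TASKS_NORM_GP;
            if (blocks_exp_gp)
                    retval |= OFL_TASKS_EXP_GP;
            return retval;
    }

    int main(void)
    {
            int need_report = offline_tasks_report(1, 0);

            if (need_report & OFL_TASKS_NORM_GP)
                    printf("report normal-GP unblock\n");
            if (need_report & OFL_TASKS_EXP_GP)
                    printf("report expedited-GP unblock\n");
            return 0;
    }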
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index db0987c1e1bd..ed459edeff43 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -72,7 +72,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                    rdp->dynticks->dynticks_nesting,
                    rdp->dynticks->dynticks_nmi_nesting,
                    rdp->dynticks_fqs);
-        seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
+        seq_printf(m, " of=%lu", rdp->offline_fqs);
         seq_printf(m, " ql=%ld/%ld qs=%c%c%c%c",
                    rdp->qlen_lazy, rdp->qlen,
                    ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
@@ -144,7 +144,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
                    rdp->dynticks->dynticks_nesting,
                    rdp->dynticks->dynticks_nmi_nesting,
                    rdp->dynticks_fqs);
-        seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
+        seq_printf(m, ",%lu", rdp->offline_fqs);
         seq_printf(m, ",%ld,%ld,\"%c%c%c%c\"", rdp->qlen_lazy, rdp->qlen,
                    ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
                        rdp->nxttail[RCU_NEXT_TAIL]],
@@ -168,7 +168,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
         seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
         seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
-        seq_puts(m, "\"of\",\"ri\",\"qll\",\"ql\",\"qs\"");
+        seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\"");
 #ifdef CONFIG_RCU_BOOST
         seq_puts(m, "\"kt\",\"ktl\"");
 #endif /* #ifdef CONFIG_RCU_BOOST */