author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2017-04-18 19:01:46 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2017-04-26 14:23:12 -0400
commit     c7e88067c1ae89e7bcbed070fb2c4e30bc39b51f (patch)
tree       4625614c62fe99fd72292cb9c0e60f2f2ff23b2c
parent     d160a727c40e7175aa642137910a3fda46262fc8 (diff)
srcu: Exact tracking of srcu_data structures containing callbacks
The current Tree SRCU implementation schedules a workqueue for every
srcu_data covered by a given leaf srcu_node structure having callbacks,
even if only one of those srcu_data structures actually contains
callbacks.  This is clearly inefficient for workloads that don't feature
callbacks everywhere all the time.  This commit therefore adds an array
of masks that are used by the leaf srcu_node structures to track exactly
which srcu_data structures contain callbacks.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Mike Galbraith <efault@gmx.de>
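The mechanism is plain bit arithmetic: each srcu_data structure gets a
single-bit ->grpmask within its leaf (1 << (cpu - grplo)), that bit is
ORed into the leaf's ->srcu_data_have_cbs[idx] when callbacks are queued
for a given grace period, and grace-period end consumes the accumulated
mask so that workqueues are scheduled only for CPUs whose bit is set.
Below is a minimal user-space sketch of that idea; struct leaf and its
fields are hypothetical stand-ins for the kernel's srcu_node/srcu_data
types, not actual kernel code.

#include <stdio.h>

/* Hypothetical miniature of a leaf node covering CPUs grplo..grphi. */
struct leaf {
	int grplo;		/* First CPU covered by this leaf. */
	int grphi;		/* Last CPU covered by this leaf. */
	unsigned long have_cbs;	/* One bit per covered CPU with callbacks. */
};

/* Record that 'cpu' queued a callback (cf. sdp->grpmask being ORed */
/* into snp->srcu_data_have_cbs[idx] in srcu_funnel_gp_start()). */
static void mark_cbs(struct leaf *lp, int cpu)
{
	lp->have_cbs |= 1UL << (cpu - lp->grplo);
}

/* Consume the mask, visiting only CPUs whose bit is set (cf. the */
/* skip added to srcu_schedule_cbs_snp()). */
static void schedule_cbs(struct leaf *lp)
{
	unsigned long mask = lp->have_cbs;
	int cpu;

	lp->have_cbs = 0;	/* Reset for the next grace period. */
	for (cpu = lp->grplo; cpu <= lp->grphi; cpu++) {
		if (!(mask & (1UL << (cpu - lp->grplo))))
			continue;	/* No callbacks here: skip. */
		printf("schedule callback work for CPU %d\n", cpu);
	}
}

int main(void)
{
	struct leaf lp = { .grplo = 8, .grphi = 15, .have_cbs = 0 };

	mark_cbs(&lp, 9);	/* Only CPUs 9 and 12 queued callbacks. */
	mark_cbs(&lp, 12);
	schedule_cbs(&lp);	/* Visits CPUs 9 and 12 only. */
	return 0;
}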
-rw-r--r--  include/linux/srcutree.h |  4
-rw-r--r--  kernel/rcu/srcutree.c    | 29
2 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 0400e211aa44..94515ff226fb 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -47,6 +47,8 @@ struct srcu_data {
 	struct delayed_work work;	/* Context for CB invoking. */
 	struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
 	struct srcu_node *mynode;	/* Leaf srcu_node. */
+	unsigned long grpmask;		/* Mask for leaf srcu_node */
+					/*  ->srcu_data_have_cbs[]. */
 	int cpu;
 	struct srcu_struct *sp;
 };
@@ -59,6 +61,8 @@ struct srcu_node {
 	unsigned long srcu_have_cbs[4];	/* GP seq for children */
 					/*  having CBs, but only */
 					/*  is > ->srcu_gp_seq. */
+	unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */
+					/*  have CBs for given GP? */
 	struct srcu_node *srcu_parent;	/* Next up in tree. */
 	int grplo;			/* Least CPU for node. */
 	int grphi;			/* Biggest CPU for node. */
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 9ecf0acc18eb..1c2c1004b3b1 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -66,8 +66,12 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 	/* Each pass through this loop initializes one srcu_node structure. */
 	rcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_init(&snp->lock);
-		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++)
+		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
+			     ARRAY_SIZE(snp->srcu_data_have_cbs));
+		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
 			snp->srcu_have_cbs[i] = 0;
+			snp->srcu_data_have_cbs[i] = 0;
+		}
 		snp->grplo = -1;
 		snp->grphi = -1;
 		if (snp == &sp->node[0]) {
@@ -107,6 +111,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 		sdp->cpu = cpu;
 		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
 		sdp->sp = sp;
+		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
 		if (is_static)
 			continue;
 
@@ -434,16 +439,21 @@ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
 
 /*
  * Schedule callback invocation for all srcu_data structures associated
- * with the specified srcu_node structure, if possible, on the corresponding
- * CPUs.
+ * with the specified srcu_node structure that have callbacks for the
+ * just-completed grace period, the one corresponding to idx.  If possible,
+ * schedule this invocation on the corresponding CPUs.
  */
-static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp)
+static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
+				  unsigned long mask)
 {
 	int cpu;
 
-	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++)
+	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
+		if (!(mask & (1 << (cpu - snp->grplo))))
+			continue;
 		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu),
 				      atomic_read(&sp->srcu_exp_cnt) ? 0 : SRCU_INTERVAL);
+	}
 }
 
 /*
@@ -461,6 +471,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	unsigned long gpseq;
 	int idx;
 	int idxnext;
+	unsigned long mask;
 	struct srcu_node *snp;
 
 	/* Prevent more than one additional grace period. */
@@ -486,10 +497,12 @@ static void srcu_gp_end(struct srcu_struct *sp)
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
 		snp->srcu_have_cbs[idx] = gpseq;
 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
+		mask = snp->srcu_data_have_cbs[idx];
+		snp->srcu_data_have_cbs[idx] = 0;
 		spin_unlock_irq(&snp->lock);
 		if (cbs) {
 			smp_mb(); /* GP end before CB invocation. */
-			srcu_schedule_cbs_snp(sp, snp);
+			srcu_schedule_cbs_snp(sp, snp, mask);
 		}
 	}
 
@@ -536,6 +549,8 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp,
 		spin_lock_irqsave(&snp->lock, flags);
 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
 			snp_seq = snp->srcu_have_cbs[idx];
+			if (snp == sdp->mynode && snp_seq == s)
+				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 			spin_unlock_irqrestore(&snp->lock, flags);
 			if (snp == sdp->mynode && snp_seq != s) {
 				smp_mb(); /* CBs after GP! */
@@ -544,6 +559,8 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp,
 			return;
 		}
 		snp->srcu_have_cbs[idx] = s;
+		if (snp == sdp->mynode)
+			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 		spin_unlock_irqrestore(&snp->lock, flags);
 	}
 