author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-07-31 16:34:32 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-09-21 00:16:18 -0400
commit	7922cd0e562cb2b8da2c8a0afda2e1c9bb4a94e2 (patch)
tree	dce739485e55be9238c53bf70530a0b5e5a34aa3
parent	f4ecea309d3e17ba5e90082308125ad23bd5701b (diff)
rcu: Move rcu_report_exp_rnp() to allow consolidation
This is a nearly pure code-movement commit, moving rcu_report_exp_rnp(),
sync_rcu_preempt_exp_done(), and rcu_preempted_readers_exp() so
that later commits can make synchronize_sched_expedited() use them.
The non-code-movement portion of this commit tags rcu_report_exp_rnp()
as __maybe_unused to avoid build errors when CONFIG_PREEMPT=n.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
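
For readers unfamiliar with the attribute: __maybe_unused suppresses the compiler's "defined but not used" diagnostic (fatal in -Werror builds) for a symbol that is compiled in but has no caller yet, which is exactly the situation rcu_report_exp_rnp() is in under CONFIG_PREEMPT=n until the later consolidation commits land. Below is a minimal standalone sketch of the pattern; the macro definition mirrors what the kernel's compiler headers expand it to, and the helper name is invented for illustration:

	#define __maybe_unused __attribute__((__unused__))

	/* Compiled in, but with no caller yet -- analogous to
	 * rcu_report_exp_rnp() in tree.c before the follow-up commits.
	 * Without the attribute, -Wunused-function would complain. */
	static int __maybe_unused helper_not_yet_called(int x)
	{
		return x + 1;
	}

	int main(void)
	{
		return 0;
	}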
-rw-r--r--	kernel/rcu/tree.c	66
-rw-r--r--	kernel/rcu/tree_plugin.h	66
2 files changed, 66 insertions, 66 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 53d66ebb4811..59af27d8bc6a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3379,6 +3379,72 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
 	return rcu_seq_done(&rsp->expedited_sequence, s);
 }
 
+/*
+ * Return non-zero if there are any tasks in RCU read-side critical
+ * sections blocking the current preemptible-RCU expedited grace period.
+ * If there is no preemptible-RCU expedited grace period currently in
+ * progress, returns zero unconditionally.
+ */
+static int rcu_preempted_readers_exp(struct rcu_node *rnp)
+{
+	return rnp->exp_tasks != NULL;
+}
+
+/*
+ * return non-zero if there is no RCU expedited grace period in progress
+ * for the specified rcu_node structure, in other words, if all CPUs and
+ * tasks covered by the specified rcu_node structure have done their bit
+ * for the current expedited grace period.  Works only for preemptible
+ * RCU -- other RCU implementations use other means.
+ *
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
+ */
+static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
+{
+	return !rcu_preempted_readers_exp(rnp) &&
+	       READ_ONCE(rnp->expmask) == 0;
+}
+
+/*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+ * grace period.  This event is reported either to the rcu_node structure on
+ * which the task was queued or to one of that rcu_node structure's ancestors,
+ * recursively up the tree.  (Calm down, calm down, we do the recursion
+ * iteratively!)
+ *
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
+ */
+static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
+					      struct rcu_node *rnp, bool wake)
+{
+	unsigned long flags;
+	unsigned long mask;
+
+	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
+	for (;;) {
+		if (!sync_rcu_preempt_exp_done(rnp)) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			break;
+		}
+		if (rnp->parent == NULL) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			if (wake) {
+				smp_mb(); /* EGP done before wake_up(). */
+				wake_up(&rsp->expedited_wq);
+			}
+			break;
+		}
+		mask = rnp->grpmask;
+		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+		rnp = rnp->parent;
+		raw_spin_lock(&rnp->lock); /* irqs already disabled */
+		smp_mb__after_unlock_lock();
+		rnp->expmask &= ~mask;
+	}
+}
+
 /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
 static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
 			       struct rcu_data *rdp,
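
Context for the wake_up() above (not part of this commit): the expedited grace-period initiator sleeps on rsp->expedited_wq until the root rcu_node structure reports completion. A hedged sketch of that waiter side, with rnp denoting the root rcu_node (exact call sites vary across releases):

	/* Initiator-side sketch: block until all CPUs and tasks covered
	 * by the root rcu_node have checked in for this expedited GP.
	 * rcu_report_exp_rnp()'s smp_mb() + wake_up() pairs with this. */
	wait_event(rsp->expedited_wq, sync_rcu_preempt_exp_done(rnp));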
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 72df006de798..e73be8539978 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -536,72 +536,6 @@ void synchronize_rcu(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 /*
- * Return non-zero if there are any tasks in RCU read-side critical
- * sections blocking the current preemptible-RCU expedited grace period.
- * If there is no preemptible-RCU expedited grace period currently in
- * progress, returns zero unconditionally.
- */
-static int rcu_preempted_readers_exp(struct rcu_node *rnp)
-{
-	return rnp->exp_tasks != NULL;
-}
-
-/*
- * return non-zero if there is no RCU expedited grace period in progress
- * for the specified rcu_node structure, in other words, if all CPUs and
- * tasks covered by the specified rcu_node structure have done their bit
- * for the current expedited grace period.  Works only for preemptible
- * RCU -- other RCU implementations use other means.
- *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
- */
-static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
-{
-	return !rcu_preempted_readers_exp(rnp) &&
-	       READ_ONCE(rnp->expmask) == 0;
-}
-
-/*
- * Report the exit from RCU read-side critical section for the last task
- * that queued itself during or before the current expedited preemptible-RCU
- * grace period.  This event is reported either to the rcu_node structure on
- * which the task was queued or to one of that rcu_node structure's ancestors,
- * recursively up the tree.  (Calm down, calm down, we do the recursion
- * iteratively!)
- *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
- */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-			       bool wake)
-{
-	unsigned long flags;
-	unsigned long mask;
-
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	smp_mb__after_unlock_lock();
-	for (;;) {
-		if (!sync_rcu_preempt_exp_done(rnp)) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-			break;
-		}
-		if (rnp->parent == NULL) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-			if (wake) {
-				smp_mb(); /* EGP done before wake_up(). */
-				wake_up(&rsp->expedited_wq);
-			}
-			break;
-		}
-		mask = rnp->grpmask;
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
-		rnp = rnp->parent;
-		raw_spin_lock(&rnp->lock); /* irqs already disabled */
-		smp_mb__after_unlock_lock();
-		rnp->expmask &= ~mask;
-	}
-}
-
-/*
  * Snapshot the tasks blocking the newly started preemptible-RCU expedited
  * grace period for the specified rcu_node structure, phase 1.  If there
  * are such tasks, set the ->expmask bits up the rcu_node tree and also
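
To make the "we do the recursion iteratively" comment concrete, here is a toy single-threaded user-space model of the upward reporting walk, with the locking, memory barriers, and wakeups stripped out. All names and the two-level tree are invented for illustration; this is a sketch of the algorithm, not kernel code:

	#include <stdio.h>

	struct node {
		struct node *parent;
		unsigned long expmask;	/* one bit per child subtree not yet done */
		unsigned long grpmask;	/* this node's bit in parent->expmask */
		int exp_tasks;		/* nonzero: blocked readers remain here */
	};

	static int exp_done(struct node *n)
	{
		return !n->exp_tasks && n->expmask == 0;
	}

	/* Models rcu_report_exp_rnp(): walk toward the root, clearing this
	 * subtree's bit at each level, stopping as soon as some level is
	 * still waiting on other children or on blocked tasks. */
	static void report_exp(struct node *n)
	{
		for (;;) {
			if (!exp_done(n))
				return;		/* subtree not yet quiescent */
			if (!n->parent) {
				printf("root quiescent: waiter would be woken\n");
				return;
			}
			unsigned long mask = n->grpmask;
			n = n->parent;
			n->expmask &= ~mask;	/* report child subtree done */
		}
	}

	int main(void)
	{
		struct node root = { .parent = NULL, .expmask = 0x1 };
		struct node leaf = { .parent = &root, .grpmask = 0x1 };

		report_exp(&leaf);	/* clears root's bit, "wakes" the waiter */
		return 0;
	}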