diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2009-12-02 15:10:15 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-12-03 05:35:25 -0500 |
commit | d9a3da0699b24a589b27a61e1a5b5bd30d9db669 (patch) | |
tree | f7440e396a6c818f3cef514ccc31ab55d88025ef /kernel/rcutree.c | |
parent | cf244dc01bf68e1ad338b82447f8686d24ea4435 (diff) |
rcu: Add expedited grace-period support for preemptible RCU
Implement a synchronize_rcu_expedited() for preemptible RCU
that actually is expedited. This uses
synchronize_sched_expedited() to force all threads currently
running in a preemptible-RCU read-side critical section onto the
appropriate ->blocked_tasks[] list, then takes a snapshot of all
of these lists and waits for them to drain.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1259784616158-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r-- | kernel/rcutree.c | 10 |
1 files changed, 7 insertions, 3 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index d47e03e5792a..53ae9598f798 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -948,7 +948,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
948 | { | 948 | { |
949 | unsigned long flags; | 949 | unsigned long flags; |
950 | unsigned long mask; | 950 | unsigned long mask; |
951 | int need_quiet = 0; | 951 | int need_report = 0; |
952 | struct rcu_data *rdp = rsp->rda[cpu]; | 952 | struct rcu_data *rdp = rsp->rda[cpu]; |
953 | struct rcu_node *rnp; | 953 | struct rcu_node *rnp; |
954 | 954 | ||
@@ -967,7 +967,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
967 | break; | 967 | break; |
968 | } | 968 | } |
969 | if (rnp == rdp->mynode) | 969 | if (rnp == rdp->mynode) |
970 | need_quiet = rcu_preempt_offline_tasks(rsp, rnp, rdp); | 970 | need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); |
971 | else | 971 | else |
972 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 972 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
973 | mask = rnp->grpmask; | 973 | mask = rnp->grpmask; |
@@ -982,10 +982,12 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
982 | */ | 982 | */ |
983 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | 983 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ |
984 | rnp = rdp->mynode; | 984 | rnp = rdp->mynode; |
985 | if (need_quiet) | 985 | if (need_report & RCU_OFL_TASKS_NORM_GP) |
986 | rcu_report_unblock_qs_rnp(rnp, flags); | 986 | rcu_report_unblock_qs_rnp(rnp, flags); |
987 | else | 987 | else |
988 | spin_unlock_irqrestore(&rnp->lock, flags); | 988 | spin_unlock_irqrestore(&rnp->lock, flags); |
989 | if (need_report & RCU_OFL_TASKS_EXP_GP) | ||
990 | rcu_report_exp_rnp(rsp, rnp); | ||
989 | 991 | ||
990 | rcu_adopt_orphan_cbs(rsp); | 992 | rcu_adopt_orphan_cbs(rsp); |
991 | } | 993 | } |
@@ -1843,6 +1845,8 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1843 | rnp->level = i; | 1845 | rnp->level = i; |
1844 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); | 1846 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); |
1845 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); | 1847 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); |
1848 | INIT_LIST_HEAD(&rnp->blocked_tasks[2]); | ||
1849 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); | ||
1846 | } | 1850 | } |
1847 | } | 1851 | } |
1848 | } | 1852 | } |