author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-09-16 13:10:44 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-09-16 13:10:44 -0400
commit     96b4672703ed4538c7fc25de36df4415a0ee237c (patch)
tree       e5bb8f4c3eb41c5741a7b232cff8e502f6509fc3 /kernel/rcu/tree.c
parent     e98d06dd6cd791b5138b0fc6c14a9c0b4d1f2e72 (diff)
parent     a53dd6a65668850493cce94395c1b88a015eb338 (diff)
Merge branch 'rcu-tasks.2014.09.10a' into HEAD
rcu-tasks.2014.09.10a: Add RCU-tasks flavor of RCU.
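For context, the RCU-tasks flavor merged here provides call_rcu_tasks(), synchronize_rcu_tasks(), and rcu_barrier_tasks(); its grace period waits until every task has passed through a voluntary context switch, gone idle, or executed in user mode, which makes it suitable for freeing trampolines and similar code that already-running tasks might still be executing. The following is a minimal usage sketch only; the trampoline structure and free path are hypothetical and not part of this merge:

/* Hypothetical user of the RCU-tasks API added by this series (sketch). */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct trampoline {
	struct rcu_head rh;
	/* ... code that an in-flight task might still be executing ... */
};

static void trampoline_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct trampoline, rh));
}

static void retire_trampoline(struct trampoline *tp)
{
	/*
	 * Free only after every task has voluntarily switched, gone idle,
	 * or run in user mode, so none can still be inside tp's code.
	 */
	call_rcu_tasks(&tp->rh, trampoline_free_cb);
}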
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c  50
1 file changed, 28 insertions, 22 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index be0d0a1b7129..d7a3b13bc94c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -197,22 +197,24 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  * one since the start of the grace period, this just sets a flag.
  * The caller must have disabled preemption.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_sched"),
+				       __this_cpu_read(rcu_sched_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_sched_data.passed_quiesce, 1);
+	}
 }
 
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_bh"),
+				       __this_cpu_read(rcu_bh_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+	}
 }
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
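This first hunk drops the cpu argument from rcu_sched_qs()/rcu_bh_qs() and switches from per_cpu() addressing to __this_cpu_read()/__this_cpu_write(), so the functions always act on the invoking CPU's rcu_data and the trace event fires only on the 0-to-1 transition. The pattern, reduced to its essentials (the variable and function names below are illustrative only, not kernel symbols):

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, example_passed_quiesce);	/* illustrative per-CPU flag */

/* Old style: caller passes a CPU number and may address any CPU's copy. */
static void example_qs_old(int cpu)
{
	per_cpu(example_passed_quiesce, cpu) = 1;
}

/* New style: preemption is already disabled, so touch this CPU's copy only. */
static void example_qs_new(void)
{
	if (!__this_cpu_read(example_passed_quiesce))
		__this_cpu_write(example_passed_quiesce, 1);
}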
@@ -287,7 +289,7 @@ static void rcu_momentary_dyntick_idle(void)
 void rcu_note_context_switch(int cpu)
 {
 	trace_rcu_utilization(TPS("Start context switch"));
-	rcu_sched_qs(cpu);
+	rcu_sched_qs();
 	rcu_preempt_note_context_switch(cpu);
 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
 		rcu_momentary_dyntick_idle();
@@ -535,6 +537,7 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 	atomic_inc(&rdtp->dynticks);
 	smp_mb__after_atomic(); /* Force ordering with next sojourn. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+	rcu_dynticks_task_enter();
 
 	/*
 	 * It is illegal to enter an extended quiescent state while
@@ -651,6 +654,7 @@ void rcu_irq_exit(void)
 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 				int user)
 {
+	rcu_dynticks_task_exit();
 	smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
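The rcu_dynticks_task_enter()/rcu_dynticks_task_exit() calls bracket the dyntick-idle (extended quiescent state) window for the benefit of the new RCU-tasks flavor: while a nohz_full CPU runs without scheduling-clock ticks, RCU-tasks cannot observe its voluntary context switches, so the task advertises that it is idle from RCU-tasks' point of view. Their definitions live elsewhere in this series (kernel/rcu/tree_plugin.h, conditional on CONFIG_TASKS_RCU and NO_HZ_FULL); the shape below is a sketch, not a quote of the merged code:

/* Sketch of the helpers' likely shape; see tree_plugin.h for the real ones. */
static void rcu_dynticks_task_enter(void)
{
	/* Record that this task is running on a CPU RCU-tasks should skip. */
	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
}

static void rcu_dynticks_task_exit(void)
{
	/* Back out of the extended quiescent state. */
	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
}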
@@ -1656,7 +1660,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irq(&rnp->lock);
-		cond_resched();
+		cond_resched_rcu_qs();
 	}
 
 	mutex_unlock(&rsp->onoff_mutex);
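This and the following cond_resched() to cond_resched_rcu_qs() conversions cover long-running loops in the grace-period kthread and the quiescent-state forcing paths: on a PREEMPT=n kernel a bare cond_resched() may never actually schedule, so these loops could otherwise stall both normal and tasks-RCU grace periods. cond_resched_rcu_qs() notes a voluntary-context-switch-style quiescent state in addition to offering to reschedule; its wrapper, introduced by this series in include/linux/rcupdate.h, is roughly of the following form (a sketch, not a verbatim quote):

/* Sketch of the cond_resched_rcu_qs() wrapper added by this series. */
#define cond_resched_rcu_qs() \
do { \
	rcu_note_voluntary_context_switch(current); \
	cond_resched(); \
} while (0)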
@@ -1746,7 +1750,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
-		cond_resched();
+		cond_resched_rcu_qs();
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq(&rnp->lock);
@@ -1795,7 +1799,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init(rsp))
 				break;
-			cond_resched();
+			cond_resched_rcu_qs();
 			WARN_ON(signal_pending(current));
 			trace_rcu_grace_period(rsp->name,
 					       ACCESS_ONCE(rsp->gpnum),
@@ -1838,10 +1842,10 @@ static int __noreturn rcu_gp_kthread(void *arg)
 				trace_rcu_grace_period(rsp->name,
 						       ACCESS_ONCE(rsp->gpnum),
 						       TPS("fqsend"));
-				cond_resched();
+				cond_resched_rcu_qs();
 			} else {
 				/* Deal with stray signal. */
-				cond_resched();
+				cond_resched_rcu_qs();
 				WARN_ON(signal_pending(current));
 				trace_rcu_grace_period(rsp->name,
 						       ACCESS_ONCE(rsp->gpnum),
@@ -2401,8 +2405,8 @@ void rcu_check_callbacks(int cpu, int user)
 		 * at least not while the corresponding CPU is online.
 		 */
 
-		rcu_sched_qs(cpu);
-		rcu_bh_qs(cpu);
+		rcu_sched_qs();
+		rcu_bh_qs();
 
 	} else if (!in_softirq()) {
 
@@ -2413,11 +2417,13 @@ void rcu_check_callbacks(int cpu, int user)
 		 * critical section, so note it.
 		 */
 
-		rcu_bh_qs(cpu);
+		rcu_bh_qs();
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
 		invoke_rcu_core();
+	if (user)
+		rcu_note_voluntary_context_switch(current);
 	trace_rcu_utilization(TPS("End scheduler-tick"));
 }
 
@@ -2440,7 +2446,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 	struct rcu_node *rnp;
 
 	rcu_for_each_leaf_node(rsp, rnp) {
-		cond_resched();
+		cond_resched_rcu_qs();
 		mask = 0;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		smp_mb__after_unlock_lock();