author     Ingo Molnar <mingo@kernel.org>    2013-12-16 05:43:41 -0500
committer  Ingo Molnar <mingo@kernel.org>    2013-12-16 05:43:41 -0500
commit     73a7ac2808fa52bdab1781646568b6f90c3d7034 (patch)
tree       b3a79f3ce811167c37e9c0e65aeb8a7c70bed4c8 /kernel/rcu/tree.c
parent     319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
parent     0d3c55bc9fd58393bd3bd9974991ec1f815e1326 (diff)
Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull v3.14 RCU updates from Paul E. McKenney.
The main changes:
* Update RCU documentation.
* Miscellaneous fixes.
* Add RCU torture scripts.
* Static-analysis improvements.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--   kernel/rcu/tree.c   79
1 file changed, 65 insertions(+), 14 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index dd081987a8ec..e37bd561c26f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -369,6 +369,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
                                  bool user)
 {
+        struct rcu_state *rsp;
+        struct rcu_data *rdp;
+
         trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
         if (!user && !is_idle_task(current)) {
                 struct task_struct *idle __maybe_unused =
@@ -380,6 +383,10 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
                           current->pid, current->comm,
                           idle->pid, idle->comm); /* must be idle task! */
         }
+        for_each_rcu_flavor(rsp) {
+                rdp = this_cpu_ptr(rsp->rda);
+                do_nocb_deferred_wakeup(rdp);
+        }
         rcu_prepare_for_idle(smp_processor_id());
         /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
         smp_mb__before_atomic_inc();  /* See above. */
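
The do_nocb_deferred_wakeup() calls added above drain any rcuo-kthread wakeups that call_rcu() had to postpone because it ran with interrupts disabled, where waking a kthread could deadlock on scheduler locks. Below is a minimal standalone sketch of that defer-then-drain pattern; all names and types here are hypothetical simplifications, not the kernel's rcu_data/rcuo machinery.

#include <stdbool.h>
#include <stdio.h>

struct cb_queue {
        bool nocb_defer_wakeup;        /* wakeup postponed from an unsafe context */
};

static bool irqs_disabled;             /* stand-in for irqs_disabled_flags() */

static void wake_cb_kthread(struct cb_queue *q)
{
        q->nocb_defer_wakeup = false;
        printf("rcuo kthread woken\n");
}

/* Called where sleeping or scheduler locks are unsafe: just record the need. */
static void enqueue_callback(struct cb_queue *q)
{
        if (irqs_disabled)
                q->nocb_defer_wakeup = true;    /* defer: unsafe to wake now */
        else
                wake_cb_kthread(q);             /* safe: wake immediately */
}

/* Called from a safe context (idle entry, softirq): drain deferred work. */
static void do_deferred_wakeup(struct cb_queue *q)
{
        if (q->nocb_defer_wakeup)
                wake_cb_kthread(q);
}

int main(void)
{
        struct cb_queue q = { false };

        irqs_disabled = true;
        enqueue_callback(&q);   /* wakeup deferred */
        irqs_disabled = false;
        do_deferred_wakeup(&q); /* drained later, as at idle entry above */
        return 0;
}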
@@ -411,11 +418,12 @@ static void rcu_eqs_enter(bool user)
         rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
-        if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+        if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
                 rdtp->dynticks_nesting = 0;
-        else
+                rcu_eqs_enter_common(rdtp, oldval, user);
+        } else {
                 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-        rcu_eqs_enter_common(rdtp, oldval, user);
+        }
 }
 
 /**
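
After this restructuring, rcu_eqs_enter_common() runs only when the task-level nesting count actually drops to zero, rather than on every call. A toy user-space model of that logic follows; the mask constants are made up for illustration (the real DYNTICK_TASK_* values live in include/linux/rcupdate.h and encode more state).

#include <stdio.h>

#define NEST_VALUE 1LL          /* one task-level nesting increment (toy value) */
#define NEST_MASK  ~0LL         /* toy mask covering the whole counter */

static long long nesting = NEST_VALUE; /* one outstanding "non-idle" level */

static void eqs_enter_common(void) { printf("entering idle (EQS)\n"); }

static void eqs_enter(void)
{
        if ((nesting & NEST_MASK) == NEST_VALUE) {
                nesting = 0;            /* outermost exit from task context */
                eqs_enter_common();     /* only now do the real idle entry */
        } else {
                nesting -= NEST_VALUE;  /* inner level: just decrement */
        }
}

int main(void)
{
        eqs_enter();    /* the EQS work happens exactly once, at the outermost level */
        return 0;
}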
@@ -533,11 +541,12 @@ static void rcu_eqs_exit(bool user)
         rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         WARN_ON_ONCE(oldval < 0);
-        if (oldval & DYNTICK_TASK_NEST_MASK)
+        if (oldval & DYNTICK_TASK_NEST_MASK) {
                 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-        else
+        } else {
                 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-        rcu_eqs_exit_common(rdtp, oldval, user);
+                rcu_eqs_exit_common(rdtp, oldval, user);
+        }
 }
 
 /**
@@ -716,7 +725,7 @@ bool rcu_lockdep_current_cpu_online(void)
         bool ret;
 
         if (in_nmi())
-                return 1;
+                return true;
         preempt_disable();
         rdp = this_cpu_ptr(&rcu_sched_data);
         rnp = rdp->mynode;
@@ -755,6 +764,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 }
 
 /*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
+/*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through an dynticks
  * idle state since the last call to dyntick_save_progress_counter()
@@ -812,16 +827,34 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
          */
         rcu_kick_nohz_cpu(rdp->cpu);
 
+        /*
+         * Alternatively, the CPU might be running in the kernel
+         * for an extended period of time without a quiescent state.
+         * Attempt to force the CPU through the scheduler to gain the
+         * needed quiescent state, but only if the grace period has gone
+         * on for an uncommonly long time.  If there are many stuck CPUs,
+         * we will beat on the first one until it gets unstuck, then move
+         * to the next.  Only do this for the primary flavor of RCU.
+         */
+        if (rdp->rsp == rcu_state &&
+            ULONG_CMP_GE(ACCESS_ONCE(jiffies), rdp->rsp->jiffies_resched)) {
+                rdp->rsp->jiffies_resched += 5;
+                resched_cpu(rdp->cpu);
+        }
+
         return 0;
 }
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
         unsigned long j = ACCESS_ONCE(jiffies);
+        unsigned long j1;
 
         rsp->gp_start = j;
         smp_wmb(); /* Record start time before stall time. */
-        rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
+        j1 = rcu_jiffies_till_stall_check();
+        rsp->jiffies_stall = j + j1;
+        rsp->jiffies_resched = j + j1 / 2;
 }
 
 /*
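
record_gp_stall_check_time() now derives two deadlines from one stall-check interval: the stall warning fires after the full interval, while resched_cpu() prodding starts at half that and then repeats every 5 jiffies. Here is a worked example of that arithmetic; HZ=1000 and the 21-second stall timeout are assumptions (21 s is the usual CONFIG_RCU_CPU_STALL_TIMEOUT default), not values taken from this patch.

#include <stdio.h>

int main(void)
{
        const unsigned long HZ = 1000;          /* assumed tick rate */
        unsigned long j  = 0;                   /* pretend jiffies at GP start */
        unsigned long j1 = 21 * HZ;             /* rcu_jiffies_till_stall_check() */

        unsigned long jiffies_stall   = j + j1;     /* warn at 21000 jiffies */
        unsigned long jiffies_resched = j + j1 / 2; /* start prodding at 10500 */

        printf("stall warning at %lu, first resched_cpu() at %lu\n",
               jiffies_stall, jiffies_resched);
        /* Each prod pushes the deadline 5 jiffies out, so a stuck CPU is
         * rescheduled roughly every 5 ticks until it reports a QS. */
        for (int i = 0; i < 3; i++) {
                printf("resched_cpu() deadline now %lu\n", jiffies_resched);
                jiffies_resched += 5;
        }
        return 0;
}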
@@ -1509,6 +1542,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                 rdp = this_cpu_ptr(rsp->rda);
                 if (rnp == rdp->mynode)
                         __note_gp_changes(rsp, rnp, rdp);
+                /* smp_mb() provided by prior unlock-lock pair. */
                 nocb += rcu_future_gp_cleanup(rsp, rnp);
                 raw_spin_unlock_irq(&rnp->lock);
                 cond_resched();
@@ -1553,6 +1587,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                         wait_event_interruptible(rsp->gp_wq,
                                                  ACCESS_ONCE(rsp->gp_flags) &
                                                  RCU_GP_FLAG_INIT);
+                        /* Locking provides needed memory barrier. */
                         if (rcu_gp_init(rsp))
                                 break;
                         cond_resched();
@@ -1582,6 +1617,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                         (!ACCESS_ONCE(rnp->qsmask) &&
                                          !rcu_preempt_blocked_readers_cgp(rnp)),
                                         j);
+                        /* Locking provides needed memory barriers. */
                         /* If grace period done, leave loop. */
                         if (!ACCESS_ONCE(rnp->qsmask) &&
                             !rcu_preempt_blocked_readers_cgp(rnp))
@@ -1901,13 +1937,13 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  * Adopt the RCU callbacks from the specified rcu_state structure's
  * orphanage.  The caller must hold the ->orphan_lock.
  */
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 {
         int i;
         struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
         /* No-CBs CPUs are handled specially. */
-        if (rcu_nocb_adopt_orphan_cbs(rsp, rdp))
+        if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
                 return;
 
         /* Do the accounting first. */
@@ -1986,7 +2022,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
         /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
         rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
-        rcu_adopt_orphan_cbs(rsp);
+        rcu_adopt_orphan_cbs(rsp, flags);
 
         /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
         mask = rdp->grpmask;    /* rnp->grplo is constant. */
@@ -2303,6 +2339,9 @@ __rcu_process_callbacks(struct rcu_state *rsp)
         /* If there are callbacks ready, invoke them. */
         if (cpu_has_callbacks_ready_to_invoke(rdp))
                 invoke_rcu_callbacks(rsp, rdp);
+
+        /* Do any needed deferred wakeups of rcuo kthreads. */
+        do_nocb_deferred_wakeup(rdp);
 }
 
 /*
@@ -2437,7 +2476,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 
                 if (cpu != -1)
                         rdp = per_cpu_ptr(rsp->rda, cpu);
-                offline = !__call_rcu_nocb(rdp, head, lazy);
+                offline = !__call_rcu_nocb(rdp, head, lazy, flags);
                 WARN_ON_ONCE(offline);
                 /* _call_rcu() is illegal on offline CPU; leak the callback. */
                 local_irq_restore(flags);
@@ -2757,6 +2796,10 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
         /* Check for CPU stalls, if enabled. */
         check_cpu_stall(rsp, rdp);
 
+        /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
+        if (rcu_nohz_full_cpu(rsp))
+                return 0;
+
         /* Is the RCU core waiting for a quiescent state from this CPU? */
         if (rcu_scheduler_fully_active &&
             rdp->qs_pending && !rdp->passed_quiesce) {
@@ -2790,6 +2833,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
                 return 1;
         }
 
+        /* Does this CPU need a deferred NOCB wakeup? */
+        if (rcu_nocb_need_deferred_wakeup(rdp)) {
+                rdp->n_rp_nocb_defer_wakeup++;
+                return 1;
+        }
+
         /* nothing to do */
         rdp->n_rp_need_nothing++;
         return 0;
@@ -3214,9 +3263,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 {
         int i;
 
-        for (i = rcu_num_lvls - 1; i > 0; i--)
+        rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+        for (i = rcu_num_lvls - 2; i >= 0; i--)
                 rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-        rsp->levelspread[0] = rcu_fanout_leaf;
 }
 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
 static void __init rcu_init_levelspread(struct rcu_state *rsp)
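
The reworked rcu_init_levelspread() gives the leaf fanout to the last level and fills the remaining levels with CONFIG_RCU_FANOUT; the old loop handed rcu_fanout_leaf to index 0 instead, which (assuming index 0 is the root level of the rcu_node tree, as elsewhere in this file) applied the leaf fanout to the root. A worked example with assumed geometry (three levels, CONFIG_RCU_FANOUT=64, rcu_fanout_leaf=16):

#include <stdio.h>

int main(void)
{
        const int rcu_num_lvls = 3;
        const int CONFIG_RCU_FANOUT = 64, rcu_fanout_leaf = 16;
        int levelspread[3];

        levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;    /* leaf level */
        for (int i = rcu_num_lvls - 2; i >= 0; i--)
                levelspread[i] = CONFIG_RCU_FANOUT;         /* interior + root */

        /* Result: {64, 64, 16} -- CPUs fan into leaves by 16, upper levels
         * by 64.  The old loop produced {16, 64, 64}, i.e. the leaf fanout
         * landed at index 0 rather than at the leaf level. */
        for (int i = 0; i < rcu_num_lvls; i++)
                printf("levelspread[%d] = %d\n", i, levelspread[i]);
        return 0;
}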
@@ -3346,6 +3395,8 @@ static void __init rcu_init_geometry(void)
         if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
             nr_cpu_ids == NR_CPUS)
                 return;
+        pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
+                rcu_fanout_leaf, nr_cpu_ids);
 
         /*
          * Compute number of nodes that can be handled an rcu_node tree