Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
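
This change converts kernel/rcu/tree.c from the retired increment-specific barriers, smp_mb__before_atomic_inc() and smp_mb__after_atomic_inc(), to the generic smp_mb__before_atomic() and smp_mb__after_atomic() pair, which order memory accesses around any non-value-returning atomic read-modify-write. As a reference point, a minimal sketch of the generic pairing (illustrative only; the counter is hypothetical, not from this diff):

	#include <linux/atomic.h>

	static atomic_t example_counter = ATOMIC_INIT(0);	/* hypothetical */

	static void ordered_inc(void)
	{
		smp_mb__before_atomic();	/* order prior accesses before the RMW */
		atomic_inc(&example_counter);	/* atomic RMW with no return value */
		smp_mb__after_atomic();		/* order the RMW before later accesses */
	}
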
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3e3f13e8b429..f1ba77363fbb 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -458,9 +458,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 	}
 	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-	smp_mb__before_atomic_inc(); /* See above. */
+	smp_mb__before_atomic(); /* See above. */
 	atomic_inc(&rdtp->dynticks);
-	smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+	smp_mb__after_atomic(); /* Force ordering with next sojourn. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
 	/*
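
In rcu_eqs_enter_common() above, the increment takes rdtp->dynticks from odd to even: an even counter means the CPU is in an extended quiescent state (dynticks-idle), which is why the WARN_ON_ONCE() complains if the low bit is still set afterward. A minimal sketch of that parity invariant, assuming a bare atomic_t in place of the full rcu_dynticks structure:

	/* Even => CPU is dynticks-idle; odd => it may hold read-side CSes. */
	static void eqs_enter_sketch(atomic_t *dynticks)
	{
		smp_mb__before_atomic();	/* prior read-side CSes come first */
		atomic_inc(dynticks);		/* odd -> even: enter idle */
		smp_mb__after_atomic();		/* order with the next sojourn */
		WARN_ON_ONCE(atomic_read(dynticks) & 0x1);	/* must be even */
	}
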
@@ -578,10 +578,10 @@ void rcu_irq_exit(void)
 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 				int user)
 {
-	smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
+	smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic_inc(); /* See above. */
+	smp_mb__after_atomic(); /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
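
rcu_eqs_exit_common() is the mirror image: its increment flips dynticks from even back to odd before the CPU can begin new read-side critical sections, so the smp_mb__after_atomic() orders the increment ahead of those later critical sections. Continuing the same simplified sketch:

	static void eqs_exit_sketch(atomic_t *dynticks)
	{
		smp_mb__before_atomic();	/* order with the previous idle sojourn */
		atomic_inc(dynticks);		/* even -> odd: leave idle */
		smp_mb__after_atomic();		/* before later read-side CSes */
		WARN_ON_ONCE(!(atomic_read(dynticks) & 0x1));	/* must be odd */
	}
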
@@ -706,10 +706,10 @@ void rcu_nmi_enter(void)
 	    (atomic_read(&rdtp->dynticks) & 0x1))
 		return;
 	rdtp->dynticks_nmi_nesting++;
-	smp_mb__before_atomic_inc(); /* Force delay from prior write. */
+	smp_mb__before_atomic(); /* Force delay from prior write. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic_inc(); /* See above. */
+	smp_mb__after_atomic(); /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
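rcu_nmi_enter() layers a nesting count on top of the same parity scheme: an NMI that lands on a non-idle CPU (dynticks already odd, nesting zero) needs no bookkeeping, while an NMI taken from dynticks-idle must mark the CPU non-idle before the handler can run read-side critical sections. A hypothetical standalone sketch of the enter side (simplified names, not the kernel's exact state):

	struct nmi_state {
		atomic_t dynticks;	/* even = idle, odd = non-idle */
		int nmi_nesting;	/* depth of NMIs taken from idle */
	};

	static void nmi_enter_sketch(struct nmi_state *st)
	{
		/* NMI hit a non-idle CPU: RCU is already watching. */
		if (st->nmi_nesting == 0 && (atomic_read(&st->dynticks) & 0x1))
			return;
		st->nmi_nesting++;
		smp_mb__before_atomic();	/* force delay from prior write */
		atomic_inc(&st->dynticks);	/* mark the CPU non-idle */
		smp_mb__after_atomic();		/* before the handler's CSes */
	}
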
@@ -728,9 +728,9 @@ void rcu_nmi_exit(void)
 	    --rdtp->dynticks_nmi_nesting != 0)
 		return;
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-	smp_mb__before_atomic_inc(); /* See above. */
+	smp_mb__before_atomic(); /* See above. */
 	atomic_inc(&rdtp->dynticks);
-	smp_mb__after_atomic_inc(); /* Force delay to next write. */
+	smp_mb__after_atomic(); /* Force delay to next write. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
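rcu_nmi_exit() undoes this only when the outermost NMI taken from idle returns: the decrement of the nesting count must reach zero before the final increment flips dynticks back to even, and smp_mb__before_atomic() orders every read-side critical section in the handler before that increment. Continuing the hypothetical sketch:

	static void nmi_exit_sketch(struct nmi_state *st)
	{
		/* Nothing to undo, or still inside a nested NMI. */
		if (st->nmi_nesting == 0 || --st->nmi_nesting != 0)
			return;
		smp_mb__before_atomic();	/* handler's CSes happen first */
		atomic_inc(&st->dynticks);	/* back to even: idle again */
		smp_mb__after_atomic();		/* force delay to the next write */
	}
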
@@ -2918,7 +2918,7 @@ void synchronize_sched_expedited(void)
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic_inc(); /* ^^^ */
+			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone1);
 			return;
 		}
@@ -2936,7 +2936,7 @@ void synchronize_sched_expedited(void)
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic_inc(); /* ^^^ */
+			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone2);
 			return;
 		}
@@ -2965,7 +2965,7 @@ void synchronize_sched_expedited(void)
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
 			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic_inc(); /* ^^^ */
+			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_done_lost);
 			break;
 		}
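
In the three synchronize_sched_expedited() hunks, the barrier serves a different purpose: the ULONG_CMP_GE() test establishes that someone else's expedited grace period already covers this caller, and smp_mb__before_atomic() keeps that test ordered before the statistics increment, and hence before the caller's subsequent kfree() (the "^^^" comments point back at the test). A reduced sketch of the pattern, with done and stat as hypothetical stand-ins for the rsp fields:

	static bool already_done_sketch(atomic_long_t *done,
					atomic_long_t *stat, long snap)
	{
		long s = atomic_long_read(done);

		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
			/* The test above must be ordered before the
			 * caller's kfree(); the barrier enforces that. */
			smp_mb__before_atomic();
			atomic_long_inc(stat);	/* count the work we skipped */
			return true;
		}
		return false;
	}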