Diffstat (limited to 'kernel/rcu/tree.c')
 kernel/rcu/tree.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0c47e300210a..88b4a1dcb58c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -387,9 +387,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 	}
 	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-	smp_mb__before_atomic_inc();  /* See above. */
+	smp_mb__before_atomic();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
-	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
+	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
 	/*
@@ -507,10 +507,10 @@ void rcu_irq_exit(void)
 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 				int user)
 {
-	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
+	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic_inc();  /* See above. */
+	smp_mb__after_atomic();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
@@ -635,10 +635,10 @@ void rcu_nmi_enter(void)
 	    (atomic_read(&rdtp->dynticks) & 0x1))
 		return;
 	rdtp->dynticks_nmi_nesting++;
-	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
+	smp_mb__before_atomic();  /* Force delay from prior write. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic_inc();  /* See above. */
+	smp_mb__after_atomic();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
@@ -657,9 +657,9 @@ void rcu_nmi_exit(void)
 	    --rdtp->dynticks_nmi_nesting != 0)
 		return;
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-	smp_mb__before_atomic_inc();  /* See above. */
+	smp_mb__before_atomic();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
-	smp_mb__after_atomic_inc();  /* Force delay to next write. */
+	smp_mb__after_atomic();  /* Force delay to next write. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
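The four hunks above convert the same idiom: the dynticks counter flip in the idle-entry, idle-exit, and NMI paths is bracketed by full barriers so that CPUs observing the atomic_inc() also observe the RCU read-side critical sections on the correct side of it. A minimal sketch of that pattern using the new generic barrier names follows; the function and variable names here are illustrative, not taken from this file:

#include <linux/atomic.h>

/* Illustrative only: mirrors the rdtp->dynticks idiom in the hunks above. */
static atomic_t example_dynticks = ATOMIC_INIT(0);

static void example_dynticks_flip(void)
{
	smp_mb__before_atomic();	/* Order prior accesses before the increment. */
	atomic_inc(&example_dynticks);	/* Non-value-returning atomic: no ordering by itself. */
	smp_mb__after_atomic();		/* Order the increment before later accesses. */
}

The generic smp_mb__{before,after}_atomic() pair replaces the per-operation smp_mb__{before,after}_atomic_inc() names and applies to any non-value-returning atomic read-modify-write, which is why every substitution in this patch is purely mechanical.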
@@ -2790,7 +2790,7 @@ void synchronize_sched_expedited(void)
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic_inc(); /* ^^^ */
+			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone1);
 			return;
 		}
@@ -2808,7 +2808,7 @@ void synchronize_sched_expedited(void)
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic_inc(); /* ^^^ */
+			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone2);
 			return;
 		}
@@ -2837,7 +2837,7 @@ void synchronize_sched_expedited(void)
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
 			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic_inc(); /* ^^^ */
+			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_done_lost);
 			break;
 		}
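The three synchronize_sched_expedited() hunks differ slightly from the dynticks ones: there is no paired after-barrier, because all that matters is ordering the expedited_done test against the caller's subsequent kfree(), and smp_mb__before_atomic() supplies the required full barrier ahead of the statistics increment. A hedged sketch of that fast path, with hypothetical names standing in for everything not visible in the diff:

#include <linux/atomic.h>
#include <linux/rcupdate.h>	/* assumed to provide ULONG_CMP_GE() for this sketch */

static atomic_long_t example_done = ATOMIC_LONG_INIT(0);	/* hypothetical done counter */
static atomic_long_t example_stat = ATOMIC_LONG_INIT(0);	/* hypothetical statistics counter */

/* Return true if someone else's expedited grace period already covers snap. */
static bool example_gp_done(unsigned long snap)
{
	long s = atomic_long_read(&example_done);

	if (ULONG_CMP_GE((unsigned long)s, snap)) {
		/* Full barrier: order the test above before the caller's kfree(). */
		smp_mb__before_atomic();
		atomic_long_inc(&example_stat);
		return true;
	}
	return false;
}

Here the atomic_long_inc() is only a statistics bump; the barrier is placed before it purely to piggyback a full memory barrier onto the non-value-returning atomic, exactly as the "/* ^^^ */" comments in the hunks indicate.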