author     Mel Gorman <mgorman@suse.de>                       2013-12-18 20:08:45 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2014-01-09 15:24:23 -0500
commit     5d8e03b2544c3cc962106f63c9d60578ad8a4c91 (patch)
tree       bcde3a35f6bd52622821f1e9d7c0bb5e7b693361 /include
parent     d303cf4624824971d94b4e2c7c95df052d14aa81 (diff)
mm: numa: guarantee that tlb_flush_pending updates are visible before page table updates
commit af2c1401e6f9177483be4fad876d0073669df9df upstream.
According to the documentation on barriers, stores issued before a LOCK
operation can complete after the lock is acquired, implying that the
tlb_flush_pending store may become visible only after a page table
update. As per the revised documentation, this patch adds
smp_mb__before_spinlock() to guarantee the correct ordering.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
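
To make the ordering hazard concrete, here is a minimal userspace C11
analogy; this is not kernel code. The names tlb_flush_pending, pte and
ptl are illustrative stand-ins, and atomic_thread_fence(memory_order_seq_cst)
plays the role of smp_mb__before_spinlock(). Acquiring the lock is only an
acquire operation, so without the full fence the flag store may drift past
it and complete after the page table store inside the critical section.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tlb_flush_pending;   /* stand-in for mm->tlb_flush_pending */
static atomic_int pte;                  /* stand-in for a page table entry */
static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* "page-table lock" */

static void *writer(void *arg)
{
	(void)arg;
	/* set_tlb_flush_pending(): store the flag first... */
	atomic_store_explicit(&tlb_flush_pending, true, memory_order_relaxed);
	/*
	 * ...then a full barrier, the analogue of smp_mb__before_spinlock().
	 * Taking the lock below is only an acquire barrier, so without this
	 * fence the flag store could complete after the pte store inside
	 * the critical section.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	pthread_mutex_lock(&ptl);
	atomic_store_explicit(&pte, 1, memory_order_relaxed); /* "page table update" */
	pthread_mutex_unlock(&ptl);
	return NULL;
}

static void *observer(void *arg)
{
	(void)arg;
	/* A lockless observer, standing in for another CPU. */
	int v = atomic_load_explicit(&pte, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	bool pending = atomic_load_explicit(&tlb_flush_pending,
					    memory_order_relaxed);

	/* With the writer's fence in place this can never fire. */
	if (v == 1 && !pending)
		puts("BUG: pte update visible before tlb_flush_pending");
	return NULL;
}

int main(void)
{
	pthread_t w, o;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&o, NULL, observer, NULL);
	pthread_join(w, NULL);
	pthread_join(o, NULL);
	return 0;
}

Built with cc -std=c11 -pthread, the BUG message cannot appear as written;
drop the fence in writer() and it becomes possible on weakly ordered
hardware such as ARM or POWER, while x86's total store order masks it,
which is one reason such races survive testing.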
Diffstat (limited to 'include')
-rw-r--r--   include/linux/mm_types.h   7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 49f0ada525a8..10a9a17342fc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -480,7 +480,12 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 static inline void set_tlb_flush_pending(struct mm_struct *mm)
 {
 	mm->tlb_flush_pending = true;
-	barrier();
+
+	/*
+	 * Guarantee that the tlb_flush_pending store does not leak into the
+	 * critical section updating the page tables
+	 */
+	smp_mb__before_spinlock();
 }
 /* Clearing is done after a TLB flush, which also provides a barrier. */
 static inline void clear_tlb_flush_pending(struct mm_struct *mm)
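
For context, the intended call pattern around the flag, as described by
the new comment, looks roughly like the sketch below. This is
illustrative kernel-style C, not a quote from the tree: mm, vma, pmd,
addr, start, end and ptl are assumed to be in scope, and the call sites
are paraphrased from the pattern this series established in mm/mprotect.c.

	/* Writer, e.g. a protection change over a range: */
	set_tlb_flush_pending(mm);	/* flag + full barrier (this patch) */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	/* ... write new page table entries ... */
	pte_unmap_unlock(pte, ptl);
	flush_tlb_range(vma, start, end);	/* the flush is the barrier... */
	clear_tlb_flush_pending(mm);		/* ...before clearing the flag */

	/* Reader, e.g. a NUMA hinting fault deciding whether to migrate: */
	if (mm_tlb_flush_pending(mm)) {
		/*
		 * A flush is still in flight: other CPUs may hold stale
		 * TLB entries for this PTE, so do not trust it yet.
		 */
	}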