diff options
author	Mel Gorman <mgorman@suse.de>	2013-12-18 20:08:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-12-18 22:04:51 -0500
commit	af2c1401e6f9177483be4fad876d0073669df9df (patch)
tree	630b024e7f5e53fcd73ab4d04148720185797a0a /include/linux
parent	20841405940e7be0617612d521e206e4b6b325db (diff)
mm: numa: guarantee that tlb_flush_pending updates are visible before page table updates
According to documentation on barriers, stores issued before a LOCK can
complete after the lock implying that it's possible tlb_flush_pending
can be visible after a page table update. As per revised documentation,
this patch adds a smp_mb__before_spinlock to guarantee the correct
ordering.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/mm_types.h	7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e5c49c30460f..ad0616f2fe2c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -482,7 +482,12 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 static inline void set_tlb_flush_pending(struct mm_struct *mm)
 {
 	mm->tlb_flush_pending = true;
-	barrier();
+
+	/*
+	 * Guarantee that the tlb_flush_pending store does not leak into the
+	 * critical section updating the page tables
+	 */
+	smp_mb__before_spinlock();
 }
 /* Clearing is done after a TLB flush, which also provides a barrier. */
 static inline void clear_tlb_flush_pending(struct mm_struct *mm)