author     Peter Zijlstra <peterz@infradead.org>    2017-12-05 07:34:46 -0500
committer  Ingo Molnar <mingo@kernel.org>           2017-12-22 14:13:03 -0500
commit     b5fc6d943808b570bdfbec80f40c6b3855f1c48b (patch)
tree       2e6ddbeb5ad7a205c6c97c8ba0ca877051187ddb
parent     a501686b2923ce6f2ff2b1d0d50682c6411baf72 (diff)
x86/mm: Remove superfluous barriers
atomic64_inc_return() already implies smp_mb() before and after.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/include/asm/tlbflush.h  8
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c2e45da4e540..3e2227386abe 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -60,19 +60,13 @@ static inline void invpcid_flush_all_nonglobals(void)
 
 static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 {
-	u64 new_tlb_gen;
-
 	/*
 	 * Bump the generation count. This also serves as a full barrier
 	 * that synchronizes with switch_mm(): callers are required to order
 	 * their read of mm_cpumask after their writes to the paging
 	 * structures.
 	 */
-	smp_mb__before_atomic();
-	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
-	smp_mb__after_atomic();
-
-	return new_tlb_gen;
+	return atomic64_inc_return(&mm->context.tlb_gen);
 }
 
 #ifdef CONFIG_PARAVIRT
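
For context on the ordering claim, here is a minimal userspace sketch using C11 atomics rather than the kernel's atomic64 API. The names (fake_mm_context, inc_tlb_gen_with_fences, inc_tlb_gen) are invented for illustration; the point is only that a value-returning, sequentially consistent read-modify-write already acts as a full barrier, so bracketing it with explicit fences (the analogue of smp_mb__before_atomic()/smp_mb__after_atomic()) adds nothing.

/*
 * Illustrative userspace C11 sketch, not kernel code.
 * All names below are made up for this example.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct fake_mm_context {
	atomic_uint_fast64_t tlb_gen;
};

/* Pre-patch shape: explicit fences around an already fully ordered RMW. */
static uint64_t inc_tlb_gen_with_fences(struct fake_mm_context *ctx)
{
	uint64_t new_gen;

	atomic_thread_fence(memory_order_seq_cst);        /* ~smp_mb__before_atomic() */
	new_gen = atomic_fetch_add(&ctx->tlb_gen, 1) + 1; /* seq_cst RMW: full barrier on its own */
	atomic_thread_fence(memory_order_seq_cst);        /* ~smp_mb__after_atomic() */

	return new_gen;
}

/* Post-patch shape: the seq_cst RMW alone provides the ordering. */
static uint64_t inc_tlb_gen(struct fake_mm_context *ctx)
{
	return atomic_fetch_add(&ctx->tlb_gen, 1) + 1;
}

int main(void)
{
	struct fake_mm_context ctx = { .tlb_gen = 0 };

	printf("gen = %llu\n", (unsigned long long)inc_tlb_gen_with_fences(&ctx));
	printf("gen = %llu\n", (unsigned long long)inc_tlb_gen(&ctx));
	return 0;
}

The kernel rule being applied is the same: value-returning atomics such as atomic64_inc_return() are fully ordered on their own, while smp_mb__before_atomic()/smp_mb__after_atomic() exist to add ordering around the non-value-returning forms such as atomic64_inc().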