author     Peter Zijlstra <peterz@infradead.org>   2018-09-20 04:54:04 -0400
committer  Ingo Molnar <mingo@kernel.org>          2019-04-03 04:33:01 -0400
commit     fa0aafb8acb684e68231ff0a547ed249f8dc31a5
tree       f5ee01320b5d6700560258508fe0e31ceb6fac18
parent     b3fa8ed4e48802e6ba0aa5f3283313a27dcbf46f
asm-generic/tlb: Remove tlb_flush_mmu_free()
As the comment notes, it is a potentially dangerous operation. Just
use tlb_flush_mmu(), which will skip the (double) TLB invalidate if
it really isn't needed anyway.
No change in behavior intended.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
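For context, a simplified sketch of the two surviving helpers as they look
around this kernel version (abridged; the real bodies live in
include/asm-generic/tlb.h and mm/mmu_gather.c). The key point is that
tlb_flush_mmu_tlbonly() resets the tracked range, so a later call through
tlb_flush_mmu() sees tlb->end == 0 and returns early; that is the "double"
invalidate being skipped:

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)		/* no pending range: nothing to invalidate */
		return;

	tlb_flush(tlb);		/* arch-specific TLB invalidate */
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);	/* clears tlb->end for the next round */
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);	/* no-op if already invalidated */
	tlb_flush_mmu_free(tlb);	/* free whatever pages are batched */
}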
 include/asm-generic/tlb.h | 10 +++-------
 mm/memory.c               |  2 +-
 mm/mmu_gather.c           |  2 +-
 3 files changed, 5 insertions(+), 9 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 2648a02a6b1b..ddd3d02be93d 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -67,16 +67,13 @@
  * call before __tlb_remove_page*() to set the current page-size; implies a
  * possible tlb_flush_mmu() call.
  *
- * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly() / tlb_flush_mmu_free()
+ * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
  *
  *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
  *                              related state, like the range)
  *
- *    tlb_flush_mmu_free() - frees the queued pages; make absolutely
- *                           sure no additional tlb_remove_page()
- *                           calls happen between _tlbonly() and this.
- *
- *    tlb_flush_mmu() - the above two calls.
+ *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ *                      whatever pages are still batched.
  *
  * - mmu_gather::fullmm
  *
@@ -281,7 +278,6 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 		unsigned long start, unsigned long end, bool force);
-void tlb_flush_mmu_free(struct mmu_gather *tlb);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address,
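The comment deleted above explains why the split API was fragile: any pages
queued between the two halves would be freed against a range that was never
invalidated. A hypothetical misuse, purely for illustration (not code from
the tree):

	tlb_flush_mmu_tlbonly(tlb);	/* invalidate; range is reset */
	__tlb_remove_page(tlb, page);	/* queues a new page... */
	tlb_flush_mmu_free(tlb);	/* ...freed while other CPUs may
					 * still hold stale TLB entries */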
diff --git a/mm/memory.c b/mm/memory.c
index 1aa5c03566f1..36aac6844662 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1155,7 +1155,7 @@ again:
 	 */
 	if (force_flush) {
 		force_flush = 0;
-		tlb_flush_mmu_free(tlb);
+		tlb_flush_mmu(tlb);
 		if (addr != end)
 			goto again;
 	}
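For reference, the surrounding zap_pte_range() code (abridged from the
mm/memory.c of this era) shows why this hunk changes no behavior: the TLB
invalidate already ran under the page-table lock, so the tlbonly half of
tlb_flush_mmu() finds an empty range and falls through to the free:

	/* Do the actual TLB flush before dropping ptl */
	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb);	/* invalidates, resets range */
	pte_unmap_unlock(start_pte, ptl);
	...
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu(tlb);		/* tlbonly part is a no-op here */
		if (addr != end)
			goto again;
	}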
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 7f5b2b8aa9dd..35699a4d0a74 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -91,7 +91,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 #endif /* HAVE_MMU_GATHER_NO_GATHER */
 
-void tlb_flush_mmu_free(struct mmu_gather *tlb)
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);