author    Minchan Kim <minchan@kernel.org>               2017-08-10 18:24:05 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2017-08-10 18:54:07 -0400
commit    56236a59556cfd3bae7bffb7e5f438b5ef0af880 (patch)
tree      b1e540f735b97b095edc76ead43408d793556575 /mm/memory.c
parent    a9b802500ebbff1544519a2969323b719dac21f0 (diff)
mm: refactor TLB gathering API
This is a preparatory patch for solving race problems caused by TLB batching. For that, we will increase/decrease the TLB flush pending count of mm_struct whenever tlb_[gather|finish]_mmu is called.

To keep that change simple, this patch separates out the architecture-specific part, renames it to arch_tlb_[gather|finish]_mmu, and has the generic part just call it. It shouldn't change any behavior.

Link: http://lkml.kernel.org/r/20170802000818.4760-5-namit@vmware.com
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
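The refactor leaves the generic tlb_gather_mmu()/tlb_finish_mmu() as thin wrappers, which is exactly where the pending-count bookkeeping described above can later be hooked in. The following is a minimal sketch of that follow-up, assuming a hypothetical atomic mm->tlb_flush_pending field and inc/dec helpers; none of this is introduced by this patch.

/*
 * Sketch only: shows where an mm-wide "TLB flush pending" count could be
 * maintained once the generic wrappers exist.  The tlb_flush_pending field
 * and the inc/dec helpers below are assumptions, not part of this patch.
 */
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);	/* hypothetical field */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_dec(&mm->tlb_flush_pending);	/* hypothetical field */
}

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	arch_tlb_gather_mmu(tlb, mm, start, end);
	inc_tlb_flush_pending(tlb->mm);		/* mark a batched flush in flight */
}

void tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end)
{
	arch_tlb_finish_mmu(tlb, start, end);
	dec_tlb_flush_pending(tlb->mm);		/* batched flush completed */
}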
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index f65beaad319b..34cba5113e06 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -215,12 +215,8 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
 	return true;
 }
 
-/* tlb_gather_mmu
- *	Called to initialize an (on-stack) mmu_gather structure for page-table
- *	tear-down from @mm. The @fullmm argument is used when @mm is without
- *	users and we're going to destroy the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+				unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
@@ -275,7 +271,8 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
  *	Called at the end of the shootdown operation to free up any resources
  *	that were required.
  */
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end)
 {
 	struct mmu_gather_batch *batch, *next;
 
@@ -398,6 +395,23 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
+/* tlb_gather_mmu
+ *	Called to initialize an (on-stack) mmu_gather structure for page-table
+ *	tear-down from @mm. The @fullmm argument is used when @mm is without
+ *	users and we're going to destroy the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
+{
+	arch_tlb_gather_mmu(tlb, mm, start, end);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end)
+{
+	arch_tlb_finish_mmu(tlb, start, end);
+}
+
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
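Callers of the generic API are untouched by this patch; the usual gather/unmap/finish sequence stays the same. Below is a hedged, heavily abridged sketch of that caller-side pattern (modeled loosely on the zap_page_range() path; locking, RSS hiwater accounting, and mmu notifiers are elided, and example_unmap_range is an illustrative name, not a real function).

/*
 * Caller-side shape of the API (illustrative, abridged).  Real callers
 * such as zap_page_range() also handle mmu notifiers, hiwater updates, etc.
 */
static void example_unmap_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);	/* generic entry point */
	unmap_vmas(&tlb, vma, start, end);		/* pages queue up in the gather */
	tlb_finish_mmu(&tlb, start, end);		/* flush TLBs, free batched pages */
}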