summaryrefslogtreecommitdiffstats
path: root/include/asm-generic/tlb.h
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2016-07-26 18:24:09 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-26 19:19:19 -0400
commite9d55e157034a9efd99405c99c1565d64619d82b (patch)
treed97dfd0c54d5a9fd1c7b6292c64c83791f9f5278 /include/asm-generic/tlb.h
parent31d49da5ad01728e48a1bb2b43795598b23de68a (diff)
mm: change the interface for __tlb_remove_page()
This updates the generic and arch specific implementation to return true if we need to do a tlb flush. That means if a __tlb_remove_page indicates a flush is needed, the page we try to remove needs to be tracked and added again after the flush. We need to track it because we have already updated the pte to none and we can't just loop back. This change is done to enable us to do a tlb_flush when we try to flush a range that consists of different page sizes. For architectures like ppc64, we can do a range based tlb flush and we need to track the page size for that. When we try to remove a huge page, we will force a tlb flush and start a new mmu gather. [aneesh.kumar@linux.vnet.ibm.com: mm-change-the-interface-for-__tlb_remove_page-v3] Link: http://lkml.kernel.org/r/1465049193-22197-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1464860389-29019-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Hugh Dickins <hughd@google.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Mel Gorman <mgorman@suse.de> Cc: David Rientjes <rientjes@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Minchan Kim <minchan.kim@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-generic/tlb.h')
-rw-r--r--include/asm-generic/tlb.h44
1 file changed, 33 insertions, 11 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 9dbb739cafa0..7b899a46a4cb 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -107,6 +107,11 @@ struct mmu_gather {
107 struct mmu_gather_batch local; 107 struct mmu_gather_batch local;
108 struct page *__pages[MMU_GATHER_BUNDLE]; 108 struct page *__pages[MMU_GATHER_BUNDLE];
109 unsigned int batch_count; 109 unsigned int batch_count;
110 /*
111 * __tlb_adjust_range will track the new addr here,
112 * so that we can adjust the range after the flush
113 */
114 unsigned long addr;
110}; 115};
111 116
112#define HAVE_GENERIC_MMU_GATHER 117#define HAVE_GENERIC_MMU_GATHER
@@ -115,23 +120,19 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
115void tlb_flush_mmu(struct mmu_gather *tlb); 120void tlb_flush_mmu(struct mmu_gather *tlb);
116void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, 121void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
117 unsigned long end); 122 unsigned long end);
118int __tlb_remove_page(struct mmu_gather *tlb, struct page *page); 123bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
119
120/* tlb_remove_page
121 * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
122 * required.
123 */
124static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
125{
126 if (!__tlb_remove_page(tlb, page))
127 tlb_flush_mmu(tlb);
128}
129 124
130static inline void __tlb_adjust_range(struct mmu_gather *tlb, 125static inline void __tlb_adjust_range(struct mmu_gather *tlb,
131 unsigned long address) 126 unsigned long address)
132{ 127{
133 tlb->start = min(tlb->start, address); 128 tlb->start = min(tlb->start, address);
134 tlb->end = max(tlb->end, address + PAGE_SIZE); 129 tlb->end = max(tlb->end, address + PAGE_SIZE);
130 /*
131 * Track the last address with which we adjusted the range. This
132 * will be used later to adjust again after a mmu_flush due to
133 * failed __tlb_remove_page
134 */
135 tlb->addr = address;
135} 136}
136 137
137static inline void __tlb_reset_range(struct mmu_gather *tlb) 138static inline void __tlb_reset_range(struct mmu_gather *tlb)
@@ -144,6 +145,27 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
144 } 145 }
145} 146}
146 147
148/* tlb_remove_page
149 * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
150 * required.
151 */
152static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
153{
154 if (__tlb_remove_page(tlb, page)) {
155 tlb_flush_mmu(tlb);
156 __tlb_adjust_range(tlb, tlb->addr);
157 __tlb_remove_page(tlb, page);
158 }
159}
160
161static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
162{
163 /* active->nr should be zero when we call this */
164 VM_BUG_ON_PAGE(tlb->active->nr, page);
165 __tlb_adjust_range(tlb, tlb->addr);
166 return __tlb_remove_page(tlb, page);
167}
168
147/* 169/*
148 * In the case of tlb vma handling, we can optimise these away in the 170 * In the case of tlb vma handling, we can optimise these away in the
149 * case where we're doing a full MM flush. When we're doing a munmap, 171 * case where we're doing a full MM flush. When we're doing a munmap,