author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2016-12-12 19:42:34 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 21:55:07 -0500
commit	b5bc66b713108710e341bb164f8ffbc11896706e (patch)
tree	bc4f04677ff48c4633c40d092421f3bd86db572d /include/asm-generic
parent	c0f2e176f87bd989835bd098a52779df41a9243c (diff)
mm: update mmu_gather range correctly
We use __tlb_adjust_range to update the range covered by the mmu_gather struct. We later use 'start' and 'end' to do a mmu_notifier_invalidate_range in tlb_flush_mmu_tlbonly(). Update 'end' correctly in __tlb_adjust_range so that we call mmu_notifier_invalidate_range with the correct range values.

W.r.t. the TLB flush, this should not have any impact, because a flush with the correct start address will flush the TLB mapping for the range.

Also add a comment about updating the range when we free page table pages. For now we don't support a range-based page table cache flush.

Link: http://lkml.kernel.org/r/20161026084839.27299-3-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
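The effect of the new range_size argument is easiest to see in isolation. Below is a minimal userspace sketch of the range-tracking logic; the struct and the two size constants are simplified stand-ins for the kernel's mmu_gather and page-size macros, not the kernel code itself:

#include <stdio.h>

#define PAGE_SIZE      0x1000UL   /* assumed 4K base pages */
#define HPAGE_PMD_SIZE 0x200000UL /* assumed 2M PMD huge pages */

struct mmu_gather {
	unsigned long start; /* lowest unmapped address seen so far */
	unsigned long end;   /* one past the highest unmapped address */
};

/* Mirrors the patched __tlb_adjust_range(): the caller now states how
 * large the mapping at 'address' really is. */
static void tlb_adjust_range(struct mmu_gather *tlb,
			     unsigned long address, unsigned long range_size)
{
	if (address < tlb->start)
		tlb->start = address;
	if (address + range_size > tlb->end)
		tlb->end = address + range_size;
}

int main(void)
{
	struct mmu_gather tlb = { .start = ~0UL, .end = 0 };

	/* Tear down one 2M huge page mapped at 0x200000. */
	tlb_adjust_range(&tlb, 0x200000UL, HPAGE_PMD_SIZE);

	/* Prints [0x200000, 0x400000). The pre-patch code computed
	 * 'end' as address + PAGE_SIZE, i.e. 0x201000, so the later
	 * mmu_notifier_invalidate_range() call saw only the first 4K
	 * of the huge page. */
	printf("range: [%#lx, %#lx)\n", tlb.start, tlb.end);
	return 0;
}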
Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/tlb.h	43
1 file changed, 31 insertions(+), 12 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index c6d667187608..dba727becd5f 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -125,10 +125,11 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
-				      unsigned long address)
+				      unsigned long address,
+				      unsigned int range_size)
 {
 	tlb->start = min(tlb->start, address);
-	tlb->end = max(tlb->end, address + PAGE_SIZE);
+	tlb->end = max(tlb->end, address + range_size);
 	/*
 	 * Track the last address with which we adjusted the range. This
 	 * will be used later to adjust again after a mmu_flush due to
@@ -153,7 +154,7 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 	if (__tlb_remove_page_size(tlb, page, page_size)) {
 		tlb_flush_mmu(tlb);
 		tlb->page_size = page_size;
-		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_adjust_range(tlb, tlb->addr, page_size);
 		__tlb_remove_page_size(tlb, page, page_size);
 	}
 }
@@ -177,7 +178,7 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
 	/* active->nr should be zero when we call this */
 	VM_BUG_ON_PAGE(tlb->active->nr, page);
 	tlb->page_size = PAGE_SIZE;
-	__tlb_adjust_range(tlb, tlb->addr);
+	__tlb_adjust_range(tlb, tlb->addr, PAGE_SIZE);
 	return __tlb_remove_page(tlb, page);
 }
 
@@ -215,7 +216,7 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
  */
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
-		__tlb_adjust_range(tlb, address);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
@@ -227,29 +228,47 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
 #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
 #endif
 
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
 	do {								\
-		__tlb_adjust_range(tlb, address);			\
+		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
 	} while (0)
 
+/*
+ * For things like page tables caches (ie caching addresses "inside" the
+ * page tables, like x86 does), for legacy reasons, flushing an
+ * individual page had better flush the page table caches behind it. This
+ * is definitely how x86 works, for example. And if you have an
+ * architected non-legacy page table cache (which I'm not aware of
+ * anybody actually doing), you're going to have some architecturally
+ * explicit flushing for that, likely *separate* from a regular TLB entry
+ * flush, and thus you'd need more than just some range expansion..
+ *
+ * So if we ever find an architecture
+ * that would want something that odd, I think it is up to that
+ * architecture to do its own odd thing, not cause pain for others
+ * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
+ *
+ * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
+ */
+
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
 
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 
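For context, the 'start'/'end' values assembled by __tlb_adjust_range() above are consumed when the gather is flushed. The following is a simplified sketch of that consumer, modelled on tlb_flush_mmu_tlbonly() from mm/memory.c of this era (abbreviated and not part of this patch), showing why an under-computed 'end' truncates the notifier range:

static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return;			/* nothing gathered, nothing to flush */

	tlb_flush(tlb);			/* arch hook: flush the TLB for [start, end) */
	/*
	 * This is the call the patch fixes: with 'end' computed as
	 * address + PAGE_SIZE, secondary MMUs (KVM, IOMMU, ...) were
	 * asked to invalidate only the first 4K of a 2M huge-page unmap.
	 */
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);		/* start over for the next batch */
}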