author		Will Deacon <will.deacon@arm.com>	2014-10-29 06:03:09 -0400
committer	Will Deacon <will.deacon@arm.com>	2014-11-17 05:12:42 -0500
commit		fb7332a9fedfd62b1ba6530c86f39f0fa38afd49
tree		5e77bd4944da750634c4438df64257cdeaa58888 /mm/memory.c
parent		63648dd20fa0780ab6c1e923b5c276d257422cb3
mmu_gather: move minimal range calculations into generic code
On architectures with hardware broadcasting of TLB invalidation messages,
it makes sense to reduce the range of the mmu_gather structure when
unmapping page ranges based on the dirty address information passed to
tlb_remove_tlb_entry.
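In rough terms, every PTE torn down widens the pending flush window, so the
final invalidation only has to cover addresses that were actually unmapped.
A standalone sketch of that bookkeeping (a simplified model; the helper name
__tlb_adjust_range mirrors the generic side of this series and is not the
kernel definition, which lives outside the mm/memory.c diffstat shown below):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Simplified stand-in for the kernel's struct mmu_gather range fields. */
struct mmu_gather {
	unsigned long start;	/* lowest address queued for invalidation */
	unsigned long end;	/* one past the highest queued address */
};

/* Grow the pending flush window to cover one more unmapped entry. */
static void __tlb_adjust_range(struct mmu_gather *tlb, unsigned long address)
{
	if (address < tlb->start)
		tlb->start = address;
	if (address + PAGE_SIZE > tlb->end)
		tlb->end = address + PAGE_SIZE;
}

int main(void)
{
	struct mmu_gather tlb = { .start = ~0UL, .end = 0 };

	/* Unmap two pages; the tracked range covers exactly those pages. */
	__tlb_adjust_range(&tlb, 0x1000);
	__tlb_adjust_range(&tlb, 0x5000);
	printf("flush [%#lx, %#lx)\n", tlb.start, tlb.end);	/* [0x1000, 0x6000) */
	return 0;
}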
arm64 already does this by directly manipulating the start/end fields
of the gather structure, but this confuses the generic code which
does not expect these fields to change and can end up calculating
invalid, negative ranges when forcing a flush in zap_pte_range.
This patch moves the minimal range calculation out of the arm64 code
and into the generic implementation, simplifying zap_pte_range in the
process (which no longer needs to care about start/end, since they will
point to the appropriate ranges already). With the range being tracked
by core code, the need_flush flag is dropped in favour of checking that
the end of the range has actually been set.
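The "has the end been set?" test can stand in for need_flush because an empty
range is reset to end == 0. A minimal model of that idea (again a sketch with
assumed names such as __tlb_reset_range and tlb_flush_if_needed, not the
kernel code itself):

#include <stdio.h>
#include <stdbool.h>

struct mmu_gather {
	unsigned long start;
	unsigned long end;	/* end == 0 means nothing queued, i.e. no flush needed */
};

/* Empty the tracked range; this takes over the job of need_flush = 0. */
static void __tlb_reset_range(struct mmu_gather *tlb)
{
	tlb->start = ~0UL;	/* any tracked address will pull this down */
	tlb->end = 0;		/* any tracked address will push this up */
}

/* Flush only when something was queued, then start a fresh range. */
static bool tlb_flush_if_needed(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return false;	/* nothing to do, like the old !need_flush bail-out */
	printf("flushing [%#lx, %#lx)\n", tlb->start, tlb->end);
	__tlb_reset_range(tlb);
	return true;
}

int main(void)
{
	struct mmu_gather tlb;

	__tlb_reset_range(&tlb);
	tlb_flush_if_needed(&tlb);		/* no output: range is empty */
	tlb.start = 0x1000;
	tlb.end = 0x2000;			/* pretend one page was unmapped */
	tlb_flush_if_needed(&tlb);		/* flushing [0x1000, 0x2000) */
	return 0;
}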
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Michal Simek <monstr@monstr.eu>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	30
1 file changed, 8 insertions(+), 22 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 1cc6bfbd872e..c71edae9ba44 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -220,9 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 	/* Is it from 0 to ~0? */
 	tlb->fullmm = !(start | (end+1));
 	tlb->need_flush_all = 0;
-	tlb->start = start;
-	tlb->end = end;
-	tlb->need_flush = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr = 0;
 	tlb->local.max = ARRAY_SIZE(tlb->__pages);
@@ -232,15 +229,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
 #endif
+
+	__tlb_reset_range(tlb);
 }
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	tlb->need_flush = 0;
+	if (!tlb->end)
+		return;
+
 	tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
+	__tlb_reset_range(tlb);
 }
 
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
@@ -256,8 +258,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->need_flush)
-		return;
 	tlb_flush_mmu_tlbonly(tlb);
 	tlb_flush_mmu_free(tlb);
 }
@@ -292,7 +292,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	struct mmu_gather_batch *batch;
 
-	VM_BUG_ON(!tlb->need_flush);
+	VM_BUG_ON(!tlb->end);
 
 	batch = tlb->active;
 	batch->pages[batch->nr++] = page;
@@ -359,8 +359,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
-	tlb->need_flush = 1;
-
 	/*
 	 * When there's less then two users of this mm there cannot be a
 	 * concurrent page-table walk.
@@ -1185,20 +1183,8 @@ again:
 	arch_leave_lazy_mmu_mode();
 
 	/* Do the actual TLB flush before dropping ptl */
-	if (force_flush) {
-		unsigned long old_end;
-
-		/*
-		 * Flush the TLB just for the previous segment,
-		 * then update the range to be the remaining
-		 * TLB range.
-		 */
-		old_end = tlb->end;
-		tlb->end = addr;
+	if (force_flush)
 		tlb_flush_mmu_tlbonly(tlb);
-		tlb->start = addr;
-		tlb->end = old_end;
-	}
 	pte_unmap_unlock(start_pte, ptl);
 
 	/*