author    Linus Torvalds <torvalds@linux-foundation.org>  2014-04-25 19:05:40 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-25 19:05:40 -0400
commit    1cf35d47712dd5dc4d62c6ce984f04ac6eab0408 (patch)
tree      f00857df7a2eec9520c1a950a0f9ae16cdfc4627 /mm
parent    9a60ee117bbeaf2fb9a02ea80a6bdbc2811ca4d2 (diff)
mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts
The mmu-gather operation 'tlb_flush_mmu()' has done two things: the actual tlb flush operation, and the batched freeing of the pages that the TLB entries pointed at.

This splits the operation into separate phases, so that the forced batched flushing done by zap_pte_range() can now do the actual TLB flush while still holding the page table lock, but delay the batched freeing of all the pages to after the lock has been dropped.

This in turn allows us to avoid a race condition between set_page_dirty() (as called by zap_pte_range() when it finds a dirty shared memory pte) and page_mkclean(): because we now flush all the dirty page data from the TLBs while holding the pte lock, page_mkclean() will be held up walking the (recently cleaned) page tables until after the TLB entries have been flushed from all CPUs.

Reported-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Dave Hansen <dave.hansen@intel.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
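To illustrate the ordering the commit message describes, here is a minimal, self-contained userspace sketch (not kernel code): a combined "flush + free" step is split so the flush happens while the lock is still held and the batched freeing happens only after the lock is dropped. The names gather_flush_only() and gather_free(), and the pthread mutex standing in for the page table lock, are invented for illustration and are not the kernel API.

/* Userspace model of the two-phase pattern; build with: cc -pthread model.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct gather {
        void *batch[64];        /* pages queued for later freeing */
        int nr;
};

/* stand-in for the page table lock (ptl) */
static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;

static void gather_flush_only(struct gather *g)
{
        /* analogue of the tlb-only flush: invalidate stale entries,
         * but do NOT free the batched memory yet */
        printf("flushed %d stale entries while lock is still held\n", g->nr);
}

static void gather_free(struct gather *g)
{
        /* analogue of the deferred free phase: release batched memory */
        for (int i = 0; i < g->nr; i++)
                free(g->batch[i]);
        g->nr = 0;
}

int main(void)
{
        struct gather g = { .nr = 0 };

        pthread_mutex_lock(&ptl);
        for (int i = 0; i < 8; i++)             /* "zap" some entries */
                g.batch[g.nr++] = malloc(64);

        gather_flush_only(&g);                  /* must happen before unlock */
        pthread_mutex_unlock(&ptl);

        gather_free(&g);                        /* safe to defer past unlock */
        return 0;
}

The point of the split, as in the patch below, is that anyone serialized on the lock (page_mkclean() in the kernel case) cannot proceed until the stale entries are gone, while the potentially expensive freeing work no longer extends the lock hold time.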
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  53
1 file changed, 34 insertions(+), 19 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 93e332d5ed77..037b812a9531 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -232,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #endif
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-        struct mmu_gather_batch *batch;
-
-        if (!tlb->need_flush)
-                return;
         tlb->need_flush = 0;
         tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
         tlb_table_flush(tlb);
 #endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+        struct mmu_gather_batch *batch;
 
         for (batch = &tlb->local; batch; batch = batch->next) {
                 free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -251,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
         tlb->active = &tlb->local;
 }
 
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+        if (!tlb->need_flush)
+                return;
+        tlb_flush_mmu_tlbonly(tlb);
+        tlb_flush_mmu_free(tlb);
+}
+
 /* tlb_finish_mmu
  * Called at the end of the shootdown operation to free up any resources
  * that were required.
@@ -1127,8 +1136,10 @@ again:
                         if (PageAnon(page))
                                 rss[MM_ANONPAGES]--;
                         else {
-                                if (pte_dirty(ptent))
+                                if (pte_dirty(ptent)) {
+                                        force_flush = 1;
                                         set_page_dirty(page);
+                                }
                                 if (pte_young(ptent) &&
                                     likely(!(vma->vm_flags & VM_SEQ_READ)))
                                         mark_page_accessed(page);
@@ -1137,9 +1148,10 @@ again:
                         page_remove_rmap(page);
                         if (unlikely(page_mapcount(page) < 0))
                                 print_bad_pte(vma, addr, ptent, page);
-                        force_flush = !__tlb_remove_page(tlb, page);
-                        if (force_flush)
-                                break;
+                        if (unlikely(!__tlb_remove_page(tlb, page))) {
+                                force_flush = 1;
+                                break;
+                        }
                         continue;
                 }
                 /*
@@ -1174,18 +1186,11 @@ again:
 
         add_mm_rss_vec(mm, rss);
         arch_leave_lazy_mmu_mode();
-        pte_unmap_unlock(start_pte, ptl);
 
-        /*
-         * mmu_gather ran out of room to batch pages, we break out of
-         * the PTE lock to avoid doing the potential expensive TLB invalidate
-         * and page-free while holding it.
-         */
+        /* Do the actual TLB flush before dropping ptl */
         if (force_flush) {
                 unsigned long old_end;
 
-                force_flush = 0;
-
                 /*
                  * Flush the TLB just for the previous segment,
                  * then update the range to be the remaining
@@ -1193,11 +1198,21 @@ again:
                  */
                 old_end = tlb->end;
                 tlb->end = addr;
-
-                tlb_flush_mmu(tlb);
-
+                tlb_flush_mmu_tlbonly(tlb);
                 tlb->start = addr;
                 tlb->end = old_end;
+        }
+        pte_unmap_unlock(start_pte, ptl);
+
+        /*
+         * If we forced a TLB flush (either due to running out of
+         * batch buffers or because we needed to flush dirty TLB
+         * entries before releasing the ptl), free the batched
+         * memory too. Restart if we didn't do everything.
+         */
+        if (force_flush) {
+                force_flush = 0;
+                tlb_flush_mmu_free(tlb);
 
                 if (addr != end)
                         goto again;