path: root/mm/memory.c
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	58
1 file changed, 39 insertions, 19 deletions
diff --git a/mm/memory.c b/mm/memory.c
index d0f0bef3be48..037b812a9531 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -232,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #endif
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 	tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
 
 	for (batch = &tlb->local; batch; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -251,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb->active = &tlb->local;
 }
 
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	if (!tlb->need_flush)
+		return;
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 /* tlb_finish_mmu
  *	Called at the end of the shootdown operation to free up any resources
  *	that were required.
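The first two hunks split the old tlb_flush_mmu() into tlb_flush_mmu_tlbonly(), which only invalidates the TLB, and tlb_flush_mmu_free(), which frees the pages batched during the unmap, keeping tlb_flush_mmu() as a wrapper that calls both. As a rough illustration of that shape (not kernel code: struct gather, flush_hw_tlb() and free_batched_pages() are made-up stand-ins), a caller can now run the two phases at different points:

#include <stdio.h>

/* Made-up stand-in for struct mmu_gather: just enough state for the sketch. */
struct gather {
	int need_flush;
	int batched_pages;
};

/* Phase 1: invalidate stale TLB entries (modelled here as a printf). */
static void flush_hw_tlb(struct gather *g)
{
	g->need_flush = 0;
	printf("TLB invalidated\n");
}

/* Phase 2: release the pages that were batched during the unmap. */
static void free_batched_pages(struct gather *g)
{
	printf("freed %d batched pages\n", g->batched_pages);
	g->batched_pages = 0;
}

/* Combined helper, mirroring the wrapper kept as tlb_flush_mmu(). */
static void flush_all(struct gather *g)
{
	if (!g->need_flush)
		return;
	flush_hw_tlb(g);
	free_batched_pages(g);
}

int main(void)
{
	struct gather g = { .need_flush = 1, .batched_pages = 42 };

	flush_all(&g);
	return 0;
}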
@@ -1127,8 +1136,10 @@ again:
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
-				if (pte_dirty(ptent))
+				if (pte_dirty(ptent)) {
+					force_flush = 1;
 					set_page_dirty(page);
+				}
 				if (pte_young(ptent) &&
 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
 					mark_page_accessed(page);
@@ -1137,9 +1148,10 @@ again:
 			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
-			force_flush = !__tlb_remove_page(tlb, page);
-			if (force_flush)
+			if (unlikely(!__tlb_remove_page(tlb, page))) {
+				force_flush = 1;
 				break;
+			}
 			continue;
 		}
 		/*
@@ -1174,18 +1186,11 @@ again:
 
 	add_mm_rss_vec(mm, rss);
 	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(start_pte, ptl);
 
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
+	/* Do the actual TLB flush before dropping ptl */
 	if (force_flush) {
 		unsigned long old_end;
 
-		force_flush = 0;
-
 		/*
 		 * Flush the TLB just for the previous segment,
 		 * then update the range to be the remaining
@@ -1193,11 +1198,21 @@ again:
 		 */
 		old_end = tlb->end;
 		tlb->end = addr;
-
-		tlb_flush_mmu(tlb);
-
+		tlb_flush_mmu_tlbonly(tlb);
 		tlb->start = addr;
 		tlb->end = old_end;
+	}
+	pte_unmap_unlock(start_pte, ptl);
+
+	/*
+	 * If we forced a TLB flush (either due to running out of
+	 * batch buffers or because we needed to flush dirty TLB
+	 * entries before releasing the ptl), free the batched
+	 * memory too. Restart if we didn't do everything.
+	 */
+	if (force_flush) {
+		force_flush = 0;
+		tlb_flush_mmu_free(tlb);
 
 		if (addr != end)
 			goto again;
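The zap_pte_range() hunks above change the ordering so that, when a flush is forced (batch full, or a dirty pte was cleared), the hardware TLB flush happens while the page-table lock is still held, and only the expensive freeing of the batched pages happens after the lock is dropped. A minimal standalone sketch of that ordering, using a pthread mutex in place of the pte lock and printf stubs for the two phases (none of this is kernel code):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the pte lock (ptl). */
static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;

static void flush_tlb_stub(void)  { printf("flush TLB (lock held)\n"); }
static void free_pages_stub(void) { printf("free pages (lock dropped)\n"); }

static void zap_range_stub(int force_flush)
{
	pthread_mutex_lock(&ptl);
	/* ... clear PTEs and batch their pages here ... */
	if (force_flush)
		flush_tlb_stub();	/* before unlock, so no other CPU can
					 * keep dirtying a page through a
					 * stale TLB entry while we free it */
	pthread_mutex_unlock(&ptl);

	if (force_flush)
		free_pages_stub();	/* expensive work outside the lock */
}

int main(void)
{
	zap_range_stub(1);
	return 0;
}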
@@ -1955,12 +1970,17 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			     unsigned long address, unsigned int fault_flags)
 {
 	struct vm_area_struct *vma;
+	vm_flags_t vm_flags;
 	int ret;
 
 	vma = find_extend_vma(mm, address);
 	if (!vma || address < vma->vm_start)
 		return -EFAULT;
 
+	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
+	if (!(vm_flags & vma->vm_flags))
+		return -EFAULT;
+
 	ret = handle_mm_fault(mm, vma, address, fault_flags);
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
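The final hunk makes fixup_user_fault() reject the fault early when the VMA does not permit the requested access (VM_WRITE for a write fault, VM_READ otherwise) instead of unconditionally calling handle_mm_fault(). A toy version of that permission test, with made-up flag values standing in for the kernel's vm_flags bits:

#include <stdio.h>

/* Made-up flag values for the sketch; only the shape of the check
 * mirrors the hunk above. */
#define SK_READ        0x1u
#define SK_WRITE       0x2u
#define SK_FAULT_WRITE 0x1u

/* Return 0 if the "vma" permits the requested access, -1 otherwise. */
static int check_access(unsigned int vma_flags, unsigned int fault_flags)
{
	unsigned int need = (fault_flags & SK_FAULT_WRITE) ? SK_WRITE : SK_READ;

	return (need & vma_flags) ? 0 : -1;
}

int main(void)
{
	/* A read-only mapping: reads pass, write faults are rejected. */
	printf("read : %d\n", check_access(SK_READ, 0));              /* 0  */
	printf("write: %d\n", check_access(SK_READ, SK_FAULT_WRITE)); /* -1 */
	return 0;
}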