Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	 1
-rw-r--r--	mm/huge_memory.c	13
-rw-r--r--	mm/hugetlb.c	 1
-rw-r--r--	mm/memory.c	58
-rw-r--r--	mm/vmacache.c	 8
-rw-r--r--	mm/vmscan.c	 2
6 files changed, 56 insertions, 27 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index a82fbe4c9e8e..5020b280a771 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2581,7 +2581,6 @@ EXPORT_SYMBOL(generic_perform_write);
  * @iocb:	IO state structure (file, offset, etc.)
  * @iov:	vector with data to write
  * @nr_segs:	number of segments in the vector
- * @ppos:	position where to write
  *
  * This function does all the work needed for actually writing data to a
  * file. It does all basic checks, removes SUID from the file, updates
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 64635f5278ff..b4b1feba6472 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1536,16 +1536,23 @@ pmd_t *page_check_address_pmd(struct page *page,
 			      enum page_check_address_pmd_flag flag,
 			      spinlock_t **ptl)
 {
+	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 
 	if (address & ~HPAGE_PMD_MASK)
 		return NULL;
 
-	pmd = mm_find_pmd(mm, address);
-	if (!pmd)
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
 		return NULL;
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		return NULL;
+	pmd = pmd_offset(pud, address);
+
 	*ptl = pmd_lock(mm, pmd);
-	if (pmd_none(*pmd))
+	if (!pmd_present(*pmd))
 		goto unlock;
 	if (pmd_page(*pmd) != page)
 		goto unlock;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dd30f22b35e0..246192929a2d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1172,6 +1172,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	while (nr_pages--) {
 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
 			break;
+		cond_resched_lock(&hugetlb_lock);
 	}
 }
 
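The cond_resched_lock() added above keeps this loop, which runs with hugetlb_lock held, from monopolizing the CPU when a large number of surplus pages is returned. A rough sketch of what that helper amounts to (simplified; the real implementation lives in the scheduler core, and the functions named here are existing kernel APIs):

	/* Conceptually what cond_resched_lock(&hugetlb_lock) does: if a
	 * reschedule is due or another CPU is spinning on the lock, drop
	 * it, let the scheduler run, then re-acquire it before looping on.
	 */
	if (need_resched() || spin_needbreak(&hugetlb_lock)) {
		spin_unlock(&hugetlb_lock);
		cond_resched();
		spin_lock(&hugetlb_lock);
	}
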
diff --git a/mm/memory.c b/mm/memory.c
index d0f0bef3be48..037b812a9531 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -232,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #endif
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 	tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
 
 	for (batch = &tlb->local; batch; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -251,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb->active = &tlb->local;
 }
 
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	if (!tlb->need_flush)
+		return;
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 /* tlb_finish_mmu
  *	Called at the end of the shootdown operation to free up any resources
  *	that were required.
@@ -1127,8 +1136,10 @@ again:
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
-				if (pte_dirty(ptent))
+				if (pte_dirty(ptent)) {
+					force_flush = 1;
 					set_page_dirty(page);
+				}
 				if (pte_young(ptent) &&
 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
 					mark_page_accessed(page);
@@ -1137,9 +1148,10 @@ again:
 			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
-			force_flush = !__tlb_remove_page(tlb, page);
-			if (force_flush)
+			if (unlikely(!__tlb_remove_page(tlb, page))) {
+				force_flush = 1;
 				break;
+			}
 			continue;
 		}
 		/*
@@ -1174,18 +1186,11 @@ again:
 
 	add_mm_rss_vec(mm, rss);
 	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(start_pte, ptl);
 
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
+	/* Do the actual TLB flush before dropping ptl */
 	if (force_flush) {
 		unsigned long old_end;
 
-		force_flush = 0;
-
 		/*
 		 * Flush the TLB just for the previous segment,
 		 * then update the range to be the remaining
@@ -1193,11 +1198,21 @@ again:
 		 */
 		old_end = tlb->end;
 		tlb->end = addr;
-
-		tlb_flush_mmu(tlb);
-
+		tlb_flush_mmu_tlbonly(tlb);
 		tlb->start = addr;
 		tlb->end = old_end;
+	}
+	pte_unmap_unlock(start_pte, ptl);
+
+	/*
+	 * If we forced a TLB flush (either due to running out of
+	 * batch buffers or because we needed to flush dirty TLB
+	 * entries before releasing the ptl), free the batched
+	 * memory too. Restart if we didn't do everything.
+	 */
+	if (force_flush) {
+		force_flush = 0;
+		tlb_flush_mmu_free(tlb);
 
 		if (addr != end)
 			goto again;
@@ -1955,12 +1970,17 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			     unsigned long address, unsigned int fault_flags)
 {
 	struct vm_area_struct *vma;
+	vm_flags_t vm_flags;
 	int ret;
 
 	vma = find_extend_vma(mm, address);
 	if (!vma || address < vma->vm_start)
 		return -EFAULT;
 
+	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
+	if (!(vm_flags & vma->vm_flags))
+		return -EFAULT;
+
 	ret = handle_mm_fault(mm, vma, address, fault_flags);
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
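Taken together, the zap_pte_range() hunks above reorder the work so that the TLB is flushed for dirty or batched pages while the page-table lock is still held, and only the page freeing is deferred until after the lock is dropped. A condensed sketch of the resulting flow, using only names that appear in the diff (surrounding range bookkeeping elided):

	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb);	/* flush while ptl is still held */
	pte_unmap_unlock(start_pte, ptl);

	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu_free(tlb);	/* freeing can wait until ptl is dropped */
		if (addr != end)
			goto again;		/* restart if the batch filled up */
	}
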
diff --git a/mm/vmacache.c b/mm/vmacache.c
index d4224b397c0e..1037a3bab505 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -81,10 +81,12 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
 	for (i = 0; i < VMACACHE_SIZE; i++) {
 		struct vm_area_struct *vma = current->vmacache[i];
 
-		if (vma && vma->vm_start <= addr && vma->vm_end > addr) {
-			BUG_ON(vma->vm_mm != mm);
+		if (!vma)
+			continue;
+		if (WARN_ON_ONCE(vma->vm_mm != mm))
+			break;
+		if (vma->vm_start <= addr && vma->vm_end > addr)
 			return vma;
-		}
 	}
 
 	return NULL;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9b6497eda806..3f56c8deb3c0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1158,7 +1158,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 					TTU_UNMAP|TTU_IGNORE_ACCESS,
 			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
 	list_splice(&clean_pages, page_list);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+	mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
 	return ret;
 }
 
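The one-character change above swaps the raw counter update for its interrupt-safe wrapper. As a rough sketch (the real definition lives in mm/vmstat.c), the plain variant is essentially the __-prefixed one bracketed by local_irq_save()/local_irq_restore(), so it is safe to call where interrupts may still be enabled:

	void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				 int delta)
	{
		unsigned long flags;

		/* __mod_zone_page_state() assumes the caller has already
		 * serialized against interrupts; do it here instead.
		 */
		local_irq_save(flags);
		__mod_zone_page_state(zone, item, delta);
		local_irq_restore(flags);
	}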