author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2015-02-10 17:09:49 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>		2015-02-10 17:30:30 -0500
commit		8a5f14a23177061ec11daeaa3d09d0765d785c47 (patch)
tree		5199ffd75455cc98b652767813d07e64e6895c4e /mm/memory.c
parent		c8d78c1823f46519473949d33f0d1d33fe21ea16 (diff)
mm: drop support of non-linear mapping from unmap/zap codepath
We have had remap_file_pages(2) emulation in the -mm tree for a few release cycles and we plan to have it mainline in v3.20. This patchset removes the rest of the VM_NONLINEAR infrastructure.

Patches 1-8 take care of the generic code. They are pretty straightforward and can be applied independently of the rest of the patches.

The remaining patches remove pte_file()-related stuff from architecture-specific code. That usually frees up one bit in the non-present pte; I've tried to reuse that bit for the swap offset where I was able to figure out how to do so. For obvious reasons I cannot test all of that arch-specific code and would like to see acks from the maintainers.

In total, remap_file_pages(2) required about 1.4K lines of not-so-trivial kernel code. That's too much for functionality nobody uses.

Tested-by: Felipe Balbi <balbi@ti.com>

This patch (of 38):

We don't create non-linear mappings anymore. Let's drop the code which handles them on unmap/zap.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
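For context, a minimal userspace sketch of the remap_file_pages(2) interface that the emulation replaces; the file name "data.bin" and the page offsets are hypothetical and only illustrate the call being emulated with ordinary linear mappings.

/*
 * Hypothetical example: "data.bin" is assumed to be at least four pages
 * long.  The mapping must be MAP_SHARED for remap_file_pages(2).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	int fd = open("data.bin", O_RDONLY);

	if (fd < 0)
		return 1;

	/* Ordinary linear mapping of the first four file pages. */
	char *p = mmap(NULL, 4 * psize, PROT_READ, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		return 1;

	/*
	 * Rebind the first virtual page to file page 3 (prot must be 0).
	 * This used to turn the VMA into a VM_NONLINEAR one; with the
	 * emulation it is backed by a plain linear mapping at pgoff 3.
	 */
	if (remap_file_pages(p, psize, 0, 3, 0))
		perror("remap_file_pages");

	munmap(p, 4 * psize);
	close(fd);
	return 0;
}

Because such calls are now backed by ordinary linear mappings, page->index always matches linear_page_index(), which is why the nonlinear special cases removed from zap_pte_range() below can never be reached.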
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	82
1 file changed, 21 insertions, 61 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 2c3536cc6c63..9a3e73b69dad 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1082,6 +1082,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
+	swp_entry_t entry;
 
 again:
 	init_rss_vec(rss);
@@ -1107,28 +1108,12 @@ again:
 				if (details->check_mapping &&
 				    details->check_mapping != page->mapping)
 					continue;
-				/*
-				 * Each page->index must be checked when
-				 * invalidating or truncating nonlinear.
-				 */
-				if (details->nonlinear_vma &&
-				    (page->index < details->first_index ||
-				     page->index > details->last_index))
-					continue;
 			}
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
 							tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);
 			if (unlikely(!page))
 				continue;
-			if (unlikely(details) && details->nonlinear_vma
-			    && linear_page_index(details->nonlinear_vma,
-					addr) != page->index) {
-				pte_t ptfile = pgoff_to_pte(page->index);
-				if (pte_soft_dirty(ptent))
-					ptfile = pte_file_mksoft_dirty(ptfile);
-				set_pte_at(mm, addr, pte, ptfile);
-			}
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
@@ -1151,33 +1136,25 @@ again:
 			}
 			continue;
 		}
-		/*
-		 * If details->check_mapping, we leave swap entries;
-		 * if details->nonlinear_vma, we leave file entries.
-		 */
+		/* If details->check_mapping, we leave swap entries. */
 		if (unlikely(details))
 			continue;
-		if (pte_file(ptent)) {
-			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
-				print_bad_pte(vma, addr, ptent, NULL);
-		} else {
-			swp_entry_t entry = pte_to_swp_entry(ptent);
 
-			if (!non_swap_entry(entry))
-				rss[MM_SWAPENTS]--;
-			else if (is_migration_entry(entry)) {
-				struct page *page;
+		entry = pte_to_swp_entry(ptent);
+		if (!non_swap_entry(entry))
+			rss[MM_SWAPENTS]--;
+		else if (is_migration_entry(entry)) {
+			struct page *page;
 
-				page = migration_entry_to_page(entry);
+			page = migration_entry_to_page(entry);
 
-				if (PageAnon(page))
-					rss[MM_ANONPAGES]--;
-				else
-					rss[MM_FILEPAGES]--;
-			}
-			if (unlikely(!free_swap_and_cache(entry)))
-				print_bad_pte(vma, addr, ptent, NULL);
-		}
+			if (PageAnon(page))
+				rss[MM_ANONPAGES]--;
+			else
+				rss[MM_FILEPAGES]--;
+		}
+		if (unlikely(!free_swap_and_cache(entry)))
+			print_bad_pte(vma, addr, ptent, NULL);
 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -1277,7 +1254,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 	pgd_t *pgd;
 	unsigned long next;
 
-	if (details && !details->check_mapping && !details->nonlinear_vma)
+	if (details && !details->check_mapping)
 		details = NULL;
 
 	BUG_ON(addr >= end);
@@ -1371,7 +1348,7 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @vma: vm_area_struct holding the applicable pages
  * @start: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * Caller must protect the VMA list
  */
@@ -1397,7 +1374,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
  * @vma: vm_area_struct holding the applicable pages
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * The range must fit into one VMA.
  */
@@ -2331,25 +2308,11 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 	}
 }
 
-static inline void unmap_mapping_range_list(struct list_head *head,
-					    struct zap_details *details)
-{
-	struct vm_area_struct *vma;
-
-	/*
-	 * In nonlinear VMAs there is no correspondence between virtual address
-	 * offset and file offset.  So we must perform an exhaustive search
-	 * across *all* the pages in each nonlinear VMA, not just the pages
-	 * whose virtual address lies outside the file truncation point.
-	 */
-	list_for_each_entry(vma, head, shared.nonlinear) {
-		details->nonlinear_vma = vma;
-		unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
-	}
-}
-
 /**
- * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified
+ * address_space corresponding to the specified page range in the underlying
+ * file.
+ *
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file.  This will be rounded down to a PAGE_SIZE
@@ -2378,7 +2341,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	}
 
 	details.check_mapping = even_cows? NULL: mapping;
-	details.nonlinear_vma = NULL;
 	details.first_index = hba;
 	details.last_index = hba + hlen - 1;
 	if (details.last_index < details.first_index)
@@ -2388,8 +2350,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
-		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
 	i_mmap_unlock_write(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);