author | Al Viro <viro@zeniv.linux.org.uk> | 2012-03-05 14:14:20 -0500
---|---|---
committer | Al Viro <viro@zeniv.linux.org.uk> | 2012-03-20 21:39:51 -0400
commit | f5cc4eef9987d0b517364d01e290d6438e47ee5d (patch) |
tree | 1c6a5ec2abf40450b89134564c35c0beafded436 | /mm/memory.c
parent | 6e8bb0193af3f308ef22817a5560422d33e58b90 (diff) |
VM: make zap_page_range() callers that act on a single VMA use separate helper
... and not rely on ->vm_next being there for them...
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
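Why a separate helper: unmap_vmas() iterates via ->vm_next, so funnelling single-VMA zaps through it meant trusting a list pointer those callers never needed. The new unmap_single_vma() clips the request to one VMA and stops there. A minimal userspace sketch of that clipping, using toy stand-ins for the kernel structures (illustrative only, not kernel code):

#include <stdio.h>

struct vma {				/* toy stand-in for vm_area_struct */
	unsigned long vm_start, vm_end;
	struct vma *vm_next;
};

/* Analogue of unmap_single_vma(): clip [start_addr, end_addr) to one
 * VMA and never touch vm_next, so a detached VMA is safe to pass. */
static void zap_one(const struct vma *v, unsigned long start_addr,
		    unsigned long end_addr)
{
	unsigned long start = start_addr > v->vm_start ? start_addr : v->vm_start;
	unsigned long end = end_addr < v->vm_end ? end_addr : v->vm_end;

	if (start >= v->vm_end || end <= v->vm_start)
		return;			/* range misses this VMA entirely */
	printf("zap [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	struct vma lone = { 0x1000, 0x9000, NULL };	/* detached: no list */

	zap_one(&lone, 0x2000, 0x4000);	/* clipped to [0x2000, 0x4000) */
	zap_one(&lone, 0x0, 0x20000);	/* clipped to [0x1000, 0x9000) */
	return 0;
}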
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 113
1 file changed, 74 insertions(+), 39 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 016c67587ef4..8ab09188360a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1307,6 +1307,47 @@ static void unmap_page_range(struct mmu_gather *tlb,
 	mem_cgroup_uncharge_end();
 }
 
+
+static void unmap_single_vma(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, unsigned long start_addr,
+		unsigned long end_addr, unsigned long *nr_accounted,
+		struct zap_details *details)
+{
+	unsigned long start = max(vma->vm_start, start_addr);
+	unsigned long end;
+
+	if (start >= vma->vm_end)
+		return;
+	end = min(vma->vm_end, end_addr);
+	if (end <= vma->vm_start)
+		return;
+
+	if (vma->vm_flags & VM_ACCOUNT)
+		*nr_accounted += (end - start) >> PAGE_SHIFT;
+
+	if (unlikely(is_pfn_mapping(vma)))
+		untrack_pfn_vma(vma, 0, 0);
+
+	if (start != end) {
+		if (unlikely(is_vm_hugetlb_page(vma))) {
+			/*
+			 * It is undesirable to test vma->vm_file as it
+			 * should be non-null for valid hugetlb area.
+			 * However, vm_file will be NULL in the error
+			 * cleanup path of do_mmap_pgoff. When
+			 * hugetlbfs ->mmap method fails,
+			 * do_mmap_pgoff() nullifies vma->vm_file
+			 * before calling this function to clean up.
+			 * Since no pte has actually been setup, it is
+			 * safe to do nothing in this case.
+			 */
+			if (vma->vm_file)
+				unmap_hugepage_range(vma, start, end, NULL);
+		} else
+			unmap_page_range(tlb, vma, start, end, details);
+	}
+}
+
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
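One detail worth calling out in the helper above: VM_ACCOUNT pages are tallied as (end - start) >> PAGE_SHIFT, i.e. the clipped byte span converted to a page count. A tiny standalone check, assuming the common 4 KiB pages (PAGE_SHIFT is per-architecture in the real kernel):

#include <stdio.h>

#define PAGE_SHIFT 12UL			/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long start = 0x2000, end = 0x6000;

	/* 0x4000 bytes >> 12 == 4 pages charged to nr_accounted */
	printf("%lu pages\n", (end - start) >> PAGE_SHIFT);
	return 0;
}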
@@ -1332,46 +1373,12 @@ void unmap_vmas(struct mmu_gather *tlb,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
 {
-	unsigned long start = start_addr;
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
-	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-		unsigned long end;
-
-		start = max(vma->vm_start, start_addr);
-		if (start >= vma->vm_end)
-			continue;
-		end = min(vma->vm_end, end_addr);
-		if (end <= vma->vm_start)
-			continue;
-
-		if (vma->vm_flags & VM_ACCOUNT)
-			*nr_accounted += (end - start) >> PAGE_SHIFT;
-
-		if (unlikely(is_pfn_mapping(vma)))
-			untrack_pfn_vma(vma, 0, 0);
-
-		if (start != end) {
-			if (unlikely(is_vm_hugetlb_page(vma))) {
-				/*
-				 * It is undesirable to test vma->vm_file as it
-				 * should be non-null for valid hugetlb area.
-				 * However, vm_file will be NULL in the error
-				 * cleanup path of do_mmap_pgoff. When
-				 * hugetlbfs ->mmap method fails,
-				 * do_mmap_pgoff() nullifies vma->vm_file
-				 * before calling this function to clean up.
-				 * Since no pte has actually been setup, it is
-				 * safe to do nothing in this case.
-				 */
-				if (vma->vm_file)
-					unmap_hugepage_range(vma, start, end, NULL);
-			} else
-				unmap_page_range(tlb, vma, start, end, details);
-		}
-	}
-
+	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
+				 details);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
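The loop body above collapses into a single call because the old continue statements became early returns inside unmap_single_vma(), so the loop can now invoke the helper unconditionally for every VMA it visits. The same shape in a standalone toy (simplified demo, not kernel code):

#include <stdio.h>

/* Helper with the early return that replaces the loop's "continue". */
static void visit(int v)
{
	if (v % 2)
		return;		/* was: continue in the caller's loop */
	printf("%d ", v);
}

int main(void)
{
	for (int v = 0; v < 10; v++)
		visit(v);	/* caller no longer filters; helper does */
	printf("\n");		/* prints: 0 2 4 6 8 */
	return 0;
}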
@@ -1381,6 +1388,8 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * Caller must protect the VMA list
  */
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
@@ -1398,6 +1407,32 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 }
 
 /**
+ * zap_page_range_single - remove user pages in a given range
+ * @vma: vm_area_struct holding the applicable pages
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * The range must fit into one VMA.
+ */
+static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+		unsigned long size, struct zap_details *details)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct mmu_gather tlb;
+	unsigned long end = address + size;
+	unsigned long nr_accounted = 0;
+
+	lru_add_drain();
+	tlb_gather_mmu(&tlb, mm, 0);
+	update_hiwater_rss(mm);
+	mmu_notifier_invalidate_range_start(mm, address, end);
+	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	mmu_notifier_invalidate_range_end(mm, address, end);
+	tlb_finish_mmu(&tlb, address, end);
+}
+
+/**
  * zap_vma_ptes - remove ptes mapping the vma
  * @vma: vm_area_struct holding ptes to be zapped
  * @address: starting address of pages to zap
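zap_page_range_single() repeats zap_page_range()'s setup and teardown (lru_add_drain(), the tlb_gather_mmu()/tlb_finish_mmu() pair, the hiwater update, and the mmu_notifier bracket) but hands the range to unmap_single_vma() instead of unmap_vmas(). It is static to mm/memory.c, so outside callers reach it through wrappers; a hypothetical wrapper in the same spirit as zap_vma_ptes() below (name and error code are illustrative, not from this patch):

/* Hypothetical wrapper (sketch, would live in mm/memory.c): zap a
 * window known to lie inside a single VMA, refusing ranges that
 * spill past it. */
static int zap_window(struct vm_area_struct *vma, unsigned long address,
		      unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end)
		return -EINVAL;		/* must fit into one VMA */
	zap_page_range_single(vma, address, size, NULL);
	return 0;
}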
@@ -1415,7 +1450,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 	if (address < vma->vm_start || address + size > vma->vm_end ||
 			!(vma->vm_flags & VM_PFNMAP))
 		return -1;
-	zap_page_range(vma, address, size, NULL);
+	zap_page_range_single(vma, address, size, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
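With this change, zap_vma_ptes() (exported for drivers that maintain VM_PFNMAP mappings) can no longer stray past the VMA it was handed. A sketch of a driver-style call (the caller and its locking context are assumptions, not from this patch):

/* Sketch: drop the PTEs backing one page of a PFN-mapped VMA;
 * zap_vma_ptes() returns -1 if the range is not wholly inside a
 * VM_PFNMAP VMA. */
if (zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE))
	pr_warn("zap_vma_ptes failed: not a PFNMAP range\n");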
@@ -2762,7 +2797,7 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
 		unsigned long start_addr, unsigned long end_addr,
 		struct zap_details *details)
 {
-	zap_page_range(vma, start_addr, end_addr - start_addr, details);
+	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
 }
 
 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,