Diffstat (limited to 'mm/memory.c')

 -rw-r--r--  mm/memory.c | 61 +++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 39 insertions(+), 22 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6105f475fa86..2466d1250231 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1225,7 +1225,15 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
-				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
+#ifdef CONFIG_DEBUG_VM
+				if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
+					pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
+						__func__, addr, end,
+						vma->vm_start,
+						vma->vm_end);
+					BUG();
+				}
+#endif
 				split_huge_page_pmd(vma->vm_mm, pmd);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
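For context (a sketch, not part of this commit): VM_BUG_ON() compiles to a real BUG_ON() only when CONFIG_DEBUG_VM is set and to a no-op otherwise, which is why the open-coded replacement above is wrapped in the same #ifdef; it keeps the same gating while printing the fault range and VMA bounds before dying. The gating it mirrors looks roughly like this (the authoritative definition lives in include/linux/mmdebug.h):

	/* Approximate definition of VM_BUG_ON() for kernels of this
	 * vintage; see include/linux/mmdebug.h for the real one. */
	#ifdef CONFIG_DEBUG_VM
	#define VM_BUG_ON(cond) BUG_ON(cond)
	#else
	#define VM_BUG_ON(cond) do { } while (0)
	#endif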
@@ -1295,7 +1303,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
+		unsigned long end_addr,
 		struct zap_details *details)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,8 +1315,8 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (end <= vma->vm_start)
 		return;
 
-	if (vma->vm_flags & VM_ACCOUNT)
-		*nr_accounted += (end - start) >> PAGE_SHIFT;
+	if (vma->vm_file)
+		uprobe_munmap(vma, start, end);
 
 	if (unlikely(is_pfn_mapping(vma)))
 		untrack_pfn_vma(vma, 0, 0);
@@ -1339,8 +1347,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,40 +1361,40 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *details)
+		unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-				 details);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
 /**
  * zap_page_range - remove user pages in a given range
  * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
+ * @start: starting address of pages to zap
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
-	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
+	unsigned long end = start + size;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-	tlb_finish_mmu(&tlb, address, end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
+	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+		unmap_single_vma(&tlb, vma, start, end, details);
+	mmu_notifier_invalidate_range_end(mm, start, end);
+	tlb_finish_mmu(&tlb, start, end);
 }
 
 /**
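Aside (an illustrative sketch, not code from this diff): with the nr_accounted out-parameter dropped from unmap_vmas(), a caller that still needs the VM_ACCOUNT page count, say to pass to vm_unacct_memory(), has to compute it itself. A hypothetical helper mirroring the removed bookkeeping might look like:

	/* Hypothetical helper, not in this tree: reproduce the accounting
	 * the old unmap_vmas() did via its nr_accounted out-parameter,
	 * clamping each VMA to the unmapped range the same way
	 * unmap_single_vma() clamps its start/end. */
	static unsigned long count_accounted_pages(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
	{
		unsigned long nr_accounted = 0;

		for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
			unsigned long start = max(vma->vm_start, start_addr);
			unsigned long end = min(vma->vm_end, end_addr);

			if (start < end && (vma->vm_flags & VM_ACCOUNT))
				nr_accounted += (end - start) >> PAGE_SHIFT;
		}
		return nr_accounted;
	}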
@@ -1406,13 +1412,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
-	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	unmap_single_vma(&tlb, vma, address, end, details);
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }
@@ -2911,7 +2916,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
-		grab_swap_token(mm);	/* Contend for token _before_ read-in */
 		page = swapin_readahead(entry,
 					GFP_HIGHUSER_MOVABLE, vma, address);
 		if (!page) {
@@ -2941,6 +2945,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	locked = lock_page_or_retry(page, mm, flags);
+
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 	if (!locked) {
 		ret |= VM_FAULT_RETRY;
@@ -3489,6 +3494,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, flags);
 
+retry:
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
@@ -3502,13 +3508,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 					pmd, flags);
 	} else {
 		pmd_t orig_pmd = *pmd;
+		int ret;
+
 		barrier();
 		if (pmd_trans_huge(orig_pmd)) {
 			if (flags & FAULT_FLAG_WRITE &&
 			    !pmd_write(orig_pmd) &&
-			    !pmd_trans_splitting(orig_pmd))
-				return do_huge_pmd_wp_page(mm, vma, address,
-							   pmd, orig_pmd);
+			    !pmd_trans_splitting(orig_pmd)) {
+				ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+							  orig_pmd);
+				/*
+				 * If COW results in an oom, the huge pmd will
+				 * have been split, so retry the fault on the
+				 * pte for a smaller charge.
+				 */
+				if (unlikely(ret & VM_FAULT_OOM))
+					goto retry;
+				return ret;
+			}
 			return 0;
 		}
 	}
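A closing note on the retry label added above: per the comment introduced in the last hunk, a VM_FAULT_OOM from the huge-pmd copy-on-write means do_huge_pmd_wp_page() has already split the huge pmd, so jumping back to retry re-walks pgd/pud/pmd and services the fault through the ordinary pte path, where the allocation charge is a single small page rather than a whole HPAGE_PMD_SIZE region.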