Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	54
1 file changed, 35 insertions(+), 19 deletions(-)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -497,41 +497,51 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	int referenced = 0;
 
-	/*
-	 * Don't want to elevate referenced for mlocked page that gets this far,
-	 * in order that it progresses to try_to_unmap and is moved to the
-	 * unevictable list.
-	 */
-	if (vma->vm_flags & VM_LOCKED) {
-		*mapcount = 0;	/* break early from loop */
-		*vm_flags |= VM_LOCKED;
-		goto out;
-	}
-
-	/* Pretend the page is referenced if the task has the
-	   swap token and is in the middle of a page fault. */
-	if (mm != current->mm && has_swap_token(mm) &&
-			rwsem_is_locked(&mm->mmap_sem))
-		referenced++;
-
 	if (unlikely(PageTransHuge(page))) {
 		pmd_t *pmd;
 
 		spin_lock(&mm->page_table_lock);
+		/*
+		 * rmap might return false positives; we must filter
+		 * these out using page_check_address_pmd().
+		 */
 		pmd = page_check_address_pmd(page, mm, address,
 					     PAGE_CHECK_ADDRESS_PMD_FLAG);
-		if (pmd && !pmd_trans_splitting(*pmd) &&
-		    pmdp_clear_flush_young_notify(vma, address, pmd))
+		if (!pmd) {
+			spin_unlock(&mm->page_table_lock);
+			goto out;
+		}
+
+		if (vma->vm_flags & VM_LOCKED) {
+			spin_unlock(&mm->page_table_lock);
+			*mapcount = 0;	/* break early from loop */
+			*vm_flags |= VM_LOCKED;
+			goto out;
+		}
+
+		/* go ahead even if the pmd is pmd_trans_splitting() */
+		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 			referenced++;
 		spin_unlock(&mm->page_table_lock);
 	} else {
 		pte_t *pte;
 		spinlock_t *ptl;
 
+		/*
+		 * rmap might return false positives; we must filter
+		 * these out using page_check_address().
+		 */
 		pte = page_check_address(page, mm, address, &ptl, 0);
 		if (!pte)
 			goto out;
 
+		if (vma->vm_flags & VM_LOCKED) {
+			pte_unmap_unlock(pte, ptl);
+			*mapcount = 0;	/* break early from loop */
+			*vm_flags |= VM_LOCKED;
+			goto out;
+		}
+
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
 			/*
 			 * Don't treat a reference through a sequentially read
@@ -546,6 +556,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		pte_unmap_unlock(pte, ptl);
 	}
 
+	/* Pretend the page is referenced if the task has the
+	   swap token and is in the middle of a page fault. */
+	if (mm != current->mm && has_swap_token(mm) &&
+			rwsem_is_locked(&mm->mmap_sem))
+		referenced++;
+
 	(*mapcount)--;
 
 	if (referenced)
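
Read linearly rather than as interleaved +/- lines, the reordering is easier to follow. Below is a condensed sketch of the function body as it reads after this diff, reconstructed from the new (right-hand) side of the hunks; the sequential-read handling elided between the two hunks and the tail of the function are only summarised, and the summary comments are annotations added here, not part of the kernel source.

	struct mm_struct *mm = vma->vm_mm;
	int referenced = 0;

	if (unlikely(PageTransHuge(page))) {
		pmd_t *pmd;

		spin_lock(&mm->page_table_lock);
		/* filter out rmap false positives before acting on this vma */
		pmd = page_check_address_pmd(page, mm, address,
					     PAGE_CHECK_ADDRESS_PMD_FLAG);
		if (!pmd) {
			spin_unlock(&mm->page_table_lock);
			goto out;
		}

		/* mapping confirmed, so VM_LOCKED may now be attributed */
		if (vma->vm_flags & VM_LOCKED) {
			spin_unlock(&mm->page_table_lock);
			*mapcount = 0;	/* break early from loop */
			*vm_flags |= VM_LOCKED;
			goto out;
		}

		/* go ahead even if the pmd is pmd_trans_splitting() */
		if (pmdp_clear_flush_young_notify(vma, address, pmd))
			referenced++;
		spin_unlock(&mm->page_table_lock);
	} else {
		pte_t *pte;
		spinlock_t *ptl;

		/* same pattern: confirm the pte before honouring VM_LOCKED */
		pte = page_check_address(page, mm, address, &ptl, 0);
		if (!pte)
			goto out;

		if (vma->vm_flags & VM_LOCKED) {
			pte_unmap_unlock(pte, ptl);
			*mapcount = 0;	/* break early from loop */
			*vm_flags |= VM_LOCKED;
			goto out;
		}

		/* young-bit test and sequential-read handling follow,
		   unchanged (elided between the two hunks), then
		   pte_unmap_unlock(pte, ptl); */
	}

	/* swap-token heuristic now runs only for a confirmed mapping */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	/* remainder of the function (use of referenced, the out: label) unchanged */

The point of the new ordering, per the comments added by the patch, is that rmap can return false positives: neither VM_LOCKED nor the swap-token heuristic should be honoured until page_check_address() or page_check_address_pmd() has confirmed that this vma really maps the page.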
