Diffstat (limited to 'mm/rmap.c')
-rw-r--r--   mm/rmap.c | 62
1 file changed, 38 insertions, 24 deletions
@@ -360,7 +360,7 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
  * Returns virtual address or -EFAULT if page's index/offset is not
  * within the range mapped the @vma.
  */
-static inline unsigned long
+inline unsigned long
 vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -435,6 +435,8 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
 		return NULL;
+	if (pmd_trans_huge(*pmd))
+		return NULL;
 
 	pte = pte_offset_map(pmd, address);
 	/* Make a quick check before getting the lock */
@@ -489,35 +491,17 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	pte_t *pte;
-	spinlock_t *ptl;
 	int referenced = 0;
 
-	pte = page_check_address(page, mm, address, &ptl, 0);
-	if (!pte)
-		goto out;
-
 	/*
 	 * Don't want to elevate referenced for mlocked page that gets this far,
 	 * in order that it progresses to try_to_unmap and is moved to the
 	 * unevictable list.
 	 */
 	if (vma->vm_flags & VM_LOCKED) {
-		*mapcount = 1;	/* break early from loop */
+		*mapcount = 0;	/* break early from loop */
 		*vm_flags |= VM_LOCKED;
-		goto out_unmap;
-	}
-
-	if (ptep_clear_flush_young_notify(vma, address, pte)) {
-		/*
-		 * Don't treat a reference through a sequentially read
-		 * mapping as such. If the page has been used in
-		 * another mapping, we will catch it; if this other
-		 * mapping is already gone, the unmap path will have
-		 * set PG_referenced or activated the page.
-		 */
-		if (likely(!VM_SequentialReadHint(vma)))
-			referenced++;
+		goto out;
 	}
 
 	/* Pretend the page is referenced if the task has the
@@ -526,9 +510,39 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 	    rwsem_is_locked(&mm->mmap_sem))
 		referenced++;
 
-out_unmap:
+	if (unlikely(PageTransHuge(page))) {
+		pmd_t *pmd;
+
+		spin_lock(&mm->page_table_lock);
+		pmd = page_check_address_pmd(page, mm, address,
+					     PAGE_CHECK_ADDRESS_PMD_FLAG);
+		if (pmd && !pmd_trans_splitting(*pmd) &&
+		    pmdp_clear_flush_young_notify(vma, address, pmd))
+			referenced++;
+		spin_unlock(&mm->page_table_lock);
+	} else {
+		pte_t *pte;
+		spinlock_t *ptl;
+
+		pte = page_check_address(page, mm, address, &ptl, 0);
+		if (!pte)
+			goto out;
+
+		if (ptep_clear_flush_young_notify(vma, address, pte)) {
+			/*
+			 * Don't treat a reference through a sequentially read
+			 * mapping as such. If the page has been used in
+			 * another mapping, we will catch it; if this other
+			 * mapping is already gone, the unmap path will have
+			 * set PG_referenced or activated the page.
+			 */
+			if (likely(!VM_SequentialReadHint(vma)))
+				referenced++;
+		}
+		pte_unmap_unlock(pte, ptl);
+	}
+
 	(*mapcount)--;
-	pte_unmap_unlock(pte, ptl);
 
 	if (referenced)
 		*vm_flags |= vma->vm_flags;
@@ -1202,7 +1216,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
-static bool is_vma_temporary_stack(struct vm_area_struct *vma)
+bool is_vma_temporary_stack(struct vm_area_struct *vma)
 {
 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
 