Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c | 39
1 file changed, 25 insertions(+), 14 deletions(-)
@@ -224,10 +224,14 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 /*
  * Check that @page is mapped at @address into @mm.
  *
+ * If @sync is false, page_check_address may perform a racy check to avoid
+ * the page table lock when the pte is not present (helpful when reclaiming
+ * highly shared pages).
+ *
  * On success returns with pte mapped and locked.
  */
 pte_t *page_check_address(struct page *page, struct mm_struct *mm,
-			  unsigned long address, spinlock_t **ptlp)
+			  unsigned long address, spinlock_t **ptlp, int sync)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -249,7 +253,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 
 	pte = pte_offset_map(pmd, address);
 	/* Make a quick check before getting the lock */
-	if (!pte_present(*pte)) {
+	if (!sync && !pte_present(*pte)) {
 		pte_unmap(pte);
 		return NULL;
 	}
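Taken together, the two hunks above form an optimistic-check pattern: peek at the pte without the page table lock, bail out early when it is clearly not present, and pay for the lock only when the fast path says the entry may be there; passing a true @sync skips the racy peek entirely. Below is a minimal user-space sketch of the same idea using pthreads -- check_entry, table_slot and struct entry are invented stand-ins for illustration, not kernel code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a pte: one "present" bit plus a payload. */
struct entry {
	bool present;
	long value;
};

static struct entry table_slot = { .present = true, .value = 42 };
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Analogue of page_check_address(): return the entry locked, or NULL.
 * If sync is false, do a racy unlocked peek first, so callers that can
 * tolerate a false negative skip the lock when the slot looks empty.
 */
static struct entry *check_entry(bool sync)
{
	/* Racy quick check: may miss an entry being installed right now. */
	if (!sync && !table_slot.present)
		return NULL;

	pthread_mutex_lock(&table_lock);
	/* Re-check under the lock; only this test is authoritative. */
	if (!table_slot.present) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}
	return &table_slot;	/* caller must unlock table_lock */
}

int main(void)
{
	struct entry *e = check_entry(false);	/* reclaim-style caller */
	if (e) {
		printf("present, value=%ld\n", e->value);
		pthread_mutex_unlock(&table_lock);
	} else {
		printf("not present (possibly a racy miss)\n");
	}
	return 0;
}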
@@ -281,7 +285,7 @@ static int page_referenced_one(struct page *page,
 	if (address == -EFAULT)
 		goto out;
 
-	pte = page_check_address(page, mm, address, &ptl);
+	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
 
@@ -450,7 +454,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 	if (address == -EFAULT)
 		goto out;
 
-	pte = page_check_address(page, mm, address, &ptl);
+	pte = page_check_address(page, mm, address, &ptl, 1);
 	if (!pte)
 		goto out;
 
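The contrast between these two call sites is the point of the new flag: page_referenced_one (and try_to_unmap_one further down) pass 0, since a racily missed pte only means a reference goes uncounted or an unmap is retried later, which matches the new comment's "helpful when reclaiming highly shared pages". page_mkclean_one passes 1, presumably because it is part of dirty page tracking, where skipping a pte that is only momentarily not present could lose a dirty bit.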
@@ -659,23 +663,30 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 	}
 
 	/*
-	 * It would be tidy to reset the PageAnon mapping here,
-	 * but that might overwrite a racing page_add_anon_rmap
-	 * which increments mapcount after us but sets mapping
-	 * before us: so leave the reset to free_hot_cold_page,
-	 * and remember that it's only reliable while mapped.
-	 * Leaving it set also helps swapoff to reinstate ptes
-	 * faster for those pages still in swapcache.
+	 * Now that the last pte has gone, s390 must transfer dirty
+	 * flag from storage key to struct page. We can usually skip
+	 * this if the page is anon, so about to be freed; but perhaps
+	 * not if it's in swapcache - there might be another pte slot
+	 * containing the swap entry, but page not yet written to swap.
 	 */
 	if ((!PageAnon(page) || PageSwapCache(page)) &&
 	    page_test_dirty(page)) {
 		page_clear_dirty(page);
 		set_page_dirty(page);
 	}
-	mem_cgroup_uncharge_page(page);
 
+	mem_cgroup_uncharge_page(page);
 	__dec_zone_page_state(page,
 		PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
+	/*
+	 * It would be tidy to reset the PageAnon mapping here,
+	 * but that might overwrite a racing page_add_anon_rmap
+	 * which increments mapcount after us but sets mapping
+	 * before us: so leave the reset to free_hot_cold_page,
+	 * and remember that it's only reliable while mapped.
+	 * Leaving it set also helps swapoff to reinstate ptes
+	 * faster for those pages still in swapcache.
+	 */
 	}
 }
 
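The reworded comment in this hunk reflects that s390 records dirtiness in a per-physical-page storage key rather than in the pte, so by the time the last mapping disappears nothing has propagated it into the struct page, and the block is a test-clear-and-transfer of that hardware bit. A toy stand-alone model of the hand-off follows; every name in it is an invented stand-in (for page_test_dirty()/page_clear_dirty()/set_page_dirty()), not the real implementation:

#include <stdbool.h>
#include <stdio.h>

/* Toy model: "hardware" tracks dirtiness outside the page flags,
 * the way s390's storage keys do; the kernel flag starts out clear. */
static bool storage_key_dirty = true;	/* set by hardware on write */
static bool pg_dirty = false;		/* the struct page's dirty flag */

static bool test_hw_dirty(void)  { return storage_key_dirty; }
static void clear_hw_dirty(void) { storage_key_dirty = false; }
static void set_sw_dirty(void)   { pg_dirty = true; }

/* Last mapping just went away: fold the hardware bit into the page,
 * or its dirtiness is lost once the storage key is reused. Pure anon
 * pages are about to be freed, so the bit is moot; swapcache pages
 * may still need writing to swap, so their bit must be kept. */
static void transfer_dirty_on_last_unmap(bool page_anon, bool in_swapcache)
{
	if ((!page_anon || in_swapcache) && test_hw_dirty()) {
		clear_hw_dirty();
		set_sw_dirty();
	}
}

int main(void)
{
	transfer_dirty_on_last_unmap(true, true);	/* anon, in swapcache */
	printf("PG_dirty=%d storage_key_dirty=%d\n", pg_dirty, storage_key_dirty);
	return 0;
}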
@@ -697,7 +708,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	if (address == -EFAULT)
 		goto out;
 
-	pte = page_check_address(page, mm, address, &ptl);
+	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
 
