Diffstat (limited to 'mm/swapfile.c')

 mm/swapfile.c | 41 +++++++++++++++++++----------------------
 1 file changed, 19 insertions(+), 22 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1dcaeda039f4..8970c0b74194 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -61,7 +61,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 	swp_entry_t entry;
 
 	down_read(&swap_unplug_sem);
-	entry.val = page->private;
+	entry.val = page_private(page);
 	if (PageSwapCache(page)) {
 		struct block_device *bdev = swap_info[swp_type(entry)].bdev;
 		struct backing_dev_info *bdi;
@@ -69,8 +69,8 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 		/*
 		 * If the page is removed from swapcache from under us (with a
 		 * racy try_to_unuse/swapoff) we need an additional reference
-		 * count to avoid reading garbage from page->private above. If
-		 * the WARN_ON triggers during a swapoff it maybe the race
+		 * count to avoid reading garbage from page_private(page) above.
+		 * If the WARN_ON triggers during a swapoff it maybe the race
 		 * condition and it's harmless. However if it triggers without
 		 * swapoff it signals a problem.
 		 */
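
The page->private to page_private(page) conversions in this and the following hunks replace raw field accesses with an accessor macro. As a rough sketch of what that accessor looked like in this era of the tree, assuming the include/linux/mm.h definitions (quoted from memory, so treat as an approximation rather than the literal source):

	/* Trivial wrappers today, but a single indirection point, so
	 * ->private can later be overloaded (the split page table
	 * lock work does exactly that) without touching every caller. */
	#define page_private(page)		((page)->private)
	#define set_page_private(page, v)	((page)->private = (v))
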
@@ -294,7 +294,7 @@ static inline int page_swapcount(struct page *page)
 	struct swap_info_struct *p;
 	swp_entry_t entry;
 
-	entry.val = page->private;
+	entry.val = page_private(page);
 	p = swap_info_get(entry);
 	if (p) {
 		/* Subtract the 1 for the swap cache itself */
@@ -339,7 +339,7 @@ int remove_exclusive_swap_page(struct page *page)
 	if (page_count(page) != 2) /* 2: us + cache */
 		return 0;
 
-	entry.val = page->private;
+	entry.val = page_private(page);
 	p = swap_info_get(entry);
 	if (!p)
 		return 0;
@@ -398,17 +398,14 @@ void free_swap_and_cache(swp_entry_t entry)
 }
 
 /*
- * Always set the resulting pte to be nowrite (the same as COW pages
- * after one process has exited). We don't know just how many PTEs will
- * share this swap entry, so be cautious and let do_wp_page work out
- * what to do if a write is requested later.
- *
- * vma->vm_mm->page_table_lock is held.
+ * No need to decide whether this PTE shares the swap entry with others,
+ * just let do_wp_page work it out if a write is requested later - to
+ * force COW, vm_page_prot omits write permission from any private vma.
  */
 static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
-	inc_mm_counter(vma->vm_mm, rss);
+	inc_mm_counter(vma->vm_mm, anon_rss);
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,
 		pte_mkold(mk_pte(page, vma->vm_page_prot)));
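
Two separate things change in this hunk. The stale "vma->vm_mm->page_table_lock is held" note goes away because the lock is now taken per page table by the caller (see the next hunk), and the comment is reworded around the real invariant: mk_pte() with vma->vm_page_prot yields a write-protected pte on any private mapping, so a later write faults into do_wp_page, which decides whether the page can be reused or must be copied. The counter change reflects the rss split into file and anon halves; a minimal sketch of those helpers, assuming the include/linux/sched.h definitions of this era (from memory, treat as approximate):

	/* rss is kept as two counters so anonymous and file-backed
	 * pages are accounted separately; total rss is their sum. */
	#define get_mm_counter(mm, member)	((mm)->_##member)
	#define inc_mm_counter(mm, member)	(mm)->_##member++
	#define get_mm_rss(mm)	(get_mm_counter(mm, file_rss) + \
				 get_mm_counter(mm, anon_rss))
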
@@ -425,23 +422,25 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end,
 		swp_entry_t entry, struct page *page)
 {
-	pte_t *pte;
 	pte_t swp_pte = swp_entry_to_pte(entry);
+	pte_t *pte;
+	spinlock_t *ptl;
+	int found = 0;
 
-	pte = pte_offset_map(pmd, addr);
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		/*
 		 * swapoff spends a _lot_ of time in this loop!
 		 * Test inline before going to call unuse_pte.
 		 */
 		if (unlikely(pte_same(*pte, swp_pte))) {
-			unuse_pte(vma, pte, addr, entry, page);
-			pte_unmap(pte);
-			return 1;
+			unuse_pte(vma, pte++, addr, entry, page);
+			found = 1;
+			break;
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(pte - 1);
-	return 0;
+	pte_unmap_unlock(pte - 1, ptl);
+	return found;
 }
 
 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
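
This hunk is the heart of the change: instead of assuming the caller holds mm->page_table_lock, the walk maps the page table and takes the lock guarding it in one step. Distilled from the code above, the canonical pattern looks like this (a sketch with a placeholder loop body, not literal kernel code):

	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		/* inspect or modify *pte only while ptl is held */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);

This also explains the odd-looking unuse_pte(vma, pte++, ...): the single pte_unmap_unlock(pte - 1, ptl) after the loop expects pte to point one past the last entry examined, so the found path must advance pte before breaking, just as the loop increment does on the normal exit path.
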
@@ -523,12 +522,10 @@ static int unuse_mm(struct mm_struct *mm,
 		down_read(&mm->mmap_sem);
 		lock_page(page);
 	}
-	spin_lock(&mm->page_table_lock);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma->anon_vma && unuse_vma(vma, entry, page))
 			break;
 	}
-	spin_unlock(&mm->page_table_lock);
 	up_read(&mm->mmap_sem);
 	/*
 	 * Currently unuse_mm cannot fail, but leave error handling
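
With the locking pushed down into unuse_pte_range, unuse_mm no longer wraps the whole VMA walk in mm->page_table_lock; each page table is locked only while it is actually being scanned. Schematically (a before/after sketch, not literal code from the tree):

	/* before: one mm-wide lock held across every VMA and
	 * every page table in the mm */
	spin_lock(&mm->page_table_lock);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		unuse_vma(vma, entry, page);
	spin_unlock(&mm->page_table_lock);

	/* after: no lock at this level; unuse_pte_range takes and
	 * drops the lock for each page table it scans */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		unuse_vma(vma, entry, page);
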
@@ -1045,7 +1042,7 @@ int page_queue_congested(struct page *page)
 	BUG_ON(!PageLocked(page));	/* It pins the swap_info_struct */
 
 	if (PageSwapCache(page)) {
-		swp_entry_t entry = { .val = page->private };
+		swp_entry_t entry = { .val = page_private(page) };
 		struct swap_info_struct *sis;
 
 		sis = get_swap_info_struct(swp_type(entry));