Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 62 insertions(+), 23 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 967d30ccd92b..8f71761bc4b7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -38,6 +38,7 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/memcontrol.h>
+#include <linux/delayacct.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -390,17 +391,15 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 }
 
 /*
- * Attempt to detach a locked page from its ->mapping. If it is dirty or if
- * someone else has a ref on the page, abort and return 0.  If it was
- * successfully detached, return 1.  Assumes the caller has a single ref on
- * this page.
+ * Same as remove_mapping, but if the page is removed from the mapping, it
+ * gets returned with a refcount of 0.
  */
-int remove_mapping(struct address_space *mapping, struct page *page)
+static int __remove_mapping(struct address_space *mapping, struct page *page)
 {
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
 
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
        /*
         * The non racy check for a busy page.
         *
@@ -426,28 +425,48 @@ int remove_mapping(struct address_space *mapping, struct page *page)
         * Note that if SetPageDirty is always performed via set_page_dirty,
         * and thus under tree_lock, then this ordering is not required.
         */
-       if (unlikely(page_count(page) != 2))
+       if (!page_freeze_refs(page, 2))
                goto cannot_free;
-       smp_rmb();
-       if (unlikely(PageDirty(page)))
+       /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
+       if (unlikely(PageDirty(page))) {
+               page_unfreeze_refs(page, 2);
                goto cannot_free;
+       }
 
        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
                __delete_from_swap_cache(page);
-               write_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irq(&mapping->tree_lock);
                swap_free(swap);
-               __put_page(page);       /* The pagecache ref */
-               return 1;
+       } else {
+               __remove_from_page_cache(page);
+               spin_unlock_irq(&mapping->tree_lock);
        }
 
-       __remove_from_page_cache(page);
-       write_unlock_irq(&mapping->tree_lock);
-       __put_page(page);
        return 1;
 
 cannot_free:
-       write_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irq(&mapping->tree_lock);
+       return 0;
+}
+
+/*
+ * Attempt to detach a locked page from its ->mapping. If it is dirty or if
+ * someone else has a ref on the page, abort and return 0.  If it was
+ * successfully detached, return 1.  Assumes the caller has a single ref on
+ * this page.
+ */
+int remove_mapping(struct address_space *mapping, struct page *page)
+{
+       if (__remove_mapping(mapping, page)) {
+               /*
+                * Unfreezing the refcount with 1 rather than 2 effectively
+                * drops the pagecache ref for us without requiring another
+                * atomic operation.
+                */
+               page_unfreeze_refs(page, 1);
+               return 1;
+       }
        return 0;
 }
 
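[Annotation] The freeze/unfreeze pair above is what makes this safe against lockless lookups: __remove_mapping() may only free the page if it can atomically take the refcount from the expected value (2: one for the caller, one for the pagecache) to 0, so a concurrent reader that has already taken a speculative reference forces the cmpxchg to fail and the page is kept. These helpers come from the companion lockless-pagecache patch, not this diff; a rough sketch of their shape in <linux/pagemap.h>:

    static inline int page_freeze_refs(struct page *page, int count)
    {
            /* Succeeds only if nobody else holds a reference. */
            return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
    }

    static inline void page_unfreeze_refs(struct page *page, int count)
    {
            /* Only legal on a page whose refcount is currently frozen at 0. */
            VM_BUG_ON(page_count(page) != 0);
            atomic_set(&page->_count, count);
    }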
@@ -597,18 +616,34 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (PagePrivate(page)) {
                        if (!try_to_release_page(page, sc->gfp_mask))
                                goto activate_locked;
-                       if (!mapping && page_count(page) == 1)
-                               goto free_it;
+                       if (!mapping && page_count(page) == 1) {
+                               unlock_page(page);
+                               if (put_page_testzero(page))
+                                       goto free_it;
+                               else {
+                                       /*
+                                        * rare race with speculative reference.
+                                        * the speculative reference will free
+                                        * this page shortly, so we may
+                                        * increment nr_reclaimed here (and
+                                        * leave it off the LRU).
+                                        */
+                                       nr_reclaimed++;
+                                       continue;
+                               }
+                       }
                }
 
-               if (!mapping || !remove_mapping(mapping, page))
+               if (!mapping || !__remove_mapping(mapping, page))
                        goto keep_locked;
 
-free_it:
                unlock_page(page);
+free_it:
                nr_reclaimed++;
-               if (!pagevec_add(&freed_pvec, page))
-                       __pagevec_release_nonlru(&freed_pvec);
+               if (!pagevec_add(&freed_pvec, page)) {
+                       __pagevec_free(&freed_pvec);
+                       pagevec_reinit(&freed_pvec);
+               }
                continue;
 
 activate_locked:
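[Annotation] The "rare race with speculative reference" handled above is the flip side of the freeze: a lockless lookup may raise the refcount between try_to_release_page() and put_page_testzero(). A minimal sketch of the reader side (speculative_lookup is a hypothetical name for illustration; the real code lives in find_get_page() of the lockless-pagecache series):

    /*
     * Sketch only: take a reference iff the count is non-zero, then
     * recheck identity. A page being torn down has its refcount frozen
     * to 0, so the reader backs off instead of resurrecting it.
     */
    static struct page *speculative_lookup(struct address_space *mapping,
                                           pgoff_t index)
    {
            struct page *page;

            rcu_read_lock();
            page = radix_tree_lookup(&mapping->page_tree, index);
            if (page && !get_page_unless_zero(page))
                    page = NULL;            /* freeze won: page is being freed */
            rcu_read_unlock();

            if (page && unlikely(page->mapping != mapping ||
                                 page->index != index)) {
                    put_page(page);         /* raced with removal/reuse */
                    page = NULL;
            }
            return page;
    }

If such a reader wins the race, vmscan's put_page_testzero() fails even though the page is already detached; the reader's final put_page() frees it, which is why nr_reclaimed is still incremented.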
@@ -622,7 +657,7 @@ keep:
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
-               __pagevec_release_nonlru(&freed_pvec);
+               __pagevec_free(&freed_pvec);
        count_vm_events(PGACTIVATE, pgactivate);
        return nr_reclaimed;
 }
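[Annotation] __pagevec_free() replaces __pagevec_release_nonlru() here because pages coming out of __remove_mapping() have their refcount frozen at zero: they must go straight back to the page allocator, not through a put_page()-style release that would underflow the count. For reference, the era's implementation in mm/page_alloc.c is roughly:

    void __pagevec_free(struct pagevec *pvec)
    {
            int i = pagevec_count(pvec);

            /* Hand each page directly back to the allocator. */
            while (--i >= 0)
                    free_hot_cold_page(pvec->pages[i], pvec->cold);
    }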
@@ -1316,6 +1351,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        struct zone *zone;
        enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 
+       delayacct_freepages_start();
+
        if (scan_global_lru(sc))
                count_vm_event(ALLOCSTALL);
        /*
@@ -1396,6 +1433,8 @@ out:
        } else
                mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
 
+       delayacct_freepages_end();
+
        return ret;
 }
 
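[Annotation] The delayacct_freepages_start()/end() pair brackets the whole direct-reclaim attempt so the stall is charged to the calling task's per-task delay accounting. Assuming the companion delayacct patch, the out-of-line hooks in kernel/delayacct.c reduce to something like this sketch (field names per that patch):

    void __delayacct_freepages_start(void)
    {
            /* Timestamp the start of the memory-reclaim stall. */
            delayacct_start(&current->delays->freepages_start);
    }

    void __delayacct_freepages_end(void)
    {
            /* Accumulate the stall into freepages_delay/freepages_count. */
            delayacct_end(&current->delays->freepages_start,
                          &current->delays->freepages_end,
                          &current->delays->freepages_delay,
                          &current->delays->freepages_count);
    }

The static-inline wrappers in <linux/delayacct.h> check current->delays for NULL first, so tasks without delay accounting enabled pay only a branch.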