| author | Ingo Molnar <mingo@elte.hu> | 2008-07-28 15:14:43 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-07-28 15:14:43 -0400 |
| commit | 414f746d232d41ed6ae8632c4495ae795373c44b | |
| tree | 167f9bc8f139c6e82e6732b38c7a938b8a9d31cd /mm/vmscan.c | |
| parent | 5a7a201c51c324876d00a54e7208af6af12d1ca4 | |
| parent | c9272c4f9fbe2087beb3392f526dc5b19efaa56b | |
Merge branch 'linus' into cpus4096
Diffstat (limited to 'mm/vmscan.c')

| -rw-r--r-- | mm/vmscan.c | 80 |
|---|---|---|

1 file changed, 57 insertions(+), 23 deletions(-)
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 26672c6cd3ce..8f71761bc4b7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -391,17 +391,15 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 }
 
 /*
- * Attempt to detach a locked page from its ->mapping. If it is dirty or if
- * someone else has a ref on the page, abort and return 0. If it was
- * successfully detached, return 1. Assumes the caller has a single ref on
- * this page.
+ * Same as remove_mapping, but if the page is removed from the mapping, it
+ * gets returned with a refcount of 0.
  */
-int remove_mapping(struct address_space *mapping, struct page *page)
+static int __remove_mapping(struct address_space *mapping, struct page *page)
 {
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	/*
 	 * The non racy check for a busy page.
 	 *
@@ -427,28 +425,48 @@ int remove_mapping(struct address_space *mapping, struct page *page)
 	 * Note that if SetPageDirty is always performed via set_page_dirty,
 	 * and thus under tree_lock, then this ordering is not required.
 	 */
-	if (unlikely(page_count(page) != 2))
+	if (!page_freeze_refs(page, 2))
 		goto cannot_free;
-	smp_rmb();
-	if (unlikely(PageDirty(page)))
+	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
+	if (unlikely(PageDirty(page))) {
+		page_unfreeze_refs(page, 2);
 		goto cannot_free;
+	}
 
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		__delete_from_swap_cache(page);
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		swap_free(swap);
-		__put_page(page);	/* The pagecache ref */
-		return 1;
+	} else {
+		__remove_from_page_cache(page);
+		spin_unlock_irq(&mapping->tree_lock);
 	}
 
-	__remove_from_page_cache(page);
-	write_unlock_irq(&mapping->tree_lock);
-	__put_page(page);
 	return 1;
 
 cannot_free:
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
+	return 0;
+}
+
+/*
+ * Attempt to detach a locked page from its ->mapping. If it is dirty or if
+ * someone else has a ref on the page, abort and return 0. If it was
+ * successfully detached, return 1. Assumes the caller has a single ref on
+ * this page.
+ */
+int remove_mapping(struct address_space *mapping, struct page *page)
+{
+	if (__remove_mapping(mapping, page)) {
+		/*
+		 * Unfreezing the refcount with 1 rather than 2 effectively
+		 * drops the pagecache ref for us without requiring another
+		 * atomic operation.
+		 */
+		page_unfreeze_refs(page, 1);
+		return 1;
+	}
 	return 0;
 }
 
```
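The first two hunks replace the old racy `page_count(page) != 2` test with an atomic freeze of the refcount, and split `remove_mapping()` into a helper that returns the page with a frozen count of zero. As a rough sketch of the freeze/unfreeze pair this code relies on (the real definitions come from the speculative-page-references series in include/linux/pagemap.h; the bodies below are an approximation from that series, not verbatim source):

```c
/* Approximate sketch of the primitives used above, not verbatim kernel source. */
static inline int page_freeze_refs(struct page *page, int count)
{
	/*
	 * Succeed only if exactly 'count' references exist, atomically
	 * replacing them with 0; a frozen page makes concurrent
	 * get_page_unless_zero() callers back off.
	 */
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);	/* must still be frozen */
	atomic_set(&page->_count, count);
}
```

Because `atomic_cmpxchg()` acts as a full memory barrier, the explicit `smp_rmb()` before the `PageDirty()` check becomes redundant, and `remove_mapping()` unfreezing to 1 rather than 2 drops the pagecache reference without a second atomic operation, exactly as the new comments in the diff note.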
```diff
@@ -598,18 +616,34 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (PagePrivate(page)) {
 			if (!try_to_release_page(page, sc->gfp_mask))
 				goto activate_locked;
-			if (!mapping && page_count(page) == 1)
-				goto free_it;
+			if (!mapping && page_count(page) == 1) {
+				unlock_page(page);
+				if (put_page_testzero(page))
+					goto free_it;
+				else {
+					/*
+					 * rare race with speculative reference.
+					 * the speculative reference will free
+					 * this page shortly, so we may
+					 * increment nr_reclaimed here (and
+					 * leave it off the LRU).
+					 */
+					nr_reclaimed++;
+					continue;
+				}
+			}
 		}
 
-		if (!mapping || !remove_mapping(mapping, page))
+		if (!mapping || !__remove_mapping(mapping, page))
 			goto keep_locked;
 
-free_it:
 		unlock_page(page);
+free_it:
 		nr_reclaimed++;
-		if (!pagevec_add(&freed_pvec, page))
-			__pagevec_release_nonlru(&freed_pvec);
+		if (!pagevec_add(&freed_pvec, page)) {
+			__pagevec_free(&freed_pvec);
+			pagevec_reinit(&freed_pvec);
+		}
 		continue;
 
 activate_locked:
```
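In the buffer-page path above, observing `page_count(page) == 1` is no longer treated as stable: a lockless lookup may take a speculative reference at any moment, so the code drops the last reference with `put_page_testzero()` and, in the rare losing case, lets the speculative holder free the page. A hypothetical, heavily simplified reader side showing where such a reference comes from (this is an illustration, not the series' actual `find_get_page()` fast path):

```c
/*
 * Hypothetical simplification of a lockless pagecache lookup: the
 * reference is taken with an atomic inc-not-zero, so it can appear
 * between reclaim's page_count() check and its final put.
 */
static struct page *speculative_lookup(struct address_space *mapping,
				       pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
	page = radix_tree_lookup(&mapping->page_tree, index);
	if (page && !get_page_unless_zero(page))
		page = NULL;	/* refcount was frozen/zero: being freed */
	rcu_read_unlock();

	return page;
}
```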
```diff
@@ -623,7 +657,7 @@ keep:
 	}
 	list_splice(&ret_pages, page_list);
 	if (pagevec_count(&freed_pvec))
-		__pagevec_release_nonlru(&freed_pvec);
+		__pagevec_free(&freed_pvec);
 	count_vm_events(PGACTIVATE, pgactivate);
 	return nr_reclaimed;
 }
```
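The final hunks swap `__pagevec_release_nonlru()` for `__pagevec_free()`: pages now reach `free_it` with a reference count of zero (via `__remove_mapping()` or `put_page_testzero()`), so there is no reference left to release and the batch can be freed outright. The full batching idiom, assuming the `pagevec_init(&freed_pvec, 1)` setup that `shrink_page_list()` already uses, looks roughly like:

```c
struct pagevec freed_pvec;

pagevec_init(&freed_pvec, 1);		/* batch of cold pages to free */

/* per reclaimed page, whose refcount is already zero: */
if (!pagevec_add(&freed_pvec, page)) {	/* returns 0 once the vec is full */
	__pagevec_free(&freed_pvec);	/* free the whole batch at once */
	pagevec_reinit(&freed_pvec);	/* start filling a fresh batch */
}

/* after the loop, flush any remainder: */
if (pagevec_count(&freed_pvec))
	__pagevec_free(&freed_pvec);
```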
