author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2006-01-09 14:18:33 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2006-01-09 14:18:33 -0500
commit		0a3a98f6dd4e8f4d928a09302c0d1c56f2192ac3 (patch)
tree		92f55e374a84d06ce8213a4540454760fdecf137 /mm/vmscan.c
parent		8ef12c9f01afba47c2d33bb939085111ca0d0f7d (diff)
parent		5367f2d67c7d0bf1faae90e6e7b4e2ac3c9b5e0f (diff)

Merge Linus' tree.
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	343
1 files changed, 289 insertions, 54 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index be8235fb1939..bf903b2d198f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -180,8 +180,7 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-			unsigned long lru_pages)
+int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
 	int ret = 0;
@@ -269,9 +268,7 @@ static inline int is_page_cache_freeable(struct page *page)
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
 {
-	if (current_is_kswapd())
-		return 1;
-	if (current_is_pdflush())	/* This is unlikely, but why not... */
+	if (current->flags & PF_SWAPWRITE)
 		return 1;
 	if (!bdi_write_congested(bdi))
 		return 1;
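
The PF_SWAPWRITE test above replaces the kswapd/pdflush special cases with a generic capability flag: any task that sets PF_SWAPWRITE may write to a congested queue. As a hedged sketch of the pattern (hypothetical caller; within this commit the real users are kswapd and migrate_pages() further down):

	/* Hypothetical sketch: temporarily grant the current task the
	 * right to write to congested swap queues, mirroring the
	 * save/set/restore pattern migrate_pages() uses below. */
	int swapwrite = current->flags & PF_SWAPWRITE;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;
	/* ... trigger swap writeout; may_write_to_queue() now says yes ... */
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;
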
@@ -376,6 +373,43 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
 	return PAGE_CLEAN;
 }
 
+static int remove_mapping(struct address_space *mapping, struct page *page)
+{
+	if (!mapping)
+		return 0;		/* truncate got there first */
+
+	write_lock_irq(&mapping->tree_lock);
+
+	/*
+	 * The non-racy check for busy page. It is critical to check
+	 * PageDirty _after_ making sure that the page is freeable and
+	 * not in use by anybody. (pagecache + us == 2)
+	 */
+	if (unlikely(page_count(page) != 2))
+		goto cannot_free;
+	smp_rmb();
+	if (unlikely(PageDirty(page)))
+		goto cannot_free;
+
+	if (PageSwapCache(page)) {
+		swp_entry_t swap = { .val = page_private(page) };
+		__delete_from_swap_cache(page);
+		write_unlock_irq(&mapping->tree_lock);
+		swap_free(swap);
+		__put_page(page);	/* The pagecache ref */
+		return 1;
+	}
+
+	__remove_from_page_cache(page);
+	write_unlock_irq(&mapping->tree_lock);
+	__put_page(page);
+	return 1;
+
+cannot_free:
+	write_unlock_irq(&mapping->tree_lock);
+	return 0;
+}
+
 /*
  * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
  */
@@ -424,7 +458,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
-			if (!add_to_swap(page))
+			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
 		}
 #endif /* CONFIG_SWAP */
@@ -507,36 +541,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 			goto free_it;
 		}
 
-		if (!mapping)
-			goto keep_locked;	/* truncate got there first */
-
-		write_lock_irq(&mapping->tree_lock);
-
-		/*
-		 * The non-racy check for busy page. It is critical to check
-		 * PageDirty _after_ making sure that the page is freeable and
-		 * not in use by anybody. (pagecache + us == 2)
-		 */
-		if (unlikely(page_count(page) != 2))
-			goto cannot_free;
-		smp_rmb();
-		if (unlikely(PageDirty(page)))
-			goto cannot_free;
-
-#ifdef CONFIG_SWAP
-		if (PageSwapCache(page)) {
-			swp_entry_t swap = { .val = page_private(page) };
-			__delete_from_swap_cache(page);
-			write_unlock_irq(&mapping->tree_lock);
-			swap_free(swap);
-			__put_page(page);	/* The pagecache ref */
-			goto free_it;
-		}
-#endif /* CONFIG_SWAP */
-
-		__remove_from_page_cache(page);
-		write_unlock_irq(&mapping->tree_lock);
-		__put_page(page);
+		if (!remove_mapping(mapping, page))
+			goto keep_locked;
 
 free_it:
 		unlock_page(page);
@@ -545,10 +551,6 @@ free_it:
 			__pagevec_release_nonlru(&freed_pvec);
 		continue;
 
-cannot_free:
-		write_unlock_irq(&mapping->tree_lock);
-		goto keep_locked;
-
 activate_locked:
 		SetPageActive(page);
 		pgactivate++;
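
Taken together, the three shrink_list hunks above move the open-coded pagecache release path into the new remove_mapping() helper so the migration code below can share it. A minimal sketch of the helper's contract from a hypothetical caller's side (annotation only, not part of the commit):

	/* Sketch: the caller must hold the page lock plus one reference,
	 * so a freeable pagecache page has page_count() == 2
	 * (pagecache + us). remove_mapping() returns 1 on success,
	 * having detached the page and dropped the pagecache reference,
	 * or 0 if the page is busy, dirty, or already truncated. */
	if (remove_mapping(page_mapping(page), page)) {
		unlock_page(page);
		/* the last reference is ours; the page can now be freed */
	} else {
		unlock_page(page);
		/* keep the page; it is still in use or still dirty */
	}
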
@@ -566,6 +568,241 @@ keep:
 	return reclaimed;
 }
 
+#ifdef CONFIG_MIGRATION
+static inline void move_to_lru(struct page *page)
+{
+	list_del(&page->lru);
+	if (PageActive(page)) {
+		/*
+		 * lru_cache_add_active checks that
+		 * the PG_active bit is off.
+		 */
+		ClearPageActive(page);
+		lru_cache_add_active(page);
+	} else {
+		lru_cache_add(page);
+	}
+	put_page(page);
+}
+
+/*
+ * Add isolated pages on the list back to the LRU
+ *
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+	struct page *page;
+	struct page *page2;
+	int count = 0;
+
+	list_for_each_entry_safe(page, page2, l, lru) {
+		move_to_lru(page);
+		count++;
+	}
+	return count;
+}
+
+/*
+ * swapout a single page
+ * page is locked upon entry, unlocked on exit
+ */
+static int swap_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+
+	if (page_mapped(page) && mapping)
+		if (try_to_unmap(page) != SWAP_SUCCESS)
+			goto unlock_retry;
+
+	if (PageDirty(page)) {
+		/* Page is dirty, try to write it out here */
+		switch(pageout(page, mapping)) {
+		case PAGE_KEEP:
+		case PAGE_ACTIVATE:
+			goto unlock_retry;
+
+		case PAGE_SUCCESS:
+			goto retry;
+
+		case PAGE_CLEAN:
+			; /* try to free the page below */
+		}
+	}
+
+	if (PagePrivate(page)) {
+		if (!try_to_release_page(page, GFP_KERNEL) ||
+		    (!mapping && page_count(page) == 1))
+			goto unlock_retry;
+	}
+
+	if (remove_mapping(mapping, page)) {
+		/* Success */
+		unlock_page(page);
+		return 0;
+	}
+
+unlock_retry:
+	unlock_page(page);
+
+retry:
+	return -EAGAIN;
+}
+/*
+ * migrate_pages
+ *
+ * Two lists are passed to this function. The first list
+ * contains the pages isolated from the LRU to be migrated.
+ * The second list contains new pages that the pages isolated
+ * can be moved to. If the second list is NULL then all
+ * pages are swapped out.
+ *
+ * The function returns after 10 attempts or if no pages
+ * are movable anymore because "to" has become empty
+ * or no retryable pages exist anymore.
+ *
+ * SIMPLIFIED VERSION: This implementation of migrate_pages
+ * is only swapping out pages and never touches the second
+ * list. The direct migration patchset
+ * extends this function to avoid the use of swap.
+ *
+ * Return: Number of pages not migrated when "to" ran empty.
+ */
+int migrate_pages(struct list_head *from, struct list_head *to,
+		struct list_head *moved, struct list_head *failed)
+{
+	int retry;
+	int nr_failed = 0;
+	int pass = 0;
+	struct page *page;
+	struct page *page2;
+	int swapwrite = current->flags & PF_SWAPWRITE;
+	int rc;
+
+	if (!swapwrite)
+		current->flags |= PF_SWAPWRITE;
+
+redo:
+	retry = 0;
+
+	list_for_each_entry_safe(page, page2, from, lru) {
+		cond_resched();
+
+		rc = 0;
+		if (page_count(page) == 1)
+			/* page was freed from under us. So we are done. */
+			goto next;
+
+		/*
+		 * Skip locked pages during the first two passes to give the
+		 * functions holding the lock time to release the page. Later we
+		 * use lock_page() to have a higher chance of acquiring the
+		 * lock.
+		 */
+		rc = -EAGAIN;
+		if (pass > 2)
+			lock_page(page);
+		else
+			if (TestSetPageLocked(page))
+				goto next;
+
+		/*
+		 * Only wait on writeback if we have already done a pass where
+		 * we may have triggered writeouts for lots of pages.
+		 */
+		if (pass > 0) {
+			wait_on_page_writeback(page);
+		} else {
+			if (PageWriteback(page))
+				goto unlock_page;
+		}
+
+		/*
+		 * Anonymous pages must have swap cache references otherwise
+		 * the information contained in the page maps cannot be
+		 * preserved.
+		 */
+		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!add_to_swap(page, GFP_KERNEL)) {
+				rc = -ENOMEM;
+				goto unlock_page;
+			}
+		}
+
+		/*
+		 * Page is properly locked and writeback is complete.
+		 * Try to migrate the page.
+		 */
+		rc = swap_page(page);
+		goto next;
+
+unlock_page:
+		unlock_page(page);
+
+next:
+		if (rc == -EAGAIN) {
+			retry++;
+		} else if (rc) {
+			/* Permanent failure */
+			list_move(&page->lru, failed);
+			nr_failed++;
+		} else {
+			/* Success */
+			list_move(&page->lru, moved);
+		}
+	}
+	if (retry && pass++ < 10)
+		goto redo;
+
+	if (!swapwrite)
+		current->flags &= ~PF_SWAPWRITE;
+
+	return nr_failed + retry;
+}
+
+static void lru_add_drain_per_cpu(void *dummy)
+{
+	lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists and put it on the
+ * indicated list. Do necessary cache draining if the
+ * page is not on the LRU lists yet.
+ *
+ * Result:
+ *  0 = page not on LRU list
+ *  1 = page removed from LRU list and added to the specified list.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+	int rc = 0;
+	struct zone *zone = page_zone(page);
+
+redo:
+	spin_lock_irq(&zone->lru_lock);
+	rc = __isolate_lru_page(page);
+	if (rc == 1) {
+		if (PageActive(page))
+			del_page_from_active_list(zone, page);
+		else
+			del_page_from_inactive_list(zone, page);
+	}
+	spin_unlock_irq(&zone->lru_lock);
+	if (rc == 0) {
+		/*
+		 * Maybe this page is still waiting for a cpu to drain it
+		 * from one of the lru lists?
+		 */
+		rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+		if (rc == 0 && PageLRU(page))
+			goto redo;
+	}
+	return rc;
+}
+#endif
+
 /*
  * zone->lru_lock is heavily contended. Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
@@ -594,20 +831,18 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		if (!TestClearPageLRU(page))
-			BUG();
-		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It is being freed elsewhere
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, src);
-			continue;
-		} else {
-			list_add(&page->lru, dst);
+		switch (__isolate_lru_page(page)) {
+		case 1:
+			/* Succeeded to isolate page */
+			list_move(&page->lru, dst);
 			nr_taken++;
+			break;
+		case -ENOENT:
+			/* Not possible to isolate */
+			list_move(&page->lru, src);
+			break;
+		default:
+			BUG();
 		}
 	}
 
@@ -1226,7 +1461,7 @@ static int kswapd(void *p)
 	 * us from recursively trying to free more memory as we're
 	 * trying to free the first piece of memory in the first place).
 	 */
-	tsk->flags |= PF_MEMALLOC|PF_KSWAPD;
+	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
 
 	order = 0;
 	for ( ; ; ) {
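
For context, the new CONFIG_MIGRATION entry points combine as sketched below. This is a hypothetical caller for illustration only: the commit itself adds no such caller (the direct-migration patchset mentioned in the migrate_pages() comment supplies the real ones), and the name candidate_page is invented.

	LIST_HEAD(page_list);	/* pages isolated from the LRU */
	LIST_HEAD(moved);	/* pages migrate_pages() disposed of */
	LIST_HEAD(failed);	/* pages that failed permanently */
	struct page *candidate_page;	/* hypothetical: found by the caller */

	/* 1. Take the page off its LRU list; on success (rc == 1) a
	 *    reference is held and the caller must put the page on its
	 *    own list itself. */
	if (isolate_lru_page(candidate_page) == 1)
		list_add(&candidate_page->lru, &page_list);

	/* 2. "Migrate" the list. In this simplified version the "to"
	 *    list is unused, so NULL swaps everything out; the return
	 *    value counts pages that could not be migrated. */
	migrate_pages(&page_list, NULL, &moved, &failed);

	/* 3. Return everything to the LRU: swapped-out pages still hold
	 *    our isolation reference, and failed or leftover pages must
	 *    become visible to reclaim again. */
	putback_lru_pages(&moved);
	putback_lru_pages(&failed);
	putback_lru_pages(&page_list);
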