author     Christoph Lameter <clameter@sgi.com>    2006-01-08 04:00:52 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-01-08 23:12:42 -0500
commit     8419c3181086c86664e8246bc997afc2e4ffba4f (patch)
tree       25938e6f99bdaaffe8f6d357582eca56692b091c
parent     39743889aaf76725152f16aa90ca3c45f6d52da3 (diff)
[PATCH] SwapMig: CONFIG_MIGRATION fixes
Move move_to_lru(), putback_lru_pages() and isolate_lru_page() into the
section surrounded by CONFIG_MIGRATION, saving some code size for kernels
built without migration support (such as single-processor kernels).
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
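
The saving comes from compiling the helpers out entirely when
CONFIG_MIGRATION is unset, rather than leaving stubs behind. As a minimal
sketch of the pattern (CONFIG_EXAMPLE and example_count() are hypothetical
names, not part of this patch):

/* header: declaration visible only when the feature is built in */
#ifdef CONFIG_EXAMPLE
extern int example_count(struct list_head *l);
#endif

/* implementation file: the body exists only when CONFIG_EXAMPLE=y and
 * contributes no code size otherwise; every caller must sit behind the
 * same guard or the build fails to link.
 */
#ifdef CONFIG_EXAMPLE
int example_count(struct list_head *l)
{
	struct list_head *pos;
	int count = 0;

	list_for_each(pos, l)
		count++;
	return count;
}
#endif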
-rw-r--r--  include/linux/swap.h |   3
-rw-r--r--  mm/vmscan.c          | 152
2 files changed, 77 insertions(+), 78 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 117add066f00..997d838f0e70 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -175,10 +175,9 @@ extern int try_to_free_pages(struct zone **, gfp_t);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
+#ifdef CONFIG_MIGRATION
 extern int isolate_lru_page(struct page *p);
 extern int putback_lru_pages(struct list_head *l);
-
-#ifdef CONFIG_MIGRATION
 extern int migrate_pages(struct list_head *l, struct list_head *t);
 #endif
 
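
With the declarations now inside the #ifdef, they disappear together with
their definitions when CONFIG_MIGRATION is unset, so any call site outside
the migration code needs the same guard. A hedged sketch of such a call
site (the surrounding function and the pagelist are hypothetical, not from
this patch):

#ifdef CONFIG_MIGRATION
	LIST_HEAD(pagelist);

	/* take the page off the zone LRU and onto our private list */
	if (isolate_lru_page(page) == 1)
		list_add_tail(&page->lru, &pagelist);

	/* ... hand pagelist to migrate_pages() or similar ... */

	/* return whatever is left to the LRU */
	putback_lru_pages(&pagelist);
#endif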
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 58270aea669a..daed4a73b761 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -569,6 +569,40 @@ keep:
 }
 
 #ifdef CONFIG_MIGRATION
+static inline void move_to_lru(struct page *page)
+{
+	list_del(&page->lru);
+	if (PageActive(page)) {
+		/*
+		 * lru_cache_add_active checks that
+		 * the PG_active bit is off.
+		 */
+		ClearPageActive(page);
+		lru_cache_add_active(page);
+	} else {
+		lru_cache_add(page);
+	}
+	put_page(page);
+}
+
+/*
+ * Add isolated pages on the list back to the LRU
+ *
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+	struct page *page;
+	struct page *page2;
+	int count = 0;
+
+	list_for_each_entry_safe(page, page2, l, lru) {
+		move_to_lru(page);
+		count++;
+	}
+	return count;
+}
+
 /*
  * swapout a single page
  * page is locked upon entry, unlocked on exit
@@ -709,6 +743,48 @@ retry_later:
 
 	return nr_failed + retry;
 }
+
+static void lru_add_drain_per_cpu(void *dummy)
+{
+	lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists and put it on the
+ * indicated list. Do necessary cache draining if the
+ * page is not on the LRU lists yet.
+ *
+ * Result:
+ *  0 = page not on LRU list
+ *  1 = page removed from LRU list and added to the specified list.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+	int rc = 0;
+	struct zone *zone = page_zone(page);
+
+redo:
+	spin_lock_irq(&zone->lru_lock);
+	rc = __isolate_lru_page(page);
+	if (rc == 1) {
+		if (PageActive(page))
+			del_page_from_active_list(zone, page);
+		else
+			del_page_from_inactive_list(zone, page);
+	}
+	spin_unlock_irq(&zone->lru_lock);
+	if (rc == 0) {
+		/*
+		 * Maybe this page is still waiting for a cpu to drain it
+		 * from one of the lru lists?
+		 */
+		rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+		if (rc == 0 && PageLRU(page))
+			goto redo;
+	}
+	return rc;
+}
 #endif
 
 /*
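
The comment block above documents three outcomes for isolate_lru_page(),
and schedule_on_each_cpu() can additionally fail with its own error code.
A hedged sketch of how a caller might dispatch on the result (the pagelist
and the surrounding error handling are hypothetical):

	rc = isolate_lru_page(page);
	switch (rc) {
	case 1:		/* off the LRU; the page is now exclusively ours */
		list_add_tail(&page->lru, &pagelist);
		break;
	case 0:		/* not on any LRU list; leave it alone */
	case -ENOENT:	/* already being freed elsewhere; skip it */
		break;
	default:	/* schedule_on_each_cpu() failed */
		return rc;
	}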
@@ -758,48 +834,6 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 	return nr_taken;
 }
 
-static void lru_add_drain_per_cpu(void *dummy)
-{
-	lru_add_drain();
-}
-
-/*
- * Isolate one page from the LRU lists and put it on the
- * indicated list. Do necessary cache draining if the
- * page is not on the LRU lists yet.
- *
- * Result:
- *  0 = page not on LRU list
- *  1 = page removed from LRU list and added to the specified list.
- * -ENOENT = page is being freed elsewhere.
- */
-int isolate_lru_page(struct page *page)
-{
-	int rc = 0;
-	struct zone *zone = page_zone(page);
-
-redo:
-	spin_lock_irq(&zone->lru_lock);
-	rc = __isolate_lru_page(page);
-	if (rc == 1) {
-		if (PageActive(page))
-			del_page_from_active_list(zone, page);
-		else
-			del_page_from_inactive_list(zone, page);
-	}
-	spin_unlock_irq(&zone->lru_lock);
-	if (rc == 0) {
-		/*
-		 * Maybe this page is still waiting for a cpu to drain it
-		 * from one of the lru lists?
-		 */
-		rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
-		if (rc == 0 && PageLRU(page))
-			goto redo;
-	}
-	return rc;
-}
-
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
@@ -865,40 +899,6 @@ done:
 	pagevec_release(&pvec);
 }
 
-static inline void move_to_lru(struct page *page)
-{
-	list_del(&page->lru);
-	if (PageActive(page)) {
-		/*
-		 * lru_cache_add_active checks that
-		 * the PG_active bit is off.
-		 */
-		ClearPageActive(page);
-		lru_cache_add_active(page);
-	} else {
-		lru_cache_add(page);
-	}
-	put_page(page);
-}
-
-/*
- * Add isolated pages on the list back to the LRU
- *
- * returns the number of pages put back.
- */
-int putback_lru_pages(struct list_head *l)
-{
-	struct page *page;
-	struct page *page2;
-	int count = 0;
-
-	list_for_each_entry_safe(page, page2, l, lru) {
-		move_to_lru(page);
-		count++;
-	}
-	return count;
-}
-
 /*
  * This moves pages from the active list to the inactive list.
  *