path: root/mm/vmscan.c
author     Paul Mackerras <paulus@samba.org>  2006-02-23 22:05:47 -0500
committer  Paul Mackerras <paulus@samba.org>  2006-02-23 22:05:47 -0500
commit     a00428f5b149e36b8225b2a0812742a6dfb07b8c (patch)
tree       a78869cd67cf78a0eb091fb0ea5d397734bd6738 /mm/vmscan.c
parent     774fee58c465ea1c7e9775e347ec307bcf2deeb3 (diff)
parent     fb5c594c2acc441f0d2d8f457484a0e0e9285db3 (diff)
Merge ../powerpc-merge
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  106
1 file changed, 65 insertions, 41 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5a610804cd06..1838c15ca4fd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -443,6 +443,10 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                 BUG_ON(PageActive(page));
 
                 sc->nr_scanned++;
+
+                if (!sc->may_swap && page_mapped(page))
+                        goto keep_locked;
+
                 /* Double the slab pressure for mapped and swapcache pages */
                 if (page_mapped(page) || PageSwapCache(page))
                         sc->nr_scanned++;
@@ -632,7 +636,7 @@ static int swap_page(struct page *page)
         struct address_space *mapping = page_mapping(page);
 
         if (page_mapped(page) && mapping)
-                if (try_to_unmap(page, 0) != SWAP_SUCCESS)
+                if (try_to_unmap(page, 1) != SWAP_SUCCESS)
                         goto unlock_retry;
 
         if (PageDirty(page)) {
@@ -839,7 +843,7 @@ EXPORT_SYMBOL(migrate_page);
  * pages are swapped out.
  *
  * The function returns after 10 attempts or if no pages
- * are movable anymore because t has become empty
+ * are movable anymore because to has become empty
  * or no retryable pages exist anymore.
  *
  * Return: Number of pages not migrated when "to" ran empty.
@@ -928,12 +932,21 @@ redo:
                         goto unlock_both;
 
                 if (mapping->a_ops->migratepage) {
+                        /*
+                         * Most pages have a mapping and most filesystems
+                         * should provide a migration function. Anonymous
+                         * pages are part of swap space which also has its
+                         * own migration function. This is the most common
+                         * path for page migration.
+                         */
                         rc = mapping->a_ops->migratepage(newpage, page);
                         goto unlock_both;
                 }
 
                 /*
-                 * Trigger writeout if page is dirty
+                 * Default handling if a filesystem does not provide
+                 * a migration function. We can only migrate clean
+                 * pages so try to write out any dirty pages first.
                  */
                 if (PageDirty(page)) {
                         switch (pageout(page, mapping)) {
@@ -949,9 +962,10 @@ redo:
                                 ; /* try to migrate the page below */
                         }
                 }
+
                 /*
-                 * If we have no buffer or can release the buffer
-                 * then do a simple migration.
+                 * Buffers are managed in a filesystem specific way.
+                 * We must have no buffers or drop them.
                  */
                 if (!page_has_buffers(page) ||
                             try_to_release_page(page, GFP_KERNEL)) {
@@ -966,6 +980,11 @@ redo:
                  * swap them out.
                  */
                 if (pass > 4) {
+                        /*
+                         * Persistently unable to drop buffers..... As a
+                         * measure of last resort we fall back to
+                         * swap_page().
+                         */
                         unlock_page(newpage);
                         newpage = NULL;
                         rc = swap_page(page);
@@ -1176,9 +1195,47 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
         struct page *page;
         struct pagevec pvec;
         int reclaim_mapped = 0;
-        long mapped_ratio;
-        long distress;
-        long swap_tendency;
+
+        if (unlikely(sc->may_swap)) {
+                long mapped_ratio;
+                long distress;
+                long swap_tendency;
+
+                /*
+                 * `distress' is a measure of how much trouble we're having
+                 * reclaiming pages. 0 -> no problems. 100 -> great trouble.
+                 */
+                distress = 100 >> zone->prev_priority;
+
+                /*
+                 * The point of this algorithm is to decide when to start
+                 * reclaiming mapped memory instead of just pagecache. Work out
+                 * how much memory
+                 * is mapped.
+                 */
+                mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+
+                /*
+                 * Now decide how much we really want to unmap some pages. The
+                 * mapped ratio is downgraded - just because there's a lot of
+                 * mapped memory doesn't necessarily mean that page reclaim
+                 * isn't succeeding.
+                 *
+                 * The distress ratio is important - we don't want to start
+                 * going oom.
+                 *
+                 * A 100% value of vm_swappiness overrides this algorithm
+                 * altogether.
+                 */
+                swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+
+                /*
+                 * Now use this metric to decide whether to start moving mapped
+                 * memory onto the inactive list.
+                 */
+                if (swap_tendency >= 100)
+                        reclaim_mapped = 1;
+        }
 
         lru_add_drain();
         spin_lock_irq(&zone->lru_lock);
@@ -1188,37 +1245,6 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
         zone->nr_active -= pgmoved;
         spin_unlock_irq(&zone->lru_lock);
 
-        /*
-         * `distress' is a measure of how much trouble we're having reclaiming
-         * pages. 0 -> no problems. 100 -> great trouble.
-         */
-        distress = 100 >> zone->prev_priority;
-
-        /*
-         * The point of this algorithm is to decide when to start reclaiming
-         * mapped memory instead of just pagecache. Work out how much memory
-         * is mapped.
-         */
-        mapped_ratio = (sc->nr_mapped * 100) / total_memory;
-
-        /*
-         * Now decide how much we really want to unmap some pages. The mapped
-         * ratio is downgraded - just because there's a lot of mapped memory
-         * doesn't necessarily mean that page reclaim isn't succeeding.
-         *
-         * The distress ratio is important - we don't want to start going oom.
-         *
-         * A 100% value of vm_swappiness overrides this algorithm altogether.
-         */
-        swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
-
-        /*
-         * Now use this metric to decide whether to start moving mapped memory
-         * onto the inactive list.
-         */
-        if (swap_tendency >= 100)
-                reclaim_mapped = 1;
-
         while (!list_empty(&l_hold)) {
                 cond_resched();
                 page = lru_to_page(&l_hold);
@@ -1595,9 +1621,7 @@ scan:
                         sc.nr_reclaimed = 0;
                         sc.priority = priority;
                         sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
-                        atomic_inc(&zone->reclaim_in_progress);
                         shrink_zone(zone, &sc);
-                        atomic_dec(&zone->reclaim_in_progress);
                         reclaim_state->reclaimed_slab = 0;
                         nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                         lru_pages);
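
The comment blocks added around the migrate_pages() hunks above describe a fallback order: use the filesystem's migratepage address_space operation when one is provided, otherwise write out dirty pages, drop any buffers and do a simple migration, and only after repeated failed passes (pass > 4) fall back to swap_page() as a last resort. The sketch below is a rough user-space model of that ordering, not kernel code: the enum labels and the pick_path() helper are invented for illustration, and the real loop retries a page on later passes rather than settling everything in one call.

#include <stdio.h>

/* Hypothetical labels for the outcomes the hunks above describe. */
enum migrate_path {
        VIA_AOPS_MIGRATEPAGE,   /* mapping->a_ops->migratepage() */
        WRITEBACK_FIRST,        /* dirty page: write it out before migrating */
        PLAIN_MIGRATE,          /* clean, bufferless (or droppable buffers) */
        LAST_RESORT_SWAP,       /* pass > 4: swap the page out instead */
        RETRY_NEXT_PASS,
};

/* Simplified decision model: the kernel revisits a page on later passes
 * instead of resolving everything in a single call. */
static enum migrate_path pick_path(int has_migratepage_op, int dirty,
                                   int has_buffers, int buffers_droppable,
                                   int pass)
{
        if (has_migratepage_op)
                return VIA_AOPS_MIGRATEPAGE;
        if (dirty)
                return WRITEBACK_FIRST;
        if (!has_buffers || buffers_droppable)
                return PLAIN_MIGRATE;
        if (pass > 4)
                return LAST_RESORT_SWAP;
        return RETRY_NEXT_PASS;
}

int main(void)
{
        /* A dirty page on a filesystem without a migratepage op gets
         * written back before anything else is attempted. */
        printf("%d\n", pick_path(0, 1, 1, 0, 0));
        return 0;
}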
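
The refill_inactive_zone() hunks move the swap_tendency heuristic, unchanged, under the sc->may_swap test, so the arithmetic is only done when swapping is allowed. The stand-alone snippet below is an illustrative model, not kernel code: should_reclaim_mapped() and the sample figures are made up, though the parameter names follow the diff. With half of memory mapped, prev_priority 6, and vm_swappiness at its common default of 60, the tendency is 50/2 + (100 >> 6) + 60 = 86, which stays under the 100 threshold, so mapped pages are left on the active list.

#include <stdio.h>

/* Stand-alone model of the swap_tendency calculation shown above. */
static int should_reclaim_mapped(long nr_mapped, long total_memory,
                                 int prev_priority, long vm_swappiness)
{
        /* 0 -> no reclaim trouble, 100 -> great trouble */
        long distress = 100 >> prev_priority;

        /* Percentage of memory that is mapped */
        long mapped_ratio = (nr_mapped * 100) / total_memory;

        /* Halved mapped ratio plus distress plus swappiness; a swappiness
         * of 100 overrides the rest of the heuristic. */
        long swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

        return swap_tendency >= 100;
}

int main(void)
{
        /* 50% mapped, priority 6, swappiness 60: 25 + 1 + 60 = 86 < 100 */
        printf("%d\n", should_reclaim_mapped(512, 1024, 6, 60));
        /* Same load under heavy distress (priority 0): 25 + 100 + 60 >= 100 */
        printf("%d\n", should_reclaim_mapped(512, 1024, 0, 60));
        return 0;
}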