Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	| 143
1 file changed, 77 insertions(+), 66 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d196f46c8808..b07c48b09a93 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -52,6 +52,9 @@ struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
 	unsigned long nr_scanned;
 
+	/* Number of pages freed so far during a call to shrink_zones() */
+	unsigned long nr_reclaimed;
+
 	/* This context's GFP mask */
 	gfp_t gfp_mask;
 
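The hunk above moves the running reclaim count into struct scan_control itself, so shrink_zone() and its callers share one accumulator instead of summing return values at every call site. A minimal userspace sketch of that pattern (names mirror the patch, but the code is illustrative, not kernel source):

#include <stdio.h>

/* Toy scan_control: reclaim progress lives in the context struct. */
struct scan_control {
	unsigned long nr_scanned;
	unsigned long nr_reclaimed;	/* shared running total */
};

/* Stand-in for shrink_zone(): returns void, updates the struct. */
static void shrink_one_zone(struct scan_control *sc, unsigned long freed)
{
	sc->nr_reclaimed += freed;
}

int main(void)
{
	struct scan_control sc = { 0, 0 };

	shrink_one_zone(&sc, 16);
	shrink_one_zone(&sc, 32);
	printf("reclaimed %lu pages\n", sc.nr_reclaimed);	/* prints 48 */
	return 0;
}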
@@ -617,7 +620,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 					referenced && page_mapping_inuse(page))
 			goto activate_locked;
 
-#ifdef CONFIG_SWAP
 		/*
 		 * Anonymous process memory has backing store?
 		 * Try to allocate it some swap space here.
@@ -625,20 +627,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (PageAnon(page) && !PageSwapCache(page)) {
 			if (!(sc->gfp_mask & __GFP_IO))
 				goto keep_locked;
-			switch (try_to_munlock(page)) {
-			case SWAP_FAIL:		/* shouldn't happen */
-			case SWAP_AGAIN:
-				goto keep_locked;
-			case SWAP_MLOCK:
-				goto cull_mlocked;
-			case SWAP_SUCCESS:
-				; /* fall thru'; add to swap cache */
-			}
-			if (!add_to_swap(page, GFP_ATOMIC))
+			if (!add_to_swap(page))
 				goto activate_locked;
 			may_enter_fs = 1;
 		}
-#endif /* CONFIG_SWAP */
 
 		mapping = page_mapping(page);
 
@@ -752,6 +744,8 @@ free_it:
 		continue;
 
 cull_mlocked:
+		if (PageSwapCache(page))
+			try_to_free_swap(page);
 		unlock_page(page);
 		putback_lru_page(page);
 		continue;
@@ -759,7 +753,7 @@ cull_mlocked:
 activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
 		if (PageSwapCache(page) && vm_swap_full())
-			remove_exclusive_swap_page_ref(page);
+			try_to_free_swap(page);
 		VM_BUG_ON(PageActive(page));
 		SetPageActive(page);
 		pgactivate++;
@@ -1173,11 +1167,6 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
 		zone->prev_priority = priority;
 }
 
-static inline int zone_is_near_oom(struct zone *zone)
-{
-	return zone->pages_scanned >= (zone_lru_pages(zone) * 3);
-}
-
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -1248,6 +1237,13 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		list_add(&page->lru, &l_inactive);
 	}
 
+	/*
+	 * Move the pages to the [file or anon] inactive list.
+	 */
+	pagevec_init(&pvec, 1);
+	pgmoved = 0;
+	lru = LRU_BASE + file * LRU_FILE;
+
 	spin_lock_irq(&zone->lru_lock);
 	/*
 	 * Count referenced pages from currently used mappings as
@@ -1255,15 +1251,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * This helps balance scan pressure between file and anonymous
 	 * pages in get_scan_ratio.
 	 */
-	zone->recent_rotated[!!file] += pgmoved;
-
-	/*
-	 * Move the pages to the [file or anon] inactive list.
-	 */
-	pagevec_init(&pvec, 1);
+	if (scan_global_lru(sc))
+		zone->recent_rotated[!!file] += pgmoved;
 
-	pgmoved = 0;
-	lru = LRU_BASE + file * LRU_FILE;
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1336,12 +1326,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	unsigned long anon_prio, file_prio;
 	unsigned long ap, fp;
 
-	anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
-		zone_page_state(zone, NR_INACTIVE_ANON);
-	file  = zone_page_state(zone, NR_ACTIVE_FILE) +
-		zone_page_state(zone, NR_INACTIVE_FILE);
-	free  = zone_page_state(zone, NR_FREE_PAGES);
-
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (nr_swap_pages <= 0) {
 		percent[0] = 0;
@@ -1349,6 +1333,12 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 		return;
 	}
 
+	anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
+		zone_page_state(zone, NR_INACTIVE_ANON);
+	file  = zone_page_state(zone, NR_ACTIVE_FILE) +
+		zone_page_state(zone, NR_INACTIVE_FILE);
+	free  = zone_page_state(zone, NR_FREE_PAGES);
+
 	/* If we have very few page cache pages, force-scan anon pages. */
 	if (unlikely(file + free <= zone->pages_high)) {
 		percent[0] = 100;
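These two hunks reorder get_scan_ratio() so the cheap no-swap early exit runs before the per-zone counters are gathered; previously, the anon/file/free totals were computed even when they were about to be thrown away. A hedged sketch of the reordering (the ratio math is stubbed out; only the control flow is the point):

#include <stdio.h>

/* Illustrative only: take the cheap early exit before gathering stats. */
static void scan_percentages(long nr_swap_pages, unsigned long percent[2])
{
	/* If we have no swap space, do not bother scanning anon pages. */
	if (nr_swap_pages <= 0) {
		percent[0] = 0;		/* anon */
		percent[1] = 100;	/* file */
		return;
	}
	/* Only now would the anon/file/free counters be gathered. */
	percent[0] = 50;
	percent[1] = 50;
}

int main(void)
{
	unsigned long percent[2];

	scan_percentages(0, percent);
	printf("anon %lu%%, file %lu%%\n", percent[0], percent[1]);
	return 0;
}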
@@ -1408,14 +1398,15 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static unsigned long shrink_zone(int priority, struct zone *zone,
+static void shrink_zone(int priority, struct zone *zone,
 				struct scan_control *sc)
 {
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
-	unsigned long nr_reclaimed = 0;
 	unsigned long percent[2];	/* anon @ 0; file @ 1 */
 	enum lru_list l;
+	unsigned long nr_reclaimed = sc->nr_reclaimed;
+	unsigned long swap_cluster_max = sc->swap_cluster_max;
 
 	get_scan_ratio(zone, sc, percent);
 
@@ -1431,7 +1422,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 		}
 		zone->lru[l].nr_scan += scan;
 		nr[l] = zone->lru[l].nr_scan;
-		if (nr[l] >= sc->swap_cluster_max)
+		if (nr[l] >= swap_cluster_max)
 			zone->lru[l].nr_scan = 0;
 		else
 			nr[l] = 0;
@@ -1450,16 +1441,28 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 					nr[LRU_INACTIVE_FILE]) {
 		for_each_evictable_lru(l) {
 			if (nr[l]) {
-				nr_to_scan = min(nr[l],
-					(unsigned long)sc->swap_cluster_max);
+				nr_to_scan = min(nr[l], swap_cluster_max);
 				nr[l] -= nr_to_scan;
 
 				nr_reclaimed += shrink_list(l, nr_to_scan,
 							zone, sc, priority);
 			}
 		}
+		/*
+		 * On large memory systems, scan >> priority can become
+		 * really large. This is fine for the starting priority;
+		 * we want to put equal scanning pressure on each zone.
+		 * However, if the VM has a harder time of freeing pages,
+		 * with multiple processes reclaiming pages, the total
+		 * freeing target can get unreasonably large.
+		 */
+		if (nr_reclaimed > swap_cluster_max &&
+			priority < DEF_PRIORITY && !current_is_kswapd())
+			break;
 	}
 
+	sc->nr_reclaimed = nr_reclaimed;
+
 	/*
 	 * Even if we did not try to evict anon pages at all, we want to
 	 * rebalance the anon lru active/inactive ratio.
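The new break lets a direct reclaimer leave shrink_zone() once it has freed more than swap_cluster_max pages at an elevated priority; kswapd is exempt because it must balance the whole zone. A standalone sketch of just that test (DEF_PRIORITY is 12 in this kernel; the helper name is made up for illustration):

#include <assert.h>
#include <stdbool.h>

#define DEF_PRIORITY 12	/* starting reclaim priority */

/* Illustrative model of the bail-out condition added above. */
static bool reclaim_should_bail(unsigned long nr_reclaimed,
				unsigned long swap_cluster_max,
				int priority, bool is_kswapd)
{
	return nr_reclaimed > swap_cluster_max &&
	       priority < DEF_PRIORITY && !is_kswapd;
}

int main(void)
{
	/* Direct reclaim with enough progress at raised pressure: stop. */
	assert(reclaim_should_bail(64, 32, 10, false));
	/* kswapd keeps going; it is responsible for the whole zone. */
	assert(!reclaim_should_bail(64, 32, 10, true));
	/* The first pass applies equal pressure to every zone. */
	assert(!reclaim_should_bail(64, 32, DEF_PRIORITY, false));
	return 0;
}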
@@ -1470,7 +1473,6 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
 	throttle_vm_writeout(sc->gfp_mask);
-	return nr_reclaimed;
 }
 
 /*
@@ -1484,16 +1486,13 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
  * b) The zones may be over pages_high but they must go *over* pages_high to
  *    satisfy the `incremental min' zone defense algorithm.
  *
- * Returns the number of reclaimed pages.
- *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
-	unsigned long nr_reclaimed = 0;
 	struct zoneref *z;
 	struct zone *zone;
 
@@ -1524,10 +1523,8 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
 							priority);
 		}
 
-		nr_reclaimed += shrink_zone(priority, zone, sc);
+		shrink_zone(priority, zone, sc);
 	}
-
-	return nr_reclaimed;
 }
 
 /*
@@ -1552,7 +1549,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	int priority;
 	unsigned long ret = 0;
 	unsigned long total_scanned = 0;
-	unsigned long nr_reclaimed = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long lru_pages = 0;
 	struct zoneref *z;
@@ -1580,7 +1576,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
-		nr_reclaimed += shrink_zones(priority, zonelist, sc);
+		shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
@@ -1588,13 +1584,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		if (scan_global_lru(sc)) {
 			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
 			if (reclaim_state) {
-				nr_reclaimed += reclaim_state->reclaimed_slab;
+				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 				reclaim_state->reclaimed_slab = 0;
 			}
 		}
 		total_scanned += sc->nr_scanned;
-		if (nr_reclaimed >= sc->swap_cluster_max) {
-			ret = nr_reclaimed;
+		if (sc->nr_reclaimed >= sc->swap_cluster_max) {
+			ret = sc->nr_reclaimed;
 			goto out;
 		}
 
@@ -1617,7 +1613,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	}
 	/* top priority shrink_zones still had more to do? don't OOM, then */
 	if (!sc->all_unreclaimable && scan_global_lru(sc))
-		ret = nr_reclaimed;
+		ret = sc->nr_reclaimed;
 out:
 	/*
 	 * Now that we've scanned all the zones at this priority level, note
@@ -1712,7 +1708,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 	int priority;
 	int i;
 	unsigned long total_scanned;
-	unsigned long nr_reclaimed;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -1731,7 +1726,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 
 loop_again:
 	total_scanned = 0;
-	nr_reclaimed = 0;
+	sc.nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode;
 	count_vm_event(PAGEOUTRUN);
 
@@ -1817,11 +1812,11 @@ loop_again:
 			 */
 			if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
 						end_zone, 0))
-				nr_reclaimed += shrink_zone(priority, zone, &sc);
+				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
 						lru_pages);
-			nr_reclaimed += reclaim_state->reclaimed_slab;
+			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 			if (zone_is_all_unreclaimable(zone))
 				continue;
@@ -1835,7 +1830,7 @@ loop_again:
 			 * even in laptop mode
 			 */
 			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
-			    total_scanned > nr_reclaimed + nr_reclaimed / 2)
+			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
 				sc.may_writepage = 1;
 		}
 		if (all_zones_ok)
@@ -1853,7 +1848,7 @@ loop_again:
 		 * matches the direct reclaim path behaviour in terms of impact
 		 * on zone->*_priority.
 		 */
-		if (nr_reclaimed >= SWAP_CLUSTER_MAX)
+		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
 			break;
 	}
 out:
@@ -1872,10 +1867,27 @@ out:
 
 		try_to_freeze();
 
+		/*
+		 * Fragmentation may mean that the system cannot be
+		 * rebalanced for high-order allocations in all zones.
+		 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
+		 * it means the zones have been fully scanned and are still
+		 * not balanced. For high-order allocations, there is
+		 * little point trying all over again as kswapd may
+		 * infinite loop.
+		 *
+		 * Instead, recheck all watermarks at order-0 as they
+		 * are the most important. If watermarks are ok, kswapd will go
+		 * back to sleep. High-order users can still perform direct
+		 * reclaim if they wish.
+		 */
+		if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
+			order = sc.order = 0;
+
 		goto loop_again;
 	}
 
-	return nr_reclaimed;
+	return sc.nr_reclaimed;
 }
 
 /*
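This balance_pgdat() hunk stops kswapd from spinning on a fragmented high-order request: if a full scan pass freed fewer than SWAP_CLUSTER_MAX pages, the next pass rechecks watermarks at order 0 and leaves high-order work to direct reclaim. A toy version of that decision (the function name is illustrative):

#include <assert.h>

#define SWAP_CLUSTER_MAX 32UL	/* reclaim batch size */

/* Illustrative model of the order-0 fallback added above. */
static int next_balance_order(int order, unsigned long nr_reclaimed)
{
	if (order > 0 && nr_reclaimed < SWAP_CLUSTER_MAX)
		return 0;	/* fully scanned, still unbalanced: retry at order-0 */
	return order;
}

int main(void)
{
	assert(next_balance_order(3, 8) == 0);	/* little progress: drop order */
	assert(next_balance_order(3, 64) == 3);	/* progress: keep the request */
	assert(next_balance_order(0, 8) == 0);
	return 0;
}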
@@ -2227,7 +2239,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct task_struct *p = current;
 	struct reclaim_state reclaim_state;
 	int priority;
-	unsigned long nr_reclaimed = 0;
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -2260,9 +2271,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		priority = ZONE_RECLAIM_PRIORITY;
 		do {
 			note_zone_scanning_priority(zone, priority);
-			nr_reclaimed += shrink_zone(priority, zone, &sc);
+			shrink_zone(priority, zone, &sc);
 			priority--;
-		} while (priority >= 0 && nr_reclaimed < nr_pages);
+		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
 	}
 
 	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
@@ -2286,13 +2297,13 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		 * Update nr_reclaimed by the number of slab pages we
 		 * reclaimed from this zone.
 		 */
-		nr_reclaimed += slab_reclaimable -
+		sc.nr_reclaimed += slab_reclaimable -
 			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
 	}
 
 	p->reclaim_state = NULL;
 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
-	return nr_reclaimed >= nr_pages;
+	return sc.nr_reclaimed >= nr_pages;
 }
 
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
@@ -2472,7 +2483,7 @@ void scan_mapping_unevictable_pages(struct address_space *mapping)
  * back onto @zone's unevictable list.
  */
 #define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
-void scan_zone_unevictable_pages(struct zone *zone)
+static void scan_zone_unevictable_pages(struct zone *zone)
 {
 	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
 	unsigned long scan;
@@ -2514,7 +2525,7 @@ void scan_zone_unevictable_pages(struct zone *zone)
  * that has possibly/probably made some previously unevictable pages
  * evictable.
  */
-void scan_all_zones_unevictable_pages(void)
+static void scan_all_zones_unevictable_pages(void)
 {
 	struct zone *zone;
 