path: root/mm/vmscan.c
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	213
1 files changed, 113 insertions, 100 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3ff3311447f5..915dceb487c1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -73,10 +73,14 @@ struct scan_control {
 
 	int swappiness;
 
-	int all_unreclaimable;
-
 	int order;
 
+	/*
+	 * Intend to reclaim enough contiguous memory rather than just
+	 * enough memory, i.e. the mode used for high-order allocations.
+	 */
+	bool lumpy_reclaim_mode;
+
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
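The new flag centralizes lumpy reclaim detection: instead of testing sc->order against PAGE_ALLOC_COSTLY_ORDER at each call site, the mode is decided up front in shrink_zone() by set_lumpy_reclaim_mode() (added further down) and simply read wherever it matters. A minimal userspace sketch of that decision, assuming the usual kernel constants PAGE_ALLOC_COSTLY_ORDER == 3 and DEF_PRIORITY == 12:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER	3	/* as in include/linux/mmzone.h */
#define DEF_PRIORITY		12	/* as in mm/vmscan.c */

/* Mirrors the policy in set_lumpy_reclaim_mode(): lumpy reclaim is used
 * for costly (large contiguous) allocations, or for any high-order
 * allocation once several reclaim passes have failed to make progress. */
static bool lumpy_reclaim_mode(int order, int priority)
{
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return true;
	if (order && priority < DEF_PRIORITY - 2)
		return true;
	return false;
}

int main(void)
{
	printf("%d %d %d\n",
	       lumpy_reclaim_mode(4, DEF_PRIORITY),	/* 1: costly order */
	       lumpy_reclaim_mode(1, DEF_PRIORITY),	/* 0: first pass   */
	       lumpy_reclaim_mode(1, 5));		/* 1: under pressure */
	return 0;
}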
@@ -85,12 +89,6 @@ struct scan_control {
 	 * are scanned.
 	 */
 	nodemask_t *nodemask;
-
-	/* Pluggable isolate pages callback */
-	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
-			unsigned long *scanned, int order, int mode,
-			struct zone *z, struct mem_cgroup *mem_cont,
-			int active, int file);
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -575,7 +573,7 @@ static enum page_references page_check_references(struct page *page,
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+	if (sc->lumpy_reclaim_mode)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -839,11 +837,6 @@ keep:
 	return nr_reclaimed;
 }
 
-/* LRU Isolation modes. */
-#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
-#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
-#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
-
 /*
  * Attempt to remove the specified page from its LRU. Only take this page
  * if it is of the appropriate PageActive status. Pages which are being
@@ -1011,7 +1004,6 @@ static unsigned long isolate_pages_global(unsigned long nr,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
 					int mode, struct zone *z,
-					struct mem_cgroup *mem_cont,
 					int active, int file)
 {
 	int lru = LRU_BASE;
@@ -1130,7 +1122,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int lumpy_reclaim = 0;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1140,17 +1131,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			return SWAP_CLUSTER_MAX;
 	}
 
-	/*
-	 * If we need a large contiguous chunk of memory, or have
-	 * trouble getting a small set of contiguous pages, we
-	 * will reclaim both active and inactive pages.
-	 *
-	 * We use the same threshold as pageout congestion_wait below.
-	 */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		lumpy_reclaim = 1;
-	else if (sc->order && priority < DEF_PRIORITY - 2)
-		lumpy_reclaim = 1;
 
 	pagevec_init(&pvec, 1);
 
@@ -1163,15 +1143,15 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_freed;
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+		int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 		unsigned long nr_anon;
 		unsigned long nr_file;
 
-		nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
-			&page_list, &nr_scan, sc->order, mode,
-			zone, sc->mem_cgroup, 0, file);
-
 		if (scanning_global_lru(sc)) {
+			nr_taken = isolate_pages_global(SWAP_CLUSTER_MAX,
+							&page_list, &nr_scan,
+							sc->order, mode,
+							zone, 0, file);
 			zone->pages_scanned += nr_scan;
 			if (current_is_kswapd())
 				__count_zone_vm_events(PGSCAN_KSWAPD, zone,
@@ -1179,6 +1159,16 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			else
 				__count_zone_vm_events(PGSCAN_DIRECT, zone,
 						       nr_scan);
+		} else {
+			nr_taken = mem_cgroup_isolate_pages(SWAP_CLUSTER_MAX,
+							&page_list, &nr_scan,
+							sc->order, mode,
+							zone, sc->mem_cgroup,
+							0, file);
+			/*
+			 * mem_cgroup_isolate_pages() keeps track of
+			 * scanned pages on its own.
+			 */
 		}
 
 		if (nr_taken == 0)
@@ -1216,7 +1206,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		 * but that should be acceptable to the caller
 		 */
 		if (nr_freed < nr_taken && !current_is_kswapd() &&
-		    lumpy_reclaim) {
+		    sc->lumpy_reclaim_mode) {
			congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 			/*
@@ -1356,16 +1346,23 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
-	nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
-					ISOLATE_ACTIVE, zone,
-					sc->mem_cgroup, 1, file);
-	/*
-	 * zone->pages_scanned is used for detect zone's oom
-	 * mem_cgroup remembers nr_scan by itself.
-	 */
 	if (scanning_global_lru(sc)) {
+		nr_taken = isolate_pages_global(nr_pages, &l_hold,
+						&pgscanned, sc->order,
+						ISOLATE_ACTIVE, zone,
+						1, file);
 		zone->pages_scanned += pgscanned;
+	} else {
+		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
+						&pgscanned, sc->order,
+						ISOLATE_ACTIVE, zone,
+						sc->mem_cgroup, 1, file);
+		/*
+		 * mem_cgroup_isolate_pages() keeps track of
+		 * scanned pages on its own.
+		 */
 	}
+
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
@@ -1519,21 +1516,52 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 }
 
 /*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we collected @swap_cluster_max pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+				       unsigned long *nr_saved_scan)
+{
+	unsigned long nr;
+
+	*nr_saved_scan += nr_to_scan;
+	nr = *nr_saved_scan;
+
+	if (nr >= SWAP_CLUSTER_MAX)
+		*nr_saved_scan = 0;
+	else
+		nr = 0;
+
+	return nr;
+}
+
+/*
  * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned we did rotate back
 * onto the active list instead of evict.
 *
- * percent[0] specifies how much pressure to put on ram/swap backed
- * memory, while percent[1] determines pressure on the file LRUs.
+ * nr[0] = anon pages to scan; nr[1] = file pages to scan
 */
-static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
-					unsigned long *percent)
+static void get_scan_count(struct zone *zone, struct scan_control *sc,
+					unsigned long *nr, int priority)
 {
 	unsigned long anon, file, free;
 	unsigned long anon_prio, file_prio;
 	unsigned long ap, fp;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+	u64 fraction[2], denominator;
+	enum lru_list l;
+	int noswap = 0;
+
+	/* If we have no swap space, do not bother scanning anon pages. */
+	if (!sc->may_swap || (nr_swap_pages <= 0)) {
+		noswap = 1;
+		fraction[0] = 0;
+		fraction[1] = 1;
+		denominator = 1;
+		goto out;
+	}
 
 	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
 		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
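nr_scan_try_batch(), moved above its new caller get_scan_count(), banks small scan requests in *nr_saved_scan and only releases them once at least SWAP_CLUSTER_MAX pages have accumulated, so the LRU lists are always scanned in reasonably sized batches. A self-contained userspace sketch of the same accumulator, assuming SWAP_CLUSTER_MAX == 32 as in include/linux/swap.h:

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32	/* as in include/linux/swap.h */

/* Same logic as nr_scan_try_batch(): bank small requests, release them
 * in one batch once the accumulated total reaches SWAP_CLUSTER_MAX. */
static unsigned long scan_try_batch(unsigned long nr_to_scan,
				    unsigned long *nr_saved_scan)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= SWAP_CLUSTER_MAX)
		*nr_saved_scan = 0;
	else
		nr = 0;

	return nr;
}

int main(void)
{
	unsigned long saved = 0;
	int round;

	/* Ten pages requested per round: rounds 1-3 scan nothing,
	 * round 4 releases the banked 40 pages in one batch. */
	for (round = 1; round <= 4; round++) {
		unsigned long scan = scan_try_batch(10, &saved);

		printf("round %d: scan %lu (still banked %lu)\n",
		       round, scan, saved);
	}
	return 0;
}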
@@ -1545,9 +1573,10 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 		/* If we have very few page cache pages,
 		   force-scan anon pages. */
 		if (unlikely(file + free <= high_wmark_pages(zone))) {
-			percent[0] = 100;
-			percent[1] = 0;
-			return;
+			fraction[0] = 1;
+			fraction[1] = 0;
+			denominator = 1;
+			goto out;
 		}
 	}
 
@@ -1594,29 +1623,37 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
 	fp /= reclaim_stat->recent_rotated[1] + 1;
 
-	/* Normalize to percentages */
-	percent[0] = 100 * ap / (ap + fp + 1);
-	percent[1] = 100 - percent[0];
+	fraction[0] = ap;
+	fraction[1] = fp;
+	denominator = ap + fp + 1;
+out:
+	for_each_evictable_lru(l) {
+		int file = is_file_lru(l);
+		unsigned long scan;
+
+		scan = zone_nr_lru_pages(zone, sc, l);
+		if (priority || noswap) {
+			scan >>= priority;
+			scan = div64_u64(scan * fraction[file], denominator);
+		}
+		nr[l] = nr_scan_try_batch(scan,
+					  &reclaim_stat->nr_saved_scan[l]);
+	}
 }
 
-/*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
- */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
-				unsigned long *nr_saved_scan)
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
 {
-	unsigned long nr;
-
-	*nr_saved_scan += nr_to_scan;
-	nr = *nr_saved_scan;
-
-	if (nr >= SWAP_CLUSTER_MAX)
-		*nr_saved_scan = 0;
+	/*
+	 * If we need a large contiguous chunk of memory, or have
+	 * trouble getting a small set of contiguous pages, we
+	 * will reclaim both active and inactive pages.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		sc->lumpy_reclaim_mode = 1;
+	else if (sc->order && priority < DEF_PRIORITY - 2)
+		sc->lumpy_reclaim_mode = 1;
 	else
-		nr = 0;
-
-	return nr;
+		sc->lumpy_reclaim_mode = 0;
 }
 
 /*
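With get_scan_count() the anon/file balance is no longer rounded to whole percentages: the rotation-derived weights ap and fp are kept as a raw fraction and applied per LRU with 64-bit arithmetic, after the list size has been shifted down by the current priority. A small userspace model of that per-LRU arithmetic (the values for ap, fp and the LRU size are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's div64_u64(). */
static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

/* Per-LRU scan target as computed at the "out:" label of get_scan_count():
 * shift by priority first, then apply the anon/file fraction. */
static unsigned long scan_target(unsigned long lru_pages, int priority,
				 uint64_t fraction, uint64_t denominator)
{
	unsigned long scan = lru_pages;

	scan >>= priority;
	scan = div64_u64((uint64_t)scan * fraction, denominator);
	return scan;
}

int main(void)
{
	/* Hypothetical reclaim state: ap = 300, fp = 700. */
	uint64_t ap = 300, fp = 700, denominator = ap + fp + 1;

	/* 1,000,000-page LRUs at priority 12 (DEF_PRIORITY). */
	printf("anon: %lu file: %lu\n",
	       scan_target(1000000, 12, ap, denominator),	/* ~73 pages  */
	       scan_target(1000000, 12, fp, denominator));	/* ~170 pages */
	return 0;
}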
@@ -1627,33 +1664,13 @@ static void shrink_zone(int priority, struct zone *zone,
 {
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
-	unsigned long percent[2];	/* anon @ 0; file @ 1 */
 	enum lru_list l;
 	unsigned long nr_reclaimed = sc->nr_reclaimed;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int noswap = 0;
-
-	/* If we have no swap space, do not bother scanning anon pages. */
-	if (!sc->may_swap || (nr_swap_pages <= 0)) {
-		noswap = 1;
-		percent[0] = 0;
-		percent[1] = 100;
-	} else
-		get_scan_ratio(zone, sc, percent);
 
-	for_each_evictable_lru(l) {
-		int file = is_file_lru(l);
-		unsigned long scan;
-
-		scan = zone_nr_lru_pages(zone, sc, l);
-		if (priority || noswap) {
-			scan >>= priority;
-			scan = (scan * percent[file]) / 100;
-		}
-		nr[l] = nr_scan_try_batch(scan,
-					&reclaim_stat->nr_saved_scan[l]);
-	}
+	get_scan_count(zone, sc, nr, priority);
+
+	set_lumpy_reclaim_mode(priority, sc);
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
@@ -1707,14 +1724,14 @@ static void shrink_zone(int priority, struct zone *zone,
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static int shrink_zones(int priority, struct zonelist *zonelist,
					struct scan_control *sc)
 {
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	struct zoneref *z;
 	struct zone *zone;
+	int progress = 0;
 
-	sc->all_unreclaimable = 1;
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 					sc->nodemask) {
 		if (!populated_zone(zone))
@@ -1730,19 +1747,19 @@ static int shrink_zones(int priority, struct zonelist *zonelist,
 
 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 				continue;	/* Let kswapd poll it */
-			sc->all_unreclaimable = 0;
 		} else {
 			/*
 			 * Ignore cpuset limitation here. We just want to reduce
 			 * # of used pages by us regardless of memory shortage.
 			 */
-			sc->all_unreclaimable = 0;
 			mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
							priority);
 		}
 
 		shrink_zone(priority, zone, sc);
+		progress = 1;
 	}
+	return progress;
 }
 
 /*
@@ -1774,6 +1791,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	unsigned long writeback_threshold;
 
+	get_mems_allowed();
 	delayacct_freepages_start();
 
 	if (scanning_global_lru(sc))
@@ -1795,7 +1813,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
-		shrink_zones(priority, zonelist, sc);
+		ret = shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
@@ -1832,7 +1850,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 	/* top priority shrink_zones still had more to do? don't OOM, then */
-	if (!sc->all_unreclaimable && scanning_global_lru(sc))
+	if (ret && scanning_global_lru(sc))
 		ret = sc->nr_reclaimed;
 out:
 	/*
@@ -1857,6 +1875,7 @@ out:
 		mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
 
 	delayacct_freepages_end();
+	put_mems_allowed();
 
 	return ret;
 }
@@ -1873,7 +1892,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
-		.isolate_pages = isolate_pages_global,
 		.nodemask = nodemask,
 	};
 
@@ -1894,7 +1912,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem,
-		.isolate_pages = mem_cgroup_isolate_pages,
 	};
 	nodemask_t nm  = nodemask_of_node(nid);
 
@@ -1928,7 +1945,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem_cont,
-		.isolate_pages = mem_cgroup_isolate_pages,
 		.nodemask = NULL, /* we don't care the placement */
 	};
 
@@ -2006,7 +2022,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
-		.isolate_pages = isolate_pages_global,
 	};
 	/*
 	 * temp_priority is used to remember the scanning priority at which
@@ -2385,7 +2400,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.hibernation_mode = 1,
 		.swappiness = vm_swappiness,
 		.order = 0,
-		.isolate_pages = isolate_pages_global,
 	};
 	struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 	struct task_struct *p = current;
@@ -2570,7 +2584,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.gfp_mask = gfp_mask,
 		.swappiness = vm_swappiness,
 		.order = order,
-		.isolate_pages = isolate_pages_global,
 	};
 	unsigned long slab_reclaimable;
 