path: root/mm/vmscan.c
author		Mel Gorman <mgorman@suse.de>	2012-05-29 18:06:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-29 19:22:19 -0400
commit		23b9da55c5b0feb484bd5e8615f4eb1ce4169453 (patch)
tree		84ce2cce4aca6c5e5ca4b7066faa21db1148b370 /mm/vmscan.c
parent		41ac1999c3e3563e1810b14878a869c79c9368bb (diff)
mm: vmscan: remove reclaim_mode_t
There is little motivation for reclaim_mode_t once RECLAIM_MODE_[A]SYNC and lumpy reclaim have been removed. This patch gets rid of reclaim_mode_t as well and improves the documentation about what reclaim/compaction is and when it is triggered.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ying Han <yinghan@google.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
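Note: the trigger condition that this patch consolidates into in_reclaim_compaction() (see the hunks below) can be exercised in isolation. The following is only a stand-alone user-space sketch, not kernel code: the hard-coded constants assume PAGE_ALLOC_COSTLY_ORDER == 3, DEF_PRIORITY == 12 and CONFIG_COMPACTION enabled, which mirror the kernel of this era but are fixed here purely for illustration.

/*
 * Stand-alone sketch of the reclaim/compaction gate: compaction-driven
 * reclaim is used only for costly (order > 3) allocations, or for any
 * high-order allocation once reclaim priority indicates memory pressure.
 */
#include <stdbool.h>
#include <stdio.h>

#define COMPACTION_BUILD		1	/* assume CONFIG_COMPACTION=y */
#define PAGE_ALLOC_COSTLY_ORDER		3	/* assumed kernel value */
#define DEF_PRIORITY			12	/* assumed kernel value */

static bool in_reclaim_compaction(int priority, int order)
{
	return COMPACTION_BUILD && order &&
		(order > PAGE_ALLOC_COSTLY_ORDER ||
		 priority < DEF_PRIORITY - 2);
}

int main(void)
{
	/* order-0: never; order-2 at default priority: no;
	 * order-2 under pressure (priority 9): yes; order-4 (costly): yes */
	printf("%d %d %d %d\n",
	       in_reclaim_compaction(DEF_PRIORITY, 0),
	       in_reclaim_compaction(DEF_PRIORITY, 2),
	       in_reclaim_compaction(9, 2),
	       in_reclaim_compaction(DEF_PRIORITY, 4));
	return 0;
}

Compiled and run, this prints "0 0 1 1", matching the behaviour described above.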
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	72
1 file changed, 22 insertions(+), 50 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e27f27d4cc19..68e5819d0f1b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,16 +53,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
-/*
- * reclaim_mode determines how the inactive list is shrunk
- * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
- * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
- *			order-0 pages and then compact the zone
- */
-typedef unsigned __bitwise__ reclaim_mode_t;
-#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
-#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)
-
 struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
 	unsigned long nr_scanned;
@@ -89,12 +79,6 @@ struct scan_control {
 	int order;
 
 	/*
-	 * Intend to reclaim enough continuous memory rather than reclaim
-	 * enough amount of memory. i.e, mode for high order allocation.
-	 */
-	reclaim_mode_t reclaim_mode;
-
-	/*
 	 * The memory cgroup that hit its limit and as a result is the
 	 * primary target of this reclaim invocation.
 	 */
@@ -356,25 +340,6 @@ out:
 	return ret;
 }
 
-static void set_reclaim_mode(int priority, struct scan_control *sc)
-{
-	/*
-	 * Restrict reclaim/compaction to costly allocations or when
-	 * under memory pressure
-	 */
-	if (COMPACTION_BUILD && sc->order &&
-			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
-			 priority < DEF_PRIORITY - 2))
-		sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
-	else
-		sc->reclaim_mode = RECLAIM_MODE_SINGLE;
-}
-
-static void reset_reclaim_mode(struct scan_control *sc)
-{
-	sc->reclaim_mode = RECLAIM_MODE_SINGLE;
-}
-
 static inline int is_page_cache_freeable(struct page *page)
 {
 	/*
@@ -497,8 +462,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 			/* synchronous write or broken a_ops? */
 			ClearPageReclaim(page);
 		}
-		trace_mm_vmscan_writepage(page,
-			trace_reclaim_flags(page, sc->reclaim_mode));
+		trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
 		inc_zone_page_state(page, NR_VMSCAN_WRITE);
 		return PAGE_SUCCESS;
 	}
@@ -953,7 +917,6 @@ cull_mlocked:
 			try_to_free_swap(page);
 		unlock_page(page);
 		putback_lru_page(page);
-		reset_reclaim_mode(sc);
 		continue;
 
 activate_locked:
@@ -1348,8 +1311,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 			return SWAP_CLUSTER_MAX;
 	}
 
-	set_reclaim_mode(priority, sc);
-
 	lru_add_drain();
 
 	if (!sc->may_unmap)
@@ -1433,7 +1394,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 		zone_idx(zone),
 		nr_scanned, nr_reclaimed,
 		priority,
-		trace_shrink_flags(file, sc->reclaim_mode));
+		trace_shrink_flags(file));
 	return nr_reclaimed;
 }
 
@@ -1512,8 +1473,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
 	lru_add_drain();
 
-	reset_reclaim_mode(sc);
-
 	if (!sc->may_unmap)
 		isolate_mode |= ISOLATE_UNMAPPED;
 	if (!sc->may_writepage)
@@ -1826,23 +1785,35 @@ out:
 	}
 }
 
+/* Use reclaim/compaction for costly allocs or under memory pressure */
+static bool in_reclaim_compaction(int priority, struct scan_control *sc)
+{
+	if (COMPACTION_BUILD && sc->order &&
+			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+			 priority < DEF_PRIORITY - 2))
+		return true;
+
+	return false;
+}
+
 /*
- * Reclaim/compaction depends on a number of pages being freed. To avoid
- * disruption to the system, a small number of order-0 pages continue to be
- * rotated and reclaimed in the normal fashion. However, by the time we get
- * back to the allocator and call try_to_compact_zone(), we ensure that
- * there are enough free pages for it to be likely successful
+ * Reclaim/compaction is used for high-order allocation requests. It reclaims
+ * order-0 pages before compacting the zone. should_continue_reclaim() returns
+ * true if more pages should be reclaimed such that when the page allocator
+ * calls try_to_compact_zone() that it will have enough free pages to succeed.
+ * It will give up earlier than that if there is difficulty reclaiming pages.
  */
 static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 					unsigned long nr_reclaimed,
 					unsigned long nr_scanned,
+					int priority,
 					struct scan_control *sc)
 {
 	unsigned long pages_for_compaction;
 	unsigned long inactive_lru_pages;
 
 	/* If not in reclaim/compaction mode, stop */
-	if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+	if (!in_reclaim_compaction(priority, sc))
 		return false;
 
 	/* Consider stopping depending on scan and reclaim activity */
@@ -1944,7 +1915,8 @@ restart:
 
 	/* reclaim/compaction might need reclaim to continue */
 	if (should_continue_reclaim(mz, nr_reclaimed,
-					sc->nr_scanned - nr_scanned, sc))
+					sc->nr_scanned - nr_scanned,
+					priority, sc))
 		goto restart;
 
 	throttle_vm_writeout(sc->gfp_mask);