Diffstat (limited to 'mm')
-rw-r--r--	mm/page-writeback.c	6
-rw-r--r--	mm/vmscan.c	49
2 files changed, 18 insertions(+), 37 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index bcd929093e64..5a06d4cb9a3d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -202,7 +202,8 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
 	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
 	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
 
-	nr_pages += zone_reclaimable_pages(zone);
+	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
 
 	return nr_pages;
 }
@@ -255,7 +256,8 @@ static unsigned long global_dirtyable_memory(void)
 	x = global_page_state(NR_FREE_PAGES);
 	x -= min(x, dirty_balance_reserve);
 
-	x += global_reclaimable_pages();
+	x += global_page_state(NR_INACTIVE_FILE);
+	x += global_page_state(NR_ACTIVE_FILE);
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
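
After this change, zone_dirtyable_memory() and global_dirtyable_memory() count only free pages and file LRU pages as dirtyable; anonymous pages drop out of the calculation. As a minimal userspace sketch (not part of the patch), the same global estimate can be approximated from the standard /proc/vmstat counters nr_free_pages, nr_inactive_file and nr_active_file; the in-kernel dirty_balance_reserve subtraction is not visible from userspace and is omitted here:

#include <stdio.h>
#include <string.h>

static unsigned long vmstat_read(const char *key)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long val, ret = 0;

	if (!f)
		return 0;
	while (fscanf(f, "%63s %lu", name, &val) == 2) {
		if (!strcmp(name, key)) {
			ret = val;
			break;
		}
	}
	fclose(f);
	return ret;
}

int main(void)
{
	unsigned long x;

	/* mirrors global_dirtyable_memory() after this patch:
	 * free pages plus the file LRU; anon pages no longer count */
	x = vmstat_read("nr_free_pages");
	x += vmstat_read("nr_inactive_file");
	x += vmstat_read("nr_active_file");

	printf("dirtyable pages (approx.): %lu\n", x);
	return 0;
}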
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7dbdb6afd101..43ddef3cf44f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2117,6 +2117,20 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	return aborted_reclaim;
 }
 
+static unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+	int nr;
+
+	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+	     zone_page_state(zone, NR_INACTIVE_FILE);
+
+	if (get_nr_swap_pages() > 0)
+		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+		      zone_page_state(zone, NR_INACTIVE_ANON);
+
+	return nr;
+}
+
 static bool zone_reclaimable(struct zone *zone)
 {
 	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
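
The now-static zone_reclaimable_pages() feeds zone_reclaimable() above: reclaim keeps treating a zone as reclaimable only while pages_scanned stays below six times its reclaimable page count. A small self-contained sketch of that give-up heuristic, with made-up counter values:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for the in-kernel check, not kernel code */
static bool zone_reclaimable_sketch(unsigned long pages_scanned,
				    unsigned long reclaimable_pages)
{
	return pages_scanned < reclaimable_pages * 6;
}

int main(void)
{
	/* made-up numbers: a zone with 1000 reclaimable pages */
	printf("%d\n", zone_reclaimable_sketch(5999, 1000)); /* 1: keep scanning */
	printf("%d\n", zone_reclaimable_sketch(6000, 1000)); /* 0: deemed unreclaimable */
	return 0;
}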
@@ -3075,41 +3089,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
-	int nr;
-
-	nr = global_page_state(NR_ACTIVE_FILE) +
-	     global_page_state(NR_INACTIVE_FILE);
-
-	if (get_nr_swap_pages() > 0)
-		nr += global_page_state(NR_ACTIVE_ANON) +
-		      global_page_state(NR_INACTIVE_ANON);
-
-	return nr;
-}
-
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
-	int nr;
-
-	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
-	     zone_page_state(zone, NR_INACTIVE_FILE);
-
-	if (get_nr_swap_pages() > 0)
-		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
-		      zone_page_state(zone, NR_INACTIVE_ANON);
-
-	return nr;
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
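
The removal above leaves global_reclaimable_pages() without callers, since page-writeback.c now sums the file LRU counters directly, so it is deleted; zone_reclaimable_pages() survives only as the static copy earlier in this file. Its accounting rule is sketched below with made-up page counts (a hypothetical mirror, not kernel code): file pages always count as reclaimable, anonymous pages only while swap space is available.

#include <stdio.h>

static unsigned long reclaimable_sketch(unsigned long active_file,
					unsigned long inactive_file,
					unsigned long active_anon,
					unsigned long inactive_anon,
					unsigned long nr_swap_pages)
{
	unsigned long nr = active_file + inactive_file;

	/* anon pages can only be reclaimed by swapping them out */
	if (nr_swap_pages > 0)
		nr += active_anon + inactive_anon;

	return nr;
}

int main(void)
{
	printf("with swap:    %lu\n", reclaimable_sketch(100, 200, 300, 400, 4096));
	printf("without swap: %lu\n", reclaimable_sketch(100, 200, 300, 400, 0));
	return 0;
}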