diff options
Diffstat (limited to 'mm')
-rw-r--r-- | mm/internal.h | 1 | ||||
-rw-r--r-- | mm/page-writeback.c | 6 | ||||
-rw-r--r-- | mm/vmscan.c | 23 |
3 files changed, 5 insertions(+), 25 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h index 612c14f5e0f5..29e1e761f9eb 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
@@ -83,7 +83,6 @@ extern unsigned long highest_memmap_pfn; | |||
83 | */ | 83 | */ |
84 | extern int isolate_lru_page(struct page *page); | 84 | extern int isolate_lru_page(struct page *page); |
85 | extern void putback_lru_page(struct page *page); | 85 | extern void putback_lru_page(struct page *page); |
86 | extern unsigned long zone_reclaimable_pages(struct zone *zone); | ||
87 | extern bool zone_reclaimable(struct zone *zone); | 86 | extern bool zone_reclaimable(struct zone *zone); |
88 | 87 | ||
89 | /* | 88 | /* |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 61119b8a11e6..2d30e2cfe804 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -205,7 +205,8 @@ static unsigned long zone_dirtyable_memory(struct zone *zone) | |||
205 | nr_pages = zone_page_state(zone, NR_FREE_PAGES); | 205 | nr_pages = zone_page_state(zone, NR_FREE_PAGES); |
206 | nr_pages -= min(nr_pages, zone->dirty_balance_reserve); | 206 | nr_pages -= min(nr_pages, zone->dirty_balance_reserve); |
207 | 207 | ||
208 | nr_pages += zone_reclaimable_pages(zone); | 208 | nr_pages += zone_page_state(zone, NR_INACTIVE_FILE); |
209 | nr_pages += zone_page_state(zone, NR_ACTIVE_FILE); | ||
209 | 210 | ||
210 | return nr_pages; | 211 | return nr_pages; |
211 | } | 212 | } |
@@ -258,7 +259,8 @@ static unsigned long global_dirtyable_memory(void) | |||
258 | x = global_page_state(NR_FREE_PAGES); | 259 | x = global_page_state(NR_FREE_PAGES); |
259 | x -= min(x, dirty_balance_reserve); | 260 | x -= min(x, dirty_balance_reserve); |
260 | 261 | ||
261 | x += global_reclaimable_pages(); | 262 | x += global_page_state(NR_INACTIVE_FILE); |
263 | x += global_page_state(NR_ACTIVE_FILE); | ||
262 | 264 | ||
263 | if (!vm_highmem_is_dirtyable) | 265 | if (!vm_highmem_is_dirtyable) |
264 | x -= highmem_dirtyable_memory(x); | 266 | x -= highmem_dirtyable_memory(x); |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 90c4075d8d75..a9c74b409681 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -147,7 +147,7 @@ static bool global_reclaim(struct scan_control *sc) | |||
147 | } | 147 | } |
148 | #endif | 148 | #endif |
149 | 149 | ||
150 | unsigned long zone_reclaimable_pages(struct zone *zone) | 150 | static unsigned long zone_reclaimable_pages(struct zone *zone) |
151 | { | 151 | { |
152 | int nr; | 152 | int nr; |
153 | 153 | ||
@@ -3315,27 +3315,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) | |||
3315 | wake_up_interruptible(&pgdat->kswapd_wait); | 3315 | wake_up_interruptible(&pgdat->kswapd_wait); |
3316 | } | 3316 | } |
3317 | 3317 | ||
3318 | /* | ||
3319 | * The reclaimable count would be mostly accurate. | ||
3320 | * The less reclaimable pages may be | ||
3321 | * - mlocked pages, which will be moved to unevictable list when encountered | ||
3322 | * - mapped pages, which may require several travels to be reclaimed | ||
3323 | * - dirty pages, which is not "instantly" reclaimable | ||
3324 | */ | ||
3325 | unsigned long global_reclaimable_pages(void) | ||
3326 | { | ||
3327 | int nr; | ||
3328 | |||
3329 | nr = global_page_state(NR_ACTIVE_FILE) + | ||
3330 | global_page_state(NR_INACTIVE_FILE); | ||
3331 | |||
3332 | if (get_nr_swap_pages() > 0) | ||
3333 | nr += global_page_state(NR_ACTIVE_ANON) + | ||
3334 | global_page_state(NR_INACTIVE_ANON); | ||
3335 | |||
3336 | return nr; | ||
3337 | } | ||
3338 | |||
3339 | #ifdef CONFIG_HIBERNATION | 3318 | #ifdef CONFIG_HIBERNATION |
3340 | /* | 3319 | /* |
3341 | * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of | 3320 | * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of |