Diffstat (limited to 'mm/swap.c')
 mm/swap.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index bc58c1369dd6..2152e48a7b8f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,8 @@
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 
+#include "internal.h"
+
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
@@ -244,6 +246,25 @@ void add_page_to_unevictable_list(struct page *page)
 	spin_unlock_irq(&zone->lru_lock);
 }
 
+/**
+ * lru_cache_add_active_or_unevictable
+ * @page:  the page to be added to the LRU
+ * @vma:   vma in which page is mapped, for determining reclaimability
+ *
+ * Place @page on the active or unevictable LRU list, depending on
+ * page_evictable().  Note that if the page is not evictable,
+ * it goes directly back onto its zone's unevictable list.  It does
+ * NOT use a per-cpu pagevec.
+ */
+void lru_cache_add_active_or_unevictable(struct page *page,
+					 struct vm_area_struct *vma)
+{
+	if (page_evictable(page, vma))
+		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+	else
+		add_page_to_unevictable_list(page);
+}
+
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
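A note on the index arithmetic in the new function: in kernels of this era,
page_is_file_cache() returns LRU_FILE for page-cache pages and 0 for
swap-backed ones, so adding it to LRU_ACTIVE selects between the two active
lists. A minimal sketch of the split-LRU enum this relies on, reproduced from
include/linux/mmzone.h of the time for illustration (treat the exact values
as an assumption, not part of this diff):

	#define LRU_BASE   0
	#define LRU_ACTIVE 1
	#define LRU_FILE   2

	enum lru_list {
		LRU_INACTIVE_ANON = LRU_BASE,                         /* 0 */
		LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,            /* 1 */
		LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,              /* 2 */
		LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE, /* 3 */
		LRU_UNEVICTABLE,                                      /* 4 */
		NR_LRU_LISTS
	};

So LRU_ACTIVE + page_is_file_cache(page) evaluates to LRU_ACTIVE_ANON (1)
for anonymous/swap-backed pages and LRU_ACTIVE_FILE (3) for file-backed ones.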
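For context on how the new helper is meant to be used: callers in the fault
paths (e.g. in mm/memory.c) would typically use it in place of the older
lru_cache_add_active() when installing a newly allocated anonymous page. An
illustrative call-site sketch, not taken from this diff:

	/* After a new anonymous page has been mapped into @vma at @address: */
	page_add_new_anon_rmap(page, vma, address);
	/*
	 * Bypasses the per-cpu pagevecs: a non-evictable page goes
	 * straight to its zone's unevictable list, anything else starts
	 * life on the appropriate active LRU.
	 */
	lru_cache_add_active_or_unevictable(page, vma);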