Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	42
1 file changed, 36 insertions, 6 deletions
diff --git a/mm/swap.c b/mm/swap.c
index 0b1974a08974..fee6b973f143 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -115,7 +115,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock(&zone->lru_lock);
 		}
-		if (PageLRU(page) && !PageActive(page)) {
+		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 			int lru = page_is_file_cache(page);
 			list_move_tail(&page->lru, &zone->lru[lru].list);
 			pgmoved++;
@@ -136,7 +136,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
 void rotate_reclaimable_page(struct page *page)
 {
 	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
-	    PageLRU(page)) {
+	    !PageUnevictable(page) && PageLRU(page)) {
 		struct pagevec *pvec;
 		unsigned long flags;
 
@@ -157,7 +157,7 @@ void activate_page(struct page *page)
 	struct zone *zone = page_zone(page);
 
 	spin_lock_irq(&zone->lru_lock);
-	if (PageLRU(page) && !PageActive(page)) {
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		int file = page_is_file_cache(page);
 		int lru = LRU_BASE + file;
 		del_page_from_lru_list(zone, page, lru);
@@ -166,7 +166,7 @@ void activate_page(struct page *page)
 		lru += LRU_ACTIVE;
 		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
-		mem_cgroup_move_lists(page, true);
+		mem_cgroup_move_lists(page, lru);
 
 		zone->recent_rotated[!!file]++;
 		zone->recent_scanned[!!file]++;
@@ -183,7 +183,8 @@ void activate_page(struct page *page)
  */
 void mark_page_accessed(struct page *page)
 {
-	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
+	if (!PageActive(page) && !PageUnevictable(page) &&
+			PageReferenced(page) && PageLRU(page)) {
 		activate_page(page);
 		ClearPageReferenced(page);
 	} else if (!PageReferenced(page)) {
@@ -211,13 +212,38 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 void lru_cache_add_lru(struct page *page, enum lru_list lru)
 {
 	if (PageActive(page)) {
+		VM_BUG_ON(PageUnevictable(page));
 		ClearPageActive(page);
+	} else if (PageUnevictable(page)) {
+		VM_BUG_ON(PageActive(page));
+		ClearPageUnevictable(page);
 	}
 
-	VM_BUG_ON(PageLRU(page) || PageActive(page));
+	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
 	__lru_cache_add(page, lru);
 }
 
+/**
+ * add_page_to_unevictable_list - add a page to the unevictable list
+ * @page: the page to be added to the unevictable list
+ *
+ * Add page directly to its zone's unevictable list. To avoid races with
+ * tasks that might be making the page evictable, through eg. munlock,
+ * munmap or exit, while it's not on the lru, we want to add the page
+ * while it's locked or otherwise "invisible" to other tasks. This is
+ * difficult to do when using the pagevec cache, so bypass that.
+ */
+void add_page_to_unevictable_list(struct page *page)
+{
+	struct zone *zone = page_zone(page);
+
+	spin_lock_irq(&zone->lru_lock);
+	SetPageUnevictable(page);
+	SetPageLRU(page);
+	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+	spin_unlock_irq(&zone->lru_lock);
+}
+
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
@@ -316,6 +342,7 @@ void release_pages(struct page **pages, int nr, int cold)
 
 		if (PageLRU(page)) {
 			struct zone *pagezone = page_zone(page);
+
 			if (pagezone != zone) {
 				if (zone)
 					spin_unlock_irqrestore(&zone->lru_lock,
@@ -392,6 +419,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
 	int i;
 	struct zone *zone = NULL;
+	VM_BUG_ON(is_unevictable_lru(lru));
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
@@ -403,6 +431,8 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
+		VM_BUG_ON(PageActive(page));
+		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		if (is_active_lru(lru))
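
Usage note (not part of the patch): the comment block above add_page_to_unevictable_list() says the caller should keep the page locked, or otherwise invisible to other tasks, so the page cannot be made evictable between the evictability check and the list insertion. A minimal caller sketch under that assumption follows; the page_evictable(page, NULL) test and the LRU_BASE + page_is_file_cache(page) arithmetic are taken from related code in this series and are illustrative, not mandated by this diff.

	/*
	 * Illustrative putback path, not part of this patch: the page is
	 * locked so its evictability cannot change while we decide which
	 * list it belongs on.
	 */
	lock_page(page);
	if (page_evictable(page, NULL))
		/* evictable: add through the regular inactive LRU path */
		lru_cache_add_lru(page, LRU_BASE + page_is_file_cache(page));
	else
		/* not evictable: place it directly on the unevictable list */
		add_page_to_unevictable_list(page);
	unlock_page(page);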