Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  77
1 file changed, 27 insertions, 50 deletions
diff --git a/mm/swap.c b/mm/swap.c
index b135ec90cdeb..8adb9feb61e1 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -151,6 +151,26 @@ void rotate_reclaimable_page(struct page *page)
 	}
 }
 
+static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+				     int file, int rotated)
+{
+	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
+	struct zone_reclaim_stat *memcg_reclaim_stat;
+
+	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+
+	reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		reclaim_stat->recent_rotated[file]++;
+
+	if (!memcg_reclaim_stat)
+		return;
+
+	memcg_reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		memcg_reclaim_stat->recent_rotated[file]++;
+}
+
 /*
  * FIXME: speed this up?
  */
@@ -168,10 +188,8 @@ void activate_page(struct page *page)
 		lru += LRU_ACTIVE;
 		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
-		mem_cgroup_move_lists(page, lru);
 
-		zone->recent_rotated[!!file]++;
-		zone->recent_scanned[!!file]++;
+		update_page_reclaim_stat(zone, page, !!file, 1);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
@@ -246,25 +264,6 @@ void add_page_to_unevictable_list(struct page *page)
 	spin_unlock_irq(&zone->lru_lock);
 }
 
-/**
- * lru_cache_add_active_or_unevictable
- * @page: the page to be added to LRU
- * @vma: vma in which page is mapped for determining reclaimability
- *
- * place @page on active or unevictable LRU list, depending on
- * page_evictable(). Note that if the page is not evictable,
- * it goes directly back onto it's zone's unevictable list. It does
- * NOT use a per cpu pagevec.
- */
-void lru_cache_add_active_or_unevictable(struct page *page,
-					struct vm_area_struct *vma)
-{
-	if (page_evictable(page, vma))
-		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
-	else
-		add_page_to_unevictable_list(page);
-}
-
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
@@ -398,28 +397,6 @@ void __pagevec_release(struct pagevec *pvec)
 EXPORT_SYMBOL(__pagevec_release);
 
 /*
- * pagevec_release() for pages which are known to not be on the LRU
- *
- * This function reinitialises the caller's pagevec.
- */
-void __pagevec_release_nonlru(struct pagevec *pvec)
-{
-	int i;
-	struct pagevec pages_to_free;
-
-	pagevec_init(&pages_to_free, pvec->cold);
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-
-		VM_BUG_ON(PageLRU(page));
-		if (put_page_testzero(page))
-			pagevec_add(&pages_to_free, page);
-	}
-	pagevec_free(&pages_to_free);
-	pagevec_reinit(pvec);
-}
-
-/*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them. Reinitialises the caller's pagevec.
  */
@@ -427,12 +404,14 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
 	int i;
 	struct zone *zone = NULL;
+
 	VM_BUG_ON(is_unevictable_lru(lru));
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 		struct zone *pagezone = page_zone(page);
 		int file;
+		int active;
 
 		if (pagezone != zone) {
 			if (zone)
@@ -444,12 +423,11 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
+		active = is_active_lru(lru);
 		file = is_file_lru(lru);
-		zone->recent_scanned[file]++;
-		if (is_active_lru(lru)) {
+		if (active)
 			SetPageActive(page);
-			zone->recent_rotated[file]++;
-		}
+		update_page_reclaim_stat(zone, page, file, active);
 		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
@@ -495,8 +473,7 @@ void pagevec_swap_free(struct pagevec *pvec)
 		struct page *page = pvec->pages[i];
 
 		if (PageSwapCache(page) && trylock_page(page)) {
-			if (PageSwapCache(page))
-				remove_exclusive_swap_page_ref(page);
+			try_to_free_swap(page);
 			unlock_page(page);
 		}
 	}