author     Shaohua Li <shaohua.li@intel.com>              2011-03-22 19:33:45 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2011-03-22 20:44:09 -0400
commit     3dd7ae8ec0ef399bfea347f297d2a95504d35571 (patch)
tree       6e049499c7502e55d45fbbf2ed83075ed2fbc481 /mm/swap.c
parent     bee4c36a5cf5c9f63ce1d7372aa62045fbd16d47 (diff)
mm: simplify code of swap.c
Clean up the code and remove duplication: the pagevec walk and per-zone
lru_lock handling that was open-coded in pagevec_move_tail(),
____pagevec_lru_deactivate() and ____pagevec_lru_add() is factored into a
single helper, pagevec_lru_move_fn(), which takes a per-page callback.
The next patch will use pagevec_lru_move_fn() as well.
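The shape of the refactoring, for readers skimming the diff below: the
repeated "walk a pagevec, taking and dropping each zone's lru_lock as the
zone changes" loop becomes one generic walker, and each former open-coded
loop shrinks to a per-page callback. The following standalone C sketch
shows the same callback pattern in userspace; it is an illustration only,
with invented names (vec_move_fn, struct item) and a pthread mutex standing
in for zone->lru_lock with spin_lock_irqsave():

/*
 * Userspace illustration of the pattern introduced by this patch; not
 * kernel code.  One generic walker owns the "lock the right zone,
 * iterate, unlock" boilerplate; callers supply a per-item callback.
 */
#include <pthread.h>
#include <stdio.h>

struct zone { pthread_mutex_t lock; };
struct item { struct zone *zone; int moved; };

/* Generic walker, analogous to pagevec_lru_move_fn(): it batches lock
 * acquisition, re-locking only when the zone changes between items. */
static void vec_move_fn(struct item **vec, int n,
			void (*move_fn)(struct item *, void *), void *arg)
{
	struct zone *zone = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (vec[i]->zone != zone) {
			if (zone)
				pthread_mutex_unlock(&zone->lock);
			zone = vec[i]->zone;
			pthread_mutex_lock(&zone->lock);
		}
		/* caller-specific work, done under the zone lock */
		move_fn(vec[i], arg);
	}
	if (zone)
		pthread_mutex_unlock(&zone->lock);
}

/* One caller-specific callback, analogous to pagevec_move_tail_fn():
 * it does only the per-item work and counts via the opaque argument. */
static void move_tail_fn(struct item *it, void *arg)
{
	int *moved = arg;

	it->moved = 1;
	(*moved)++;
}

int main(void)
{
	struct zone z = { PTHREAD_MUTEX_INITIALIZER };
	struct item a = { &z, 0 }, b = { &z, 0 };
	struct item *vec[] = { &a, &b };
	int moved = 0;

	vec_move_fn(vec, 2, move_tail_fn, &moved);
	printf("moved %d items\n", moved);
	return 0;
}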
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hiroyuki Kamezawa <kamezawa.hiroyuki@gmail.com>
Cc: Andi Kleen <andi@firstfloor.org>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  133
1 file changed, 58 insertions, 75 deletions
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -179,15 +179,13 @@ void put_pages_list(struct list_head *pages)
 }
 EXPORT_SYMBOL(put_pages_list);
 
-/*
- * pagevec_move_tail() must be called with IRQ disabled.
- * Otherwise this may cause nasty races.
- */
-static void pagevec_move_tail(struct pagevec *pvec)
+static void pagevec_lru_move_fn(struct pagevec *pvec,
+			void (*move_fn)(struct page *page, void *arg),
+			void *arg)
 {
 	int i;
-	int pgmoved = 0;
 	struct zone *zone = NULL;
+	unsigned long flags = 0;
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
@@ -195,30 +193,50 @@ static void pagevec_move_tail(struct pagevec *pvec)
 
 		if (pagezone != zone) {
 			if (zone)
-				spin_unlock(&zone->lru_lock);
+				spin_unlock_irqrestore(&zone->lru_lock, flags);
 			zone = pagezone;
-			spin_lock(&zone->lru_lock);
-		}
-		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-			enum lru_list lru = page_lru_base_type(page);
-			list_move_tail(&page->lru, &zone->lru[lru].list);
-			mem_cgroup_rotate_reclaimable_page(page);
-			pgmoved++;
+			spin_lock_irqsave(&zone->lru_lock, flags);
 		}
+
+		(*move_fn)(page, arg);
 	}
 	if (zone)
-		spin_unlock(&zone->lru_lock);
-	__count_vm_events(PGROTATED, pgmoved);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	release_pages(pvec->pages, pvec->nr, pvec->cold);
 	pagevec_reinit(pvec);
 }
 
+static void pagevec_move_tail_fn(struct page *page, void *arg)
+{
+	int *pgmoved = arg;
+	struct zone *zone = page_zone(page);
+
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+		enum lru_list lru = page_lru_base_type(page);
+		list_move_tail(&page->lru, &zone->lru[lru].list);
+		mem_cgroup_rotate_reclaimable_page(page);
+		(*pgmoved)++;
+	}
+}
+
+/*
+ * pagevec_move_tail() must be called with IRQ disabled.
+ * Otherwise this may cause nasty races.
+ */
+static void pagevec_move_tail(struct pagevec *pvec)
+{
+	int pgmoved = 0;
+
+	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
+	__count_vm_events(PGROTATED, pgmoved);
+}
+
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim. If it still appears to be reclaimable, move it to the tail of the
  * inactive list.
  */
 void rotate_reclaimable_page(struct page *page)
 {
 	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
 	    !PageUnevictable(page) && PageLRU(page)) {
@@ -369,10 +387,11 @@ void add_page_to_unevictable_list(struct page *page)
  * be write it out by flusher threads as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate(struct page *page, struct zone *zone)
+static void lru_deactivate_fn(struct page *page, void *arg)
 {
 	int lru, file;
 	bool active;
+	struct zone *zone = page_zone(page);
 
 	if (!PageLRU(page))
 		return;
@@ -412,31 +431,6 @@ static void lru_deactivate(struct page *page, struct zone *zone)
 	update_page_reclaim_stat(zone, page, file, 0);
 }
 
-static void ____pagevec_lru_deactivate(struct pagevec *pvec)
-{
-	int i;
-	struct zone *zone = NULL;
-
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-		struct zone *pagezone = page_zone(page);
-
-		if (pagezone != zone) {
-			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
-		}
-		lru_deactivate(page, zone);
-	}
-	if (zone)
-		spin_unlock_irq(&zone->lru_lock);
-
-	release_pages(pvec->pages, pvec->nr, pvec->cold);
-	pagevec_reinit(pvec);
-}
-
-
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
@@ -466,7 +460,7 @@ static void drain_cpu_pagevecs(int cpu)
 
 	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
 	if (pagevec_count(pvec))
-		____pagevec_lru_deactivate(pvec);
+		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 }
 
 /**
@@ -483,7 +477,7 @@ void deactivate_page(struct page *page)
 		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 
 		if (!pagevec_add(pvec, page))
-			____pagevec_lru_deactivate(pvec);
+			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 		put_cpu_var(lru_deactivate_pvecs);
 	}
 }
@@ -630,44 +624,33 @@ void lru_add_page_tail(struct zone* zone,
 	}
 }
 
+static void ____pagevec_lru_add_fn(struct page *page, void *arg)
+{
+	enum lru_list lru = (enum lru_list)arg;
+	struct zone *zone = page_zone(page);
+	int file = is_file_lru(lru);
+	int active = is_active_lru(lru);
+
+	VM_BUG_ON(PageActive(page));
+	VM_BUG_ON(PageUnevictable(page));
+	VM_BUG_ON(PageLRU(page));
+
+	SetPageLRU(page);
+	if (active)
+		SetPageActive(page);
+	update_page_reclaim_stat(zone, page, file, active);
+	add_page_to_lru_list(zone, page, lru);
+}
+
 /*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them.  Reinitialises the caller's pagevec.
  */
 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
-	int i;
-	struct zone *zone = NULL;
-
 	VM_BUG_ON(is_unevictable_lru(lru));
 
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-		struct zone *pagezone = page_zone(page);
-		int file;
-		int active;
-
-		if (pagezone != zone) {
-			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
-		}
-		VM_BUG_ON(PageActive(page));
-		VM_BUG_ON(PageUnevictable(page));
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
-		active = is_active_lru(lru);
-		file = is_file_lru(lru);
-		if (active)
-			SetPageActive(page);
-		update_page_reclaim_stat(zone, page, file, active);
-		add_page_to_lru_list(zone, page, lru);
-	}
-	if (zone)
-		spin_unlock_irq(&zone->lru_lock);
-	release_pages(pvec->pages, pvec->nr, pvec->cold);
-	pagevec_reinit(pvec);
+	pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
 }
 
 EXPORT_SYMBOL(____pagevec_lru_add);