diff options
author | Shaohua Li <shaohua.li@intel.com> | 2011-01-13 18:47:33 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 20:32:50 -0500 |
commit | d8505dee1a87b8d41b9c4ee1325cd72258226fbc (patch) | |
tree | 292c67a565c67495e78c8a403e2c2a74228d116b /mm | |
parent | c06b1fca18c3ad868bfcaca230146e3038583422 (diff) |
mm: simplify code of swap.c
Clean up code and remove duplicate code. Next patch will use
pagevec_lru_move_fn introduced here too.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/swap.c | 101 |
1 file changed, 54 insertions(+), 47 deletions(-)
@@ -178,15 +178,13 @@ void put_pages_list(struct list_head *pages)
 }
 EXPORT_SYMBOL(put_pages_list);
 
-/*
- * pagevec_move_tail() must be called with IRQ disabled.
- * Otherwise this may cause nasty races.
- */
-static void pagevec_move_tail(struct pagevec *pvec)
+static void pagevec_lru_move_fn(struct pagevec *pvec,
+				void (*move_fn)(struct page *page, void *arg),
+				void *arg)
 {
 	int i;
-	int pgmoved = 0;
 	struct zone *zone = NULL;
+	unsigned long flags = 0;
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
@@ -194,29 +192,49 @@ static void pagevec_move_tail(struct pagevec *pvec)
 
 		if (pagezone != zone) {
 			if (zone)
-				spin_unlock(&zone->lru_lock);
+				spin_unlock_irqrestore(&zone->lru_lock, flags);
 			zone = pagezone;
-			spin_lock(&zone->lru_lock);
-		}
-		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-			int lru = page_lru_base_type(page);
-			list_move_tail(&page->lru, &zone->lru[lru].list);
-			pgmoved++;
+			spin_lock_irqsave(&zone->lru_lock, flags);
 		}
+
+		(*move_fn)(page, arg);
 	}
 	if (zone)
-		spin_unlock(&zone->lru_lock);
-	__count_vm_events(PGROTATED, pgmoved);
-	release_pages(pvec->pages, pvec->nr, pvec->cold);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
 	pagevec_reinit(pvec);
 }
 
+static void pagevec_move_tail_fn(struct page *page, void *arg)
+{
+	int *pgmoved = arg;
+	struct zone *zone = page_zone(page);
+
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+		int lru = page_lru_base_type(page);
+		list_move_tail(&page->lru, &zone->lru[lru].list);
+		(*pgmoved)++;
+	}
+}
+
+/*
+ * pagevec_move_tail() must be called with IRQ disabled.
+ * Otherwise this may cause nasty races.
+ */
+static void pagevec_move_tail(struct pagevec *pvec)
+{
+	int pgmoved = 0;
+
+	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
+	__count_vm_events(PGROTATED, pgmoved);
+}
+
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim. If it still appears to be reclaimable, move it to the tail of the
  * inactive list.
  */
 void rotate_reclaimable_page(struct page *page)
 {
 	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
 	    !PageUnevictable(page) && PageLRU(page)) {
@@ -516,44 +534,33 @@ void lru_add_page_tail(struct zone* zone,
 	}
 }
 
+static void ____pagevec_lru_add_fn(struct page *page, void *arg)
+{
+	enum lru_list lru = (enum lru_list)arg;
+	struct zone *zone = page_zone(page);
+	int file = is_file_lru(lru);
+	int active = is_active_lru(lru);
+
+	VM_BUG_ON(PageActive(page));
+	VM_BUG_ON(PageUnevictable(page));
+	VM_BUG_ON(PageLRU(page));
+
+	SetPageLRU(page);
+	if (active)
+		SetPageActive(page);
+	update_page_reclaim_stat(zone, page, file, active);
+	add_page_to_lru_list(zone, page, lru);
+}
+
 /*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them. Reinitialises the caller's pagevec.
  */
 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
-	int i;
-	struct zone *zone = NULL;
-
 	VM_BUG_ON(is_unevictable_lru(lru));
 
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-		struct zone *pagezone = page_zone(page);
-		int file;
-		int active;
-
-		if (pagezone != zone) {
-			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
-		}
-		VM_BUG_ON(PageActive(page));
-		VM_BUG_ON(PageUnevictable(page));
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
-		active = is_active_lru(lru);
-		file = is_file_lru(lru);
-		if (active)
-			SetPageActive(page);
-		update_page_reclaim_stat(zone, page, file, active);
-		add_page_to_lru_list(zone, page, lru);
-	}
-	if (zone)
-		spin_unlock_irq(&zone->lru_lock);
-	release_pages(pvec->pages, pvec->nr, pvec->cold);
-	pagevec_reinit(pvec);
+	pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
 }
 
 EXPORT_SYMBOL(____pagevec_lru_add);