Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  101
1 file changed, 47 insertions(+), 54 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index ab498ea04ae3..c02f93611a84 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -178,13 +178,15 @@ void put_pages_list(struct list_head *pages)
 }
 EXPORT_SYMBOL(put_pages_list);
 
-static void pagevec_lru_move_fn(struct pagevec *pvec,
-				void (*move_fn)(struct page *page, void *arg),
-				void *arg)
+/*
+ * pagevec_move_tail() must be called with IRQ disabled.
+ * Otherwise this may cause nasty races.
+ */
+static void pagevec_move_tail(struct pagevec *pvec)
 {
 	int i;
+	int pgmoved = 0;
 	struct zone *zone = NULL;
-	unsigned long flags = 0;
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
@@ -192,41 +194,21 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
 		if (pagezone != zone) {
 			if (zone)
-				spin_unlock_irqrestore(&zone->lru_lock, flags);
+				spin_unlock(&zone->lru_lock);
 			zone = pagezone;
-			spin_lock_irqsave(&zone->lru_lock, flags);
+			spin_lock(&zone->lru_lock);
+		}
+		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+			int lru = page_lru_base_type(page);
+			list_move_tail(&page->lru, &zone->lru[lru].list);
+			pgmoved++;
 		}
-
-		(*move_fn)(page, arg);
 	}
 	if (zone)
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
-	pagevec_reinit(pvec);
-}
-
-static void pagevec_move_tail_fn(struct page *page, void *arg)
-{
-	int *pgmoved = arg;
-	struct zone *zone = page_zone(page);
-
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		int lru = page_lru_base_type(page);
-		list_move_tail(&page->lru, &zone->lru[lru].list);
-		(*pgmoved)++;
-	}
-}
-
-/*
- * pagevec_move_tail() must be called with IRQ disabled.
- * Otherwise this may cause nasty races.
- */
-static void pagevec_move_tail(struct pagevec *pvec)
-{
-	int pgmoved = 0;
-
-	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
+		spin_unlock(&zone->lru_lock);
 	__count_vm_events(PGROTATED, pgmoved);
+	release_pages(pvec->pages, pvec->nr, pvec->cold);
+	pagevec_reinit(pvec);
 }
 
 /*
@@ -234,7 +216,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
  * reclaim. If it still appears to be reclaimable, move it to the tail of the
  * inactive list.
  */
-void rotate_reclaimable_page(struct page *page)
+void  rotate_reclaimable_page(struct page *page)
 {
 	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
 	    !PageUnevictable(page) && PageLRU(page)) {
@@ -534,33 +516,44 @@ void lru_add_page_tail(struct zone* zone,
 	}
 }
 
-static void ____pagevec_lru_add_fn(struct page *page, void *arg)
-{
-	enum lru_list lru = (enum lru_list)arg;
-	struct zone *zone = page_zone(page);
-	int file = is_file_lru(lru);
-	int active = is_active_lru(lru);
-
-	VM_BUG_ON(PageActive(page));
-	VM_BUG_ON(PageUnevictable(page));
-	VM_BUG_ON(PageLRU(page));
-
-	SetPageLRU(page);
-	if (active)
-		SetPageActive(page);
-	update_page_reclaim_stat(zone, page, file, active);
-	add_page_to_lru_list(zone, page, lru);
-}
-
 /*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them. Reinitialises the caller's pagevec.
  */
 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
+	int i;
+	struct zone *zone = NULL;
+
 	VM_BUG_ON(is_unevictable_lru(lru));
 
-	pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct page *page = pvec->pages[i];
+		struct zone *pagezone = page_zone(page);
+		int file;
+		int active;
+
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irq(&zone->lru_lock);
+			zone = pagezone;
+			spin_lock_irq(&zone->lru_lock);
+		}
+		VM_BUG_ON(PageActive(page));
+		VM_BUG_ON(PageUnevictable(page));
+		VM_BUG_ON(PageLRU(page));
+		SetPageLRU(page);
+		active = is_active_lru(lru);
+		file = is_file_lru(lru);
+		if (active)
+			SetPageActive(page);
+		update_page_reclaim_stat(zone, page, file, active);
+		add_page_to_lru_list(zone, page, lru);
+	}
+	if (zone)
+		spin_unlock_irq(&zone->lru_lock);
+	release_pages(pvec->pages, pvec->nr, pvec->cold);
+	pagevec_reinit(pvec);
 }
 
 EXPORT_SYMBOL(____pagevec_lru_add);
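
Note on the restored locking scheme: pagevec_move_tail() takes zone->lru_lock with a plain spin_lock(), so the comment it carries requires callers to have IRQs disabled first; otherwise an interrupt taking lru_lock on the same CPU could deadlock or race. A minimal sketch of the expected caller-side discipline, modeled on rotate_reclaimable_page() in this file (queue_for_rotation() is a hypothetical name for illustration; lru_rotate_pvecs is the per-CPU pagevec this file already uses):

	/* Sketch only: the IRQ-off calling pattern pagevec_move_tail() expects. */
	static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

	static void queue_for_rotation(struct page *page)	/* hypothetical helper */
	{
		unsigned long flags;
		struct pagevec *pvec;

		page_cache_get(page);		/* hold a reference while queued */
		local_irq_save(flags);		/* IRQs off before touching the pagevec */
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))	/* pagevec full: drain to the LRU tails */
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

With IRQs already off here, the plain (non-_irqsave) spin_lock(&zone->lru_lock) inside pagevec_move_tail() is safe, which is the trade the revert makes against the generic pagevec_lru_move_fn() it removes.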