Diffstat (limited to 'mm')
 -rw-r--r--  mm/migrate.c  11
 -rw-r--r--  mm/swap.c     79
 2 files changed, 28 insertions(+), 62 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index da73742e52a5..ad15b5ef2599 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -55,16 +55,7 @@ int migrate_prep(void)
 
 static inline void move_to_lru(struct page *page)
 {
-	if (PageActive(page)) {
-		/*
-		 * lru_cache_add_active checks that
-		 * the PG_active bit is off.
-		 */
-		ClearPageActive(page);
-		lru_cache_add_active(page);
-	} else {
-		lru_cache_add(page);
-	}
+	lru_cache_add_lru(page, page_lru(page));
 	put_page(page);
 }
 
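move_to_lru() now derives the target list from the page itself via
page_lru(). That helper comes from the companion header change, which
is not part of this diff; a minimal sketch of the assumed mapping from
the PG_active bit to an LRU index:

	/* Assumed companion definition, not part of this diff. */
	static inline enum lru_list page_lru(struct page *page)
	{
		enum lru_list lru = LRU_BASE;

		if (PageActive(page))
			lru += LRU_ACTIVE;

		return lru;
	}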
diff --git a/mm/swap.c b/mm/swap.c
index 82c2b3a76f94..e3045040dc3e 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,8 +34,7 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs);
+static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 
 /*
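The per-CPU add pagevec becomes an array with one batching vector per
LRU list. NR_LRU_LISTS, for_each_lru() and is_active_lru() are defined
outside this diff; they are assumed to look roughly like:

	/* Assumed companion definitions, not part of this diff. */
	enum lru_list {
		LRU_BASE,
		LRU_INACTIVE = LRU_BASE,	/* inactive list */
		LRU_ACTIVE,			/* active list */
		NR_LRU_LISTS
	};

	#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

	static inline int is_active_lru(enum lru_list l)
	{
		return (l == LRU_ACTIVE);
	}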
@@ -186,28 +185,29 @@ void mark_page_accessed(struct page *page)
 
 EXPORT_SYMBOL(mark_page_accessed);
 
-/**
- * lru_cache_add: add a page to the page lists
- * @page: the page to add
- */
-void lru_cache_add(struct page *page)
+void __lru_cache_add(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
 
 	page_cache_get(page);
 	if (!pagevec_add(pvec, page))
-		__pagevec_lru_add(pvec);
+		____pagevec_lru_add(pvec, lru);
 	put_cpu_var(lru_add_pvecs);
 }
 
-void lru_cache_add_active(struct page *page)
+/**
+ * lru_cache_add_lru - add a page to a page list
+ * @page: the page to be added to the LRU.
+ * @lru: the LRU list to which the page is added.
+ */
+void lru_cache_add_lru(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
+	if (PageActive(page)) {
+		ClearPageActive(page);
+	}
 
-	page_cache_get(page);
-	if (!pagevec_add(pvec, page))
-		__pagevec_lru_add_active(pvec);
-	put_cpu_var(lru_add_active_pvecs);
+	VM_BUG_ON(PageLRU(page) || PageActive(page));
+	__lru_cache_add(page, lru);
 }
 
 /*
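With the list passed explicitly, the removed entry points can survive
as thin wrappers for existing callers; the companion header change
presumably provides something like:

	/* Assumed compatibility wrappers, not part of this diff. */
	static inline void lru_cache_add(struct page *page)
	{
		__lru_cache_add(page, LRU_INACTIVE);
	}

	static inline void lru_cache_add_active(struct page *page)
	{
		__lru_cache_add(page, LRU_ACTIVE);
	}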
@@ -217,15 +217,15 @@ void lru_cache_add_active(struct page *page)
  */
 static void drain_cpu_pagevecs(int cpu)
 {
+	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
 	struct pagevec *pvec;
+	int lru;
 
-	pvec = &per_cpu(lru_add_pvecs, cpu);
-	if (pagevec_count(pvec))
-		__pagevec_lru_add(pvec);
-
-	pvec = &per_cpu(lru_add_active_pvecs, cpu);
-	if (pagevec_count(pvec))
-		__pagevec_lru_add_active(pvec);
+	for_each_lru(lru) {
+		pvec = &pvecs[lru - LRU_BASE];
+		if (pagevec_count(pvec))
+			____pagevec_lru_add(pvec, lru);
+	}
 
 	pvec = &per_cpu(lru_rotate_pvecs, cpu);
 	if (pagevec_count(pvec)) {
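For reference, drain_cpu_pagevecs() is reached through lru_add_drain(),
which this patch leaves untouched; roughly:

	/* Existing caller, unchanged by this patch. */
	void lru_add_drain(void)
	{
		drain_cpu_pagevecs(get_cpu());
		put_cpu();
	}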
@@ -380,7 +380,7 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them. Reinitialises the caller's pagevec.
  */
-void __pagevec_lru_add(struct pagevec *pvec)
+void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
 	int i;
 	struct zone *zone = NULL;
@@ -397,7 +397,9 @@ void __pagevec_lru_add(struct pagevec *pvec)
 		}
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		add_page_to_inactive_list(zone, page);
+		if (is_active_lru(lru))
+			SetPageActive(page);
+		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
 		spin_unlock_irq(&zone->lru_lock);
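add_page_to_lru_list() folds the old add_page_to_{in,}active_list()
pair into one indexed helper. Assuming struct zone grows a matching
lru[NR_LRU_LISTS] array with per-list vmstat counters, the companion
definition is expected to look roughly like:

	/* Assumed companion definition, not part of this diff;
	 * NR_LRU_BASE is assumed to index the per-list counters. */
	static inline void
	add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
	{
		list_add(&page->lru, &zone->lru[l].list);
		__inc_zone_state(zone, NR_LRU_BASE + l);
	}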
@@ -405,34 +407,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
 	pagevec_reinit(pvec);
 }
 
-EXPORT_SYMBOL(__pagevec_lru_add);
-
-void __pagevec_lru_add_active(struct pagevec *pvec)
-{
-	int i;
-	struct zone *zone = NULL;
-
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-		struct zone *pagezone = page_zone(page);
-
-		if (pagezone != zone) {
-			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
-		}
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
-		VM_BUG_ON(PageActive(page));
-		SetPageActive(page);
-		add_page_to_active_list(zone, page);
-	}
-	if (zone)
-		spin_unlock_irq(&zone->lru_lock);
-	release_pages(pvec->pages, pvec->nr, pvec->cold);
-	pagevec_reinit(pvec);
-}
+EXPORT_SYMBOL(____pagevec_lru_add);
 
 /*
  * Try to drop buffers from the pages in a pagevec
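Only ____pagevec_lru_add() remains as the worker. The old exported
names presumably stay available as wrappers so out-of-file callers
keep building, e.g.:

	/* Assumed compatibility wrappers, not part of this diff. */
	static inline void __pagevec_lru_add(struct pagevec *pvec)
	{
		____pagevec_lru_add(pvec, LRU_INACTIVE);
	}

	static inline void __pagevec_lru_add_active(struct pagevec *pvec)
	{
		____pagevec_lru_add(pvec, LRU_ACTIVE);
	}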