path: root/mm/swap.c
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  172
1 file changed, 115 insertions, 57 deletions
diff --git a/mm/swap.c b/mm/swap.c
index 9e0cb3118079..2152e48a7b8f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,11 +31,12 @@
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 
+#include "internal.h"
+
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs);
+static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 
 /*
@@ -116,8 +117,9 @@ static void pagevec_move_tail(struct pagevec *pvec)
                         zone = pagezone;
                         spin_lock(&zone->lru_lock);
                 }
-                if (PageLRU(page) && !PageActive(page)) {
-                        list_move_tail(&page->lru, &zone->inactive_list);
+                if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+                        int lru = page_is_file_cache(page);
+                        list_move_tail(&page->lru, &zone->lru[lru].list);
                         pgmoved++;
                 }
         }
@@ -136,7 +138,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
 void rotate_reclaimable_page(struct page *page)
 {
         if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
-            PageLRU(page)) {
+            !PageUnevictable(page) && PageLRU(page)) {
                 struct pagevec *pvec;
                 unsigned long flags;
 
@@ -157,12 +159,19 @@ void activate_page(struct page *page)
         struct zone *zone = page_zone(page);
 
         spin_lock_irq(&zone->lru_lock);
-        if (PageLRU(page) && !PageActive(page)) {
-                del_page_from_inactive_list(zone, page);
+        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+                int file = page_is_file_cache(page);
+                int lru = LRU_BASE + file;
+                del_page_from_lru_list(zone, page, lru);
+
                 SetPageActive(page);
-                add_page_to_active_list(zone, page);
+                lru += LRU_ACTIVE;
+                add_page_to_lru_list(zone, page, lru);
                 __count_vm_event(PGACTIVATE);
-                mem_cgroup_move_lists(page, true);
+                mem_cgroup_move_lists(page, lru);
+
+                zone->recent_rotated[!!file]++;
+                zone->recent_scanned[!!file]++;
         }
         spin_unlock_irq(&zone->lru_lock);
 }
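
The activate_page() hunk above moves a page between lists purely by index arithmetic on enum lru_list: lru = LRU_BASE + file selects the inactive list of the page's type, and lru += LRU_ACTIVE shifts to the matching active list. The standalone C sketch below models only that arithmetic; the LRU_BASE/LRU_ACTIVE/LRU_FILE values and the enum layout are assumptions about the 2.6.28-era <linux/mmzone.h>, and page_is_file_cache() is assumed to return LRU_FILE for file-backed pages and 0 for anonymous ones.

#include <stdio.h>

#define LRU_BASE        0
#define LRU_ACTIVE      1
#define LRU_FILE        2

enum lru_list {
        LRU_INACTIVE_ANON = LRU_BASE,
        LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
        LRU_UNEVICTABLE,
        NR_LRU_LISTS
};

/* 'file' mimics page_is_file_cache(): LRU_FILE for file pages, 0 for anon */
static enum lru_list activate_target(int file)
{
        int lru = LRU_BASE + file;      /* inactive list of the page's type */

        lru += LRU_ACTIVE;              /* promote to the matching active list */
        return lru;
}

int main(void)
{
        printf("anon: inactive %d -> active %d (expect %d)\n",
               LRU_INACTIVE_ANON, activate_target(0), LRU_ACTIVE_ANON);
        printf("file: inactive %d -> active %d (expect %d)\n",
               LRU_INACTIVE_FILE, activate_target(LRU_FILE), LRU_ACTIVE_FILE);
        /* recent_rotated[]/recent_scanned[] use !!file to get a 0/1 index */
        printf("!!file: anon=%d file=%d\n", !!0, !!LRU_FILE);
        return 0;
}

Compiled with any C compiler this prints 0 -> 1 for anonymous pages and 2 -> 3 for file pages, matching the enum values the patch indexes into zone->lru[].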
@@ -176,7 +185,8 @@ void activate_page(struct page *page)
  */
 void mark_page_accessed(struct page *page)
 {
-        if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
+        if (!PageActive(page) && !PageUnevictable(page) &&
+                        PageReferenced(page) && PageLRU(page)) {
                 activate_page(page);
                 ClearPageReferenced(page);
         } else if (!PageReferenced(page)) {
@@ -186,28 +196,73 @@ void mark_page_accessed(struct page *page)
 
 EXPORT_SYMBOL(mark_page_accessed);
 
-/**
- * lru_cache_add: add a page to the page lists
- * @page: the page to add
- */
-void lru_cache_add(struct page *page)
+void __lru_cache_add(struct page *page, enum lru_list lru)
 {
-        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
 
         page_cache_get(page);
         if (!pagevec_add(pvec, page))
-                __pagevec_lru_add(pvec);
+                ____pagevec_lru_add(pvec, lru);
         put_cpu_var(lru_add_pvecs);
 }
 
-void lru_cache_add_active(struct page *page)
+/**
+ * lru_cache_add_lru - add a page to a page list
+ * @page: the page to be added to the LRU.
+ * @lru: the LRU list to which the page is added.
+ */
+void lru_cache_add_lru(struct page *page, enum lru_list lru)
 {
-        struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
+        if (PageActive(page)) {
+                VM_BUG_ON(PageUnevictable(page));
+                ClearPageActive(page);
+        } else if (PageUnevictable(page)) {
+                VM_BUG_ON(PageActive(page));
+                ClearPageUnevictable(page);
+        }
 
-        page_cache_get(page);
-        if (!pagevec_add(pvec, page))
-                __pagevec_lru_add_active(pvec);
-        put_cpu_var(lru_add_active_pvecs);
+        VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
+        __lru_cache_add(page, lru);
+}
+
+/**
+ * add_page_to_unevictable_list - add a page to the unevictable list
+ * @page: the page to be added to the unevictable list
+ *
+ * Add page directly to its zone's unevictable list. To avoid races with
+ * tasks that might be making the page evictable, through eg. munlock,
+ * munmap or exit, while it's not on the lru, we want to add the page
+ * while it's locked or otherwise "invisible" to other tasks. This is
+ * difficult to do when using the pagevec cache, so bypass that.
+ */
+void add_page_to_unevictable_list(struct page *page)
+{
+        struct zone *zone = page_zone(page);
+
+        spin_lock_irq(&zone->lru_lock);
+        SetPageUnevictable(page);
+        SetPageLRU(page);
+        add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+        spin_unlock_irq(&zone->lru_lock);
+}
+
+/**
+ * lru_cache_add_active_or_unevictable
+ * @page: the page to be added to LRU
+ * @vma: vma in which page is mapped for determining reclaimability
+ *
+ * place @page on active or unevictable LRU list, depending on
+ * page_evictable(). Note that if the page is not evictable,
+ * it goes directly back onto it's zone's unevictable list. It does
+ * NOT use a per cpu pagevec.
+ */
+void lru_cache_add_active_or_unevictable(struct page *page,
+                                        struct vm_area_struct *vma)
+{
+        if (page_evictable(page, vma))
+                lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+        else
+                add_page_to_unevictable_list(page);
 }
 
 /*
@@ -217,15 +272,15 @@ void lru_cache_add_active(struct page *page)
  */
 static void drain_cpu_pagevecs(int cpu)
 {
+        struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
         struct pagevec *pvec;
+        int lru;
 
-        pvec = &per_cpu(lru_add_pvecs, cpu);
-        if (pagevec_count(pvec))
-                __pagevec_lru_add(pvec);
-
-        pvec = &per_cpu(lru_add_active_pvecs, cpu);
-        if (pagevec_count(pvec))
-                __pagevec_lru_add_active(pvec);
+        for_each_lru(lru) {
+                pvec = &pvecs[lru - LRU_BASE];
+                if (pagevec_count(pvec))
+                        ____pagevec_lru_add(pvec, lru);
+        }
 
         pvec = &per_cpu(lru_rotate_pvecs, cpu);
         if (pagevec_count(pvec)) {
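
With lru_add_pvecs now an array of pagevecs, one per evictable LRU list, __lru_cache_add() batches pages into the per-CPU vector for its list and drain_cpu_pagevecs() walks the whole array. The following is a minimal single-threaded userspace sketch of that batching shape only; PAGEVEC_SIZE, the flush function and the placeholder page pointer are stand-ins, not the kernel's implementation.

#include <stdio.h>

#define PAGEVEC_SIZE    14
#define NR_LRU_LISTS    5

struct pagevec {
        unsigned int nr;
        void *pages[PAGEVEC_SIZE];
};

/* one vector per LRU list; in the kernel this whole array is per-CPU */
static struct pagevec lru_add_pvecs[NR_LRU_LISTS];

static int dummy_page;  /* placeholder standing in for a struct page */

/* stand-in for ____pagevec_lru_add(): splice the batch onto the zone list */
static void flush_pagevec(struct pagevec *pvec, int lru)
{
        printf("flush %u pages to lru list %d\n", pvec->nr, lru);
        pvec->nr = 0;
}

/* shape of __lru_cache_add(): stash the page, flush only when the batch fills */
static void lru_cache_add_model(void *page, int lru)
{
        struct pagevec *pvec = &lru_add_pvecs[lru];

        pvec->pages[pvec->nr++] = page;
        if (pvec->nr == PAGEVEC_SIZE)
                flush_pagevec(pvec, lru);
}

/* shape of drain_cpu_pagevecs(): walk every list's vector */
static void drain_model(void)
{
        int lru;

        for (lru = 0; lru < NR_LRU_LISTS; lru++)
                if (lru_add_pvecs[lru].nr)
                        flush_pagevec(&lru_add_pvecs[lru], lru);
}

int main(void)
{
        int i;

        for (i = 0; i < 20; i++)
                lru_cache_add_model(&dummy_page, i % 2);
        drain_model();
        return 0;
}

The real code additionally disables preemption around the per-CPU access (get_cpu_var()/put_cpu_var()) and takes zone->lru_lock only once per flushed batch, which is the point of the batching.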
@@ -244,7 +299,7 @@ void lru_add_drain(void)
         put_cpu();
 }
 
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) || defined(CONFIG_UNEVICTABLE_LRU)
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
         lru_add_drain();
@@ -308,6 +363,7 @@ void release_pages(struct page **pages, int nr, int cold)
 
                 if (PageLRU(page)) {
                         struct zone *pagezone = page_zone(page);
+
                         if (pagezone != zone) {
                                 if (zone)
                                         spin_unlock_irqrestore(&zone->lru_lock,
@@ -380,10 +436,11 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them. Reinitialises the caller's pagevec.
  */
-void __pagevec_lru_add(struct pagevec *pvec)
+void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
         int i;
         struct zone *zone = NULL;
+        VM_BUG_ON(is_unevictable_lru(lru));
 
         for (i = 0; i < pagevec_count(pvec); i++) {
                 struct page *page = pvec->pages[i];
@@ -395,9 +452,13 @@ void __pagevec_lru_add(struct pagevec *pvec)
                         zone = pagezone;
                         spin_lock_irq(&zone->lru_lock);
                 }
+                VM_BUG_ON(PageActive(page));
+                VM_BUG_ON(PageUnevictable(page));
                 VM_BUG_ON(PageLRU(page));
                 SetPageLRU(page);
-                add_page_to_inactive_list(zone, page);
+                if (is_active_lru(lru))
+                        SetPageActive(page);
+                add_page_to_lru_list(zone, page, lru);
         }
         if (zone)
                 spin_unlock_irq(&zone->lru_lock);
@@ -405,48 +466,45 @@ void __pagevec_lru_add(struct pagevec *pvec)
         pagevec_reinit(pvec);
 }
 
-EXPORT_SYMBOL(__pagevec_lru_add);
+EXPORT_SYMBOL(____pagevec_lru_add);
 
-void __pagevec_lru_add_active(struct pagevec *pvec)
+/*
+ * Try to drop buffers from the pages in a pagevec
+ */
+void pagevec_strip(struct pagevec *pvec)
 {
         int i;
-        struct zone *zone = NULL;
 
         for (i = 0; i < pagevec_count(pvec); i++) {
                 struct page *page = pvec->pages[i];
-                struct zone *pagezone = page_zone(page);
 
-                if (pagezone != zone) {
-                        if (zone)
-                                spin_unlock_irq(&zone->lru_lock);
-                        zone = pagezone;
-                        spin_lock_irq(&zone->lru_lock);
+                if (PagePrivate(page) && trylock_page(page)) {
+                        if (PagePrivate(page))
+                                try_to_release_page(page, 0);
+                        unlock_page(page);
                 }
-                VM_BUG_ON(PageLRU(page));
-                SetPageLRU(page);
-                VM_BUG_ON(PageActive(page));
-                SetPageActive(page);
-                add_page_to_active_list(zone, page);
         }
-        if (zone)
-                spin_unlock_irq(&zone->lru_lock);
-        release_pages(pvec->pages, pvec->nr, pvec->cold);
-        pagevec_reinit(pvec);
 }
 
-/*
- * Try to drop buffers from the pages in a pagevec
+/**
+ * pagevec_swap_free - try to free swap space from the pages in a pagevec
+ * @pvec: pagevec with swapcache pages to free the swap space of
+ *
+ * The caller needs to hold an extra reference to each page and
+ * not hold the page lock on the pages. This function uses a
+ * trylock on the page lock so it may not always free the swap
+ * space associated with a page.
  */
-void pagevec_strip(struct pagevec *pvec)
+void pagevec_swap_free(struct pagevec *pvec)
 {
         int i;
 
         for (i = 0; i < pagevec_count(pvec); i++) {
                 struct page *page = pvec->pages[i];
 
-                if (PagePrivate(page) && trylock_page(page)) {
-                        if (PagePrivate(page))
-                                try_to_release_page(page, 0);
+                if (PageSwapCache(page) && trylock_page(page)) {
+                        if (PageSwapCache(page))
+                                remove_exclusive_swap_page_ref(page);
                         unlock_page(page);
                 }
         }
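
Both pagevec_strip() and the new pagevec_swap_free() follow the same opportunistic pattern: test the page flag without the lock, trylock_page(), re-test under the lock, do the work, unlock. A rough userspace analogue of that check/trylock/re-check sequence is sketched below using a pthread mutex; the flag and the "free swap" step stand in for PageSwapCache() and remove_exclusive_swap_page_ref() and are not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
        pthread_mutex_t lock;
        bool swapcache;         /* stands in for PageSwapCache() */
};

static void maybe_free_swap(struct fake_page *page)
{
        if (!page->swapcache)                   /* cheap unlocked check */
                return;
        if (pthread_mutex_trylock(&page->lock))
                return;                         /* contended: skip, never sleep */
        if (page->swapcache) {                  /* re-check now that we hold the lock */
                page->swapcache = false;        /* stands in for freeing the swap slot */
                printf("freed swap slot\n");
        }
        pthread_mutex_unlock(&page->lock);
}

int main(void)
{
        struct fake_page page = { .lock = PTHREAD_MUTEX_INITIALIZER, .swapcache = true };

        maybe_free_swap(&page);         /* frees the slot */
        maybe_free_swap(&page);         /* second call is a no-op */
        return 0;
}

Because the lock is only tried, a contended page is simply skipped, which is why the kerneldoc above warns that the function "may not always free the swap space associated with a page".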