diff options
| author | Rik van Riel <riel@redhat.com> | 2008-10-18 23:26:23 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-20 11:50:25 -0400 |
commit | 68a22394c286a2daf06ee8d65d8835f738faefa5 (patch) | |
tree | 1fb91d5bf57a1f6d1cabaac0a6f5d86060ebecb5 /mm | |
parent | f04e9ebbe4909f9a41efd55149bc353299f4e83b (diff) |
vmscan: free swap space on swap-in/activation
If vm_swap_full() (swap space more than 50% full), the system will free
swap space at swapin time. With this patch, the system will also free the
swap space in the pageout code, when we decide that the page is not a
candidate for swapout (and just wasting swap space).
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: MinChan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/swap.c | 24 |
| -rw-r--r-- | mm/swapfile.c | 25 |
| -rw-r--r-- | mm/vmscan.c | 7 |
3 files changed, 53 insertions, 3 deletions
diff --git a/mm/swap.c b/mm/swap.c
@@ -428,6 +428,30 @@ void pagevec_strip(struct pagevec *pvec)
428 | } | 428 | } |
429 | 429 | ||
430 | /** | 430 | /** |
431 | * pagevec_swap_free - try to free swap space from the pages in a pagevec | ||
432 | * @pvec: pagevec with swapcache pages to free the swap space of | ||
433 | * | ||
434 | * The caller needs to hold an extra reference to each page and | ||
435 | * not hold the page lock on the pages. This function uses a | ||
436 | * trylock on the page lock so it may not always free the swap | ||
437 | * space associated with a page. | ||
438 | */ | ||
439 | void pagevec_swap_free(struct pagevec *pvec) | ||
440 | { | ||
441 | int i; | ||
442 | |||
443 | for (i = 0; i < pagevec_count(pvec); i++) { | ||
444 | struct page *page = pvec->pages[i]; | ||
445 | |||
446 | if (PageSwapCache(page) && trylock_page(page)) { | ||
447 | if (PageSwapCache(page)) | ||
448 | remove_exclusive_swap_page_ref(page); | ||
449 | unlock_page(page); | ||
450 | } | ||
451 | } | ||
452 | } | ||
453 | |||
454 | /** | ||
431 | * pagevec_lookup - gang pagecache lookup | 455 | * pagevec_lookup - gang pagecache lookup |
432 | * @pvec: Where the resulting pages are placed | 456 | * @pvec: Where the resulting pages are placed |
433 | * @mapping: The address_space to search | 457 | * @mapping: The address_space to search |
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1e330f2998fa..2a97fafa3d89 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -344,7 +344,7 @@ int can_share_swap_page(struct page *page) | |||
344 | * Work out if there are any other processes sharing this | 344 | * Work out if there are any other processes sharing this |
345 | * swap cache page. Free it if you can. Return success. | 345 | * swap cache page. Free it if you can. Return success. |
346 | */ | 346 | */ |
347 | int remove_exclusive_swap_page(struct page *page) | 347 | static int remove_exclusive_swap_page_count(struct page *page, int count) |
348 | { | 348 | { |
349 | int retval; | 349 | int retval; |
350 | struct swap_info_struct * p; | 350 | struct swap_info_struct * p; |
@@ -357,7 +357,7 @@ int remove_exclusive_swap_page(struct page *page) | |||
357 | return 0; | 357 | return 0; |
358 | if (PageWriteback(page)) | 358 | if (PageWriteback(page)) |
359 | return 0; | 359 | return 0; |
360 | if (page_count(page) != 2) /* 2: us + cache */ | 360 | if (page_count(page) != count) /* us + cache + ptes */ |
361 | return 0; | 361 | return 0; |
362 | 362 | ||
363 | entry.val = page_private(page); | 363 | entry.val = page_private(page); |
@@ -370,7 +370,7 @@ int remove_exclusive_swap_page(struct page *page) | |||
370 | if (p->swap_map[swp_offset(entry)] == 1) { | 370 | if (p->swap_map[swp_offset(entry)] == 1) { |
371 | /* Recheck the page count with the swapcache lock held.. */ | 371 | /* Recheck the page count with the swapcache lock held.. */ |
372 | spin_lock_irq(&swapper_space.tree_lock); | 372 | spin_lock_irq(&swapper_space.tree_lock); |
373 | if ((page_count(page) == 2) && !PageWriteback(page)) { | 373 | if ((page_count(page) == count) && !PageWriteback(page)) { |
374 | __delete_from_swap_cache(page); | 374 | __delete_from_swap_cache(page); |
375 | SetPageDirty(page); | 375 | SetPageDirty(page); |
376 | retval = 1; | 376 | retval = 1; |
@@ -388,6 +388,25 @@ int remove_exclusive_swap_page(struct page *page) | |||
388 | } | 388 | } |
389 | 389 | ||
390 | /* | 390 | /* |
391 | * Most of the time the page should have two references: one for the | ||
392 | * process and one for the swap cache. | ||
393 | */ | ||
394 | int remove_exclusive_swap_page(struct page *page) | ||
395 | { | ||
396 | return remove_exclusive_swap_page_count(page, 2); | ||
397 | } | ||
398 | |||
399 | /* | ||
400 | * The pageout code holds an extra reference to the page. That raises | ||
401 | * the reference count to test for to 2 for a page that is only in the | ||
402 | * swap cache plus 1 for each process that maps the page. | ||
403 | */ | ||
404 | int remove_exclusive_swap_page_ref(struct page *page) | ||
405 | { | ||
406 | return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page)); | ||
407 | } | ||
408 | |||
409 | /* | ||
391 | * Free the swap entry like above, but also try to | 410 | * Free the swap entry like above, but also try to |
392 | * free the page cache entry if it is the last user. | 411 | * free the page cache entry if it is the last user. |
393 | */ | 412 | */ |
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 46fdaa546b8d..e656035d3406 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -647,6 +647,9 @@ free_it: | |||
647 | continue; | 647 | continue; |
648 | 648 | ||
649 | activate_locked: | 649 | activate_locked: |
650 | /* Not a candidate for swapping, so reclaim swap space. */ | ||
651 | if (PageSwapCache(page) && vm_swap_full()) | ||
652 | remove_exclusive_swap_page_ref(page); | ||
650 | SetPageActive(page); | 653 | SetPageActive(page); |
651 | pgactivate++; | 654 | pgactivate++; |
652 | keep_locked: | 655 | keep_locked: |
@@ -1228,6 +1231,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, | |||
1228 | __mod_zone_page_state(zone, NR_ACTIVE, pgmoved); | 1231 | __mod_zone_page_state(zone, NR_ACTIVE, pgmoved); |
1229 | pgmoved = 0; | 1232 | pgmoved = 0; |
1230 | spin_unlock_irq(&zone->lru_lock); | 1233 | spin_unlock_irq(&zone->lru_lock); |
1234 | if (vm_swap_full()) | ||
1235 | pagevec_swap_free(&pvec); | ||
1231 | __pagevec_release(&pvec); | 1236 | __pagevec_release(&pvec); |
1232 | spin_lock_irq(&zone->lru_lock); | 1237 | spin_lock_irq(&zone->lru_lock); |
1233 | } | 1238 | } |
@@ -1237,6 +1242,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, | |||
1237 | __count_zone_vm_events(PGREFILL, zone, pgscanned); | 1242 | __count_zone_vm_events(PGREFILL, zone, pgscanned); |
1238 | __count_vm_events(PGDEACTIVATE, pgdeactivate); | 1243 | __count_vm_events(PGDEACTIVATE, pgdeactivate); |
1239 | spin_unlock_irq(&zone->lru_lock); | 1244 | spin_unlock_irq(&zone->lru_lock); |
1245 | if (vm_swap_full()) | ||
1246 | pagevec_swap_free(&pvec); | ||
1240 | 1247 | ||
1241 | pagevec_release(&pvec); | 1248 | pagevec_release(&pvec); |
1242 | } | 1249 | } |