| field | value | date |
|---|---|---|
| author | Ingo Molnar <mingo@elte.hu> | 2008-08-14 06:19:59 -0400 |
| committer | Ingo Molnar <mingo@elte.hu> | 2008-08-14 06:19:59 -0400 |
| commit | 8d7ccaa545490cdffdfaff0842436a8dd85cf47b (patch) | |
| tree | 8129b5907161bc6ae26deb3645ce1e280c5e1f51 /mm/swap.c | |
| parent | b2139aa0eec330c711c5a279db361e5ef1178e78 (diff) | |
| parent | 30a2f3c60a84092c8084dfe788b710f8d0768cd4 (diff) | |
Merge commit 'v2.6.27-rc3' into x86/prototypes
Conflicts:
include/asm-x86/dma-mapping.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/swap.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/swap.c | 17 |

1 file changed, 9 insertions(+), 8 deletions(-)
```diff
@@ -34,9 +34,9 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
-static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
-static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
+static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 
 /*
  * This path almost never happens for VM activity - pages are normally
```
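This hunk only drops the explicit `= { 0, }` initializers: objects with static storage duration are zero-initialized anyway, so the per-CPU pagevec definitions start out empty either way. A minimal userspace sketch of that rule, assuming a stand-in struct rather than the kernel's `struct pagevec`:

```c
/*
 * Illustrative sketch (not kernel code): file-scope objects with static
 * storage duration are zero-initialized by the C standard, so an explicit
 * "= { 0, }" on such a definition is redundant.
 */
#include <assert.h>
#include <stdio.h>

struct pagevec_like {                 /* hypothetical stand-in for struct pagevec */
        unsigned long nr;
        void *pages[14];
};

static struct pagevec_like implicit;                 /* zero-initialized by default */
static struct pagevec_like explicit_init = { 0, };   /* same contents, spelled out */

int main(void)
{
        assert(implicit.nr == 0 && implicit.pages[0] == NULL);
        assert(explicit_init.nr == 0);
        printf("both definitions start out all-zero\n");
        return 0;
}
```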
```diff
@@ -278,9 +278,10 @@ int lru_add_drain_all(void)
  * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
  * for the remainder of the operation.
  *
- * The locking in this function is against shrink_cache(): we recheck the
- * page count inside the lock to see whether shrink_cache grabbed the page
- * via the LRU. If it did, give up: shrink_cache will free it.
+ * The locking in this function is against shrink_inactive_list(): we recheck
+ * the page count inside the lock to see whether shrink_inactive_list()
+ * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
+ * will free it.
  */
 void release_pages(struct page **pages, int nr, int cold)
 {
```
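The reworded comment describes a check-then-recheck-under-lock protocol: only free a page if, after taking the LRU lock, its reference count is still zero, because the reclaim side (`shrink_inactive_list()`) may have grabbed it via the LRU in the meantime. A rough userspace sketch of that pattern, with invented names (`fake_page`, `lru_lock`, `try_free_from_lru`) standing in for the kernel structures:

```c
/*
 * Sketch of the "recheck under the lock" pattern, not the kernel code:
 * drop our reference, and if it looks like the last one, take the lock
 * and recheck that no concurrent reclaimer re-took a reference first.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct fake_page {
        atomic_int refcount;
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static bool try_free_from_lru(struct fake_page *page)
{
        if (atomic_fetch_sub(&page->refcount, 1) != 1)
                return false;                   /* other references remain */

        pthread_mutex_lock(&lru_lock);
        /* Recheck: did a concurrent "reclaimer" re-take a reference? */
        if (atomic_load(&page->refcount) != 0) {
                pthread_mutex_unlock(&lru_lock);
                return false;                   /* give up: the other side frees it */
        }
        /* still unreferenced: safe to take it off the LRU and free it */
        pthread_mutex_unlock(&lru_lock);
        return true;
}

int main(void)
{
        struct fake_page page = { .refcount = 1 };
        return try_free_from_lru(&page) ? 0 : 1;
}
```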
```diff
@@ -443,7 +444,7 @@ void pagevec_strip(struct pagevec *pvec)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		if (PagePrivate(page) && !TestSetPageLocked(page)) {
+		if (PagePrivate(page) && trylock_page(page)) {
 			if (PagePrivate(page))
 				try_to_release_page(page, 0);
 			unlock_page(page);
```
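`trylock_page()` is the newer spelling of the old `!TestSetPageLocked()` idiom: try to take the page lock without sleeping and simply skip the page if someone else holds it, rechecking `PagePrivate()` once the lock is held. A small sketch of that trylock-and-recheck shape using POSIX mutexes (the `fake_page` type and `strip_if_possible()` are illustrative only, not kernel API):

```c
/*
 * Sketch of the trylock pattern: attempt the lock without blocking, skip
 * the object if it is already locked, and recheck the flag under the lock
 * because it may have been cleared in between.
 */
#include <pthread.h>
#include <stdbool.h>

struct fake_page {
        pthread_mutex_t lock;
        bool has_private_data;
};

static void strip_if_possible(struct fake_page *page)
{
        if (page->has_private_data &&
            pthread_mutex_trylock(&page->lock) == 0) {
                /* recheck under the lock before dropping private data */
                if (page->has_private_data)
                        page->has_private_data = false;
                pthread_mutex_unlock(&page->lock);
        }
}

int main(void)
{
        static struct fake_page page = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .has_private_data = true,
        };
        strip_if_possible(&page);
        return page.has_private_data;   /* 0 once the data was stripped */
}
```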
```diff
@@ -493,7 +494,7 @@ EXPORT_SYMBOL(pagevec_lookup_tag);
  */
 #define ACCT_THRESHOLD	max(16, NR_CPUS * 2)
 
-static DEFINE_PER_CPU(long, committed_space) = 0;
+static DEFINE_PER_CPU(long, committed_space);
 
 void vm_acct_memory(long pages)
 {
```
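`committed_space` is a per-CPU batch counter: each CPU accumulates small deltas locally and only folds them into the global total once the batch exceeds `ACCT_THRESHOLD`, keeping contention on the shared counter low. A hedged userspace sketch of that batching idea, using thread-local storage in place of `DEFINE_PER_CPU`; the threshold value and the flush logic here are illustrative, not copied from the kernel:

```c
/*
 * Sketch of a batched per-CPU counter: deltas accumulate in a thread-local
 * variable and are flushed to the shared atomic only once they grow past a
 * threshold in either direction.
 */
#include <stdatomic.h>

#define ACCT_THRESHOLD 32               /* the kernel uses max(16, NR_CPUS * 2) */

static atomic_long vm_committed_space;       /* shared total */
static _Thread_local long committed_space;   /* per-thread (per-"CPU") batch */

static void vm_acct_memory(long pages)
{
        committed_space += pages;
        if (committed_space > ACCT_THRESHOLD ||
            committed_space < -ACCT_THRESHOLD) {
                atomic_fetch_add(&vm_committed_space, committed_space);
                committed_space = 0;
        }
}

int main(void)
{
        vm_acct_memory(+8);     /* stays in the local batch */
        vm_acct_memory(+40);    /* crosses the threshold, gets flushed */
        return atomic_load(&vm_committed_space) == 48 ? 0 : 1;
}
```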