author		Christoph Lameter <clameter@sgi.com>	2006-01-18 20:42:30 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-18 22:20:17 -0500
commit		f1fd1067ece574ab56e4a70878b9a5a1ed4c3c42
tree		f7f9d092ac0ed8bd734245d803b563b5e751c8ad
parent		fc3012896337c83a056c496d7cfb0072e1591181
[PATCH] Zone reclaim: resurrect may_swap
Zone reclaim has a huge impact on NUMA performance (e.g. our maximum
throughput with XFS is raised from 4 GB/sec to 6 GB/sec; page cache
contamination of NUMA nodes destroys locality if one just does a large copy
operation, which leaves performance degraded for good until reboot).
This patch:
Resurrect may_swap in struct scan_control
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
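
For context: this patch only adds the field back and keeps may_swap = 1 in the
existing callers; the benefit comes once a reclaim path actually clears it. A
minimal sketch of how a zone-reclaim style caller could use may_swap to keep
anonymous pages off swap follows; the caller itself is hypothetical, and only
gfp_mask, may_writepage and may_swap are taken from this diff.

	/*
	 * Illustrative only: a reclaim caller that must not touch swap
	 * clears may_swap before invoking the shrinking code, so the new
	 * check in shrink_list() sends anonymous pages to keep_locked
	 * instead of calling add_to_swap().  Field names follow the
	 * 2.6.16-era struct scan_control; this caller is hypothetical.
	 */
	struct scan_control sc = {
		.gfp_mask      = GFP_KERNEL,
		.may_writepage = 0,
		.may_swap      = 0,	/* do not allocate swap space in this pass */
	};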
 mm/vmscan.c | 7 +++++++
 1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 827bf674577a..e5117b6897a9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -71,6 +71,9 @@ struct scan_control {
 
 	int may_writepage;
 
+	/* Can pages be swapped as part of reclaim? */
+	int may_swap;
+
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
 	 * In this context, it doesn't matter that we scan the
@@ -458,6 +461,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!sc->may_swap)
+				goto keep_locked;
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
 		}
@@ -1166,6 +1171,7 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 
 	sc.gfp_mask = gfp_mask;
 	sc.may_writepage = 0;
+	sc.may_swap = 1;
 
 	inc_page_state(allocstall);
 
@@ -1268,6 +1274,7 @@ loop_again:
 	total_reclaimed = 0;
 	sc.gfp_mask = GFP_KERNEL;
 	sc.may_writepage = 0;
+	sc.may_swap = 1;
 	sc.nr_mapped = read_page_state(nr_mapped);
 
 	inc_page_state(pageoutrun);