| author | Christoph Lameter <clameter@sgi.com> | 2006-06-30 04:55:34 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-30 14:25:34 -0400 |
| commit | 65ba55f500a37272985d071c9bbb35256a2f7c14 | |
| tree | e7735326ef2d2dca9d00a6c5ae47e9eb03c7834f /mm | |
| parent | 2244b95a7bcf8d24196f8a3a44187ba5dfff754c | |
[PATCH] zoned vm counters: convert nr_mapped to per zone counter
nr_mapped is important because it lets us determine how many pages of a zone
are not mapped, which in turn enables a more efficient decision about when
memory in a zone needs to be reclaimed.
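For illustration, a minimal sketch (not part of this patch) of the kind of per-zone question this enables. `zone_unmapped_pages()` is a hypothetical helper invented here; `zone_page_state()` is the per-zone read primitive introduced by the parent commit (2244b95a7bcf):

```c
/*
 * Hypothetical helper, for illustration only: with NR_FILE_MAPPED
 * maintained per zone, the unmapped pages of a single zone can be
 * estimated directly, without summing per-CPU state for the whole
 * system and without touching any other zone.
 */
static unsigned long zone_unmapped_pages(struct zone *zone)
{
	/* present_pages: pages physically present in this zone */
	return zone->present_pages - zone_page_state(zone, NR_FILE_MAPPED);
}
```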
We take the nr_mapped field out of the page state structure and define a new
per zone counter named NR_FILE_MAPPED (the anonymous pages will be split off
from NR_MAPPED in the next patch).
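As a rough sketch of what the conversion looks like at a call site, using only the calls that actually appear in the diff below (the function bodies themselves are not shown here):

```c
/*
 * Sketch only, not a complete kernel excerpt.
 *
 * Old: nr_mapped was a field of the global, per-CPU page_state
 * structure, updated and read through the page_state accessors.
 */
__inc_page_state(nr_mapped);                  /* update this CPU's delta */
mapped = read_page_state(nr_mapped);          /* read: loops over CPUs   */

/*
 * New: NR_FILE_MAPPED is a per-zone counter, keyed by the zone the
 * page belongs to, with a pre-aggregated global total.
 */
__inc_zone_page_state(page, NR_FILE_MAPPED);  /* update the page's zone  */
mapped = global_page_state(NR_FILE_MAPPED);   /* read: no per-CPU loop   */
```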
We replace the use of nr_mapped at the various call sites in the kernel. This
avoids looping over all processors in try_to_free_pages(), in writeback, and
in reclaim (swap + zone reclaim).
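A small, runnable userspace model of why this matters (not kernel code; all names are invented for illustration): the old read_page_state() had to sum a per-CPU counter across every processor on each call, while the new scheme keeps an already-aggregated total that a reader fetches in O(1):

```c
#include <stdio.h>

#define NR_CPUS 4

/* Old scheme (modeled): each CPU increments its own slot, so a
 * consistent global value requires summing over all CPUs. */
static long per_cpu_nr_mapped[NR_CPUS];

/* New scheme (modeled): updates are also folded into one aggregated
 * counter, so readers never loop over CPUs. */
static long nr_file_mapped_total;

static long read_page_state_model(void)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)   /* O(NR_CPUS) per read */
		sum += per_cpu_nr_mapped[cpu];
	return sum;
}

static long global_page_state_model(void)
{
	return nr_file_mapped_total;              /* O(1) per read */
}

int main(void)
{
	/* Simulate a few mapping events happening on different CPUs. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		per_cpu_nr_mapped[cpu] += cpu + 1;
		nr_file_mapped_total += cpu + 1;
	}
	printf("old-style read: %ld\n", read_page_state_model());
	printf("new-style read: %ld\n", global_page_state_model());
	return 0;
}
```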
[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')

```
 mm/page-writeback.c | 2 +-
 mm/page_alloc.c     | 2 +-
 mm/rmap.c           | 6 +++---
 mm/vmscan.c         | 8 ++++----
 mm/vmstat.c         | 2 +-
 5 files changed, 10 insertions(+), 10 deletions(-)
```
```diff
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4ec7026c7bab..60c7244c42e4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -111,7 +111,7 @@ static void get_writeback_state(struct writeback_state *wbs)
 {
 	wbs->nr_dirty = read_page_state(nr_dirty);
 	wbs->nr_unstable = read_page_state(nr_unstable);
-	wbs->nr_mapped = read_page_state(nr_mapped);
+	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
 	wbs->nr_writeback = read_page_state(nr_writeback);
 }
 
```
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3a877fecc300..04dd2b01b2b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1319,7 +1319,7 @@ void show_free_areas(void)
 		ps.nr_unstable,
 		nr_free_pages(),
 		ps.nr_slab,
-		ps.nr_mapped,
+		global_page_state(NR_FILE_MAPPED),
 		ps.nr_page_table_pages);
 
 	for_each_zone(zone) {
```
```diff
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_page_state(nr_mapped);
+	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -499,7 +499,7 @@ void page_add_new_anon_rmap(struct page *page,
 void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount))
-		__inc_page_state(nr_mapped);
+		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -531,7 +531,7 @@ void page_remove_rmap(struct page *page)
 	 */
 	if (page_test_and_clear_dirty(page))
 		set_page_dirty(page);
-	__dec_page_state(nr_mapped);
+	__dec_zone_page_state(page, NR_FILE_MAPPED);
 }
 }
 
```
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eeacb0d695c3..d2caf7471cf1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -990,7 +990,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-		sc.nr_mapped = read_page_state(nr_mapped);
+		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
@@ -1075,7 +1075,7 @@ loop_again:
 	total_scanned = 0;
 	nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode;
-	sc.nr_mapped = read_page_state(nr_mapped);
+	sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 
 	inc_page_state(pageoutrun);
 
@@ -1407,7 +1407,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (prio = DEF_PRIORITY; prio >= 0; prio--) {
 		unsigned long nr_to_scan = nr_pages - ret;
 
-		sc.nr_mapped = read_page_state(nr_mapped);
+		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
 
 		ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
@@ -1548,7 +1548,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
-		.nr_mapped = read_page_state(nr_mapped),
+		.nr_mapped = global_page_state(NR_FILE_MAPPED),
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 				SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
```
```diff
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 210f9bbbb04f..4800091c129a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -401,13 +401,13 @@ struct seq_operations fragmentation_op = {
 
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_mapped",
 
 	/* Page state */
 	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
 	"nr_page_table_pages",
-	"nr_mapped",
 	"nr_slab",
 
 	"pgpgin",
```
