aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
author    Christoph Lameter <clameter@sgi.com>  2006-06-30 04:55:38 -0400
committer Linus Torvalds <torvalds@g5.osdl.org> 2006-06-30 14:25:35 -0400
commit    9a865ffa34b6117a5e0b67640a084d8c2e198c93 (patch)
tree      c295d5a0831df81eeeded3834f32f513b9ae05c7 /mm
parent    34aa1330f9b3c5783d269851d467326525207422 (diff)
[PATCH] zoned vm counters: conversion of nr_slab to per zone counter
- Allows reclaim to access counter without looping over processor counts.
- Allows accurate statistics on how many pages are used in a zone by the slab.
  This may become useful to balance slab allocations over various zones.

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  2
-rw-r--r--  mm/slab.c        4
-rw-r--r--  mm/vmscan.c      2
-rw-r--r--  mm/vmstat.c      2
4 files changed, 5 insertions, 5 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8350720f98a8..a38a11cfb483 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1318,7 +1318,7 @@ void show_free_areas(void)
 		ps.nr_writeback,
 		ps.nr_unstable,
 		nr_free_pages(),
-		ps.nr_slab,
+		global_page_state(NR_SLAB),
 		global_page_state(NR_FILE_MAPPED),
 		ps.nr_page_table_pages);
diff --git a/mm/slab.c b/mm/slab.c
index 0c33820038cb..5dcfb9044801 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1507,7 +1507,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		atomic_add(nr_pages, &slab_reclaim_pages);
-	add_page_state(nr_slab, nr_pages);
+	add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
 	return page_address(page);
@@ -1522,12 +1522,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
 	while (i--) {
 		BUG_ON(!PageSlab(page));
 		__ClearPageSlab(page);
 		page++;
 	}
-	sub_page_state(nr_slab, nr_freed);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	free_pages((unsigned long)addr, cachep->gfporder);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0960846d649f..d6942436ac97 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1362,7 +1362,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for_each_zone(zone)
 		lru_pages += zone->nr_active + zone->nr_inactive;
 
-	nr_slab = read_page_state(nr_slab);
+	nr_slab = global_page_state(NR_SLAB);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
 		reclaim_state.reclaimed_slab = 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3baf4dffa62a..dc9e69209223 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -398,13 +398,13 @@ static char *vmstat_text[] = {
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
+	"nr_slab",
 
 	/* Page state */
 	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
 	"nr_page_table_pages",
-	"nr_slab",
 
 	"pgpgin",
 	"pgpgout",