-rw-r--r--  arch/i386/mm/pgtable.c   |  2
-rw-r--r--  drivers/base/node.c      |  4
-rw-r--r--  fs/proc/proc_misc.c      |  2
-rw-r--r--  include/linux/mmzone.h   |  1
-rw-r--r--  include/linux/vmstat.h   |  3
-rw-r--r--  mm/page_alloc.c          |  2
-rw-r--r--  mm/slab.c                |  4
-rw-r--r--  mm/vmscan.c              |  2
-rw-r--r--  mm/vmstat.c              |  2
9 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index f85f1a40e5c8..73ac3599a0ea 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -62,7 +62,7 @@ void show_mem(void)
 	printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
 	printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
 	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk(KERN_INFO "%lu pages slab\n", ps.nr_slab);
+	printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
 	printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
 }
 
diff --git a/drivers/base/node.c b/drivers/base/node.c
index c3bf05158c6d..db116a8791c8 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -54,8 +54,6 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		ps.nr_dirty = 0;
 	if ((long)ps.nr_writeback < 0)
 		ps.nr_writeback = 0;
-	if ((long)ps.nr_slab < 0)
-		ps.nr_slab = 0;
 
 	n = sprintf(buf, "\n"
 		       "Node %d MemTotal: %8lu kB\n"
@@ -87,7 +85,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
-		       nid, K(ps.nr_slab));
+		       nid, K(node_page_state(nid, NR_SLAB)));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
 }
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index ff809656ce31..16aaf7187bb3 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -194,7 +194,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		K(ps.nr_writeback),
 		K(global_page_state(NR_ANON_PAGES)),
 		K(global_page_state(NR_FILE_MAPPED)),
-		K(ps.nr_slab),
+		K(global_page_state(NR_SLAB)),
 		K(allowed),
 		K(committed),
 		K(ps.nr_page_table_pages),
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 839e9a04fd49..67e03fc8533e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -51,6 +51,7 @@ enum zone_stat_item {
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
+	NR_SLAB,	/* Pages used by slab allocator */
 	NR_VM_ZONE_STAT_ITEMS };
 
 struct per_cpu_pages {
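
Note: the mmzone.h hunk above is the core of the conversion. Once NR_SLAB is a zone_stat_item, slab pages are counted per zone, and the call sites touched by this patch read the count either per node (node_page_state(nid, NR_SLAB)) or system-wide (global_page_state(NR_SLAB)) instead of going through struct page_state. The following is a minimal userspace sketch of that per-zone-counter idea, assuming a fixed MAX_ZONES and C11 atomics; the names here (zone_stat, add_zone_state, sub_zone_state, global_state) are illustrative stand-ins, not the kernel symbols.

/* Minimal userspace model of a zoned VM counter (illustration only). */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_ZONES 4	/* assumed zone count, just for the sketch */

enum zone_stat_item { NR_SLAB, NR_VM_ZONE_STAT_ITEMS };

static atomic_long zone_stat[MAX_ZONES][NR_VM_ZONE_STAT_ITEMS];

/* Rough counterpart of add_zone_page_state(): credit one zone's counter. */
static void add_zone_state(int zone, enum zone_stat_item item, long pages)
{
	atomic_fetch_add(&zone_stat[zone][item], pages);
}

/* Rough counterpart of sub_zone_page_state(): debit one zone's counter. */
static void sub_zone_state(int zone, enum zone_stat_item item, long pages)
{
	atomic_fetch_sub(&zone_stat[zone][item], pages);
}

/* Rough counterpart of global_page_state(): sum the item over all zones. */
static long global_state(enum zone_stat_item item)
{
	long total = 0;

	for (int zone = 0; zone < MAX_ZONES; zone++)
		total += atomic_load(&zone_stat[zone][item]);
	return total;
}

int main(void)
{
	add_zone_state(0, NR_SLAB, 8);	/* slab pages allocated from zone 0 */
	add_zone_state(1, NR_SLAB, 4);	/* and from zone 1 */
	sub_zone_state(0, NR_SLAB, 2);	/* some of zone 0's pages freed */
	printf("%ld pages slab\n", global_state(NR_SLAB));	/* prints 10 */
	return 0;
}

The kernel's zoned VM counters additionally batch updates per CPU before folding them into the zone totals, which this sketch ignores; the read path is the same idea of summing per-zone values on demand.
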
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 8ab8229523e6..4b97381a2937 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -26,8 +26,7 @@ struct page_state {
 	unsigned long nr_writeback;	/* Pages under writeback */
 	unsigned long nr_unstable;	/* NFS unstable pages */
 	unsigned long nr_page_table_pages;/* Pages used for pagetables */
-	unsigned long nr_slab;		/* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
+#define GET_PAGE_STATE_LAST nr_page_table_pages
 
 	/*
 	 * The below are zeroed by get_page_state(). Use get_full_page_state()
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8350720f98a8..a38a11cfb483 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1318,7 +1318,7 @@ void show_free_areas(void)
 		ps.nr_writeback,
 		ps.nr_unstable,
 		nr_free_pages(),
-		ps.nr_slab,
+		global_page_state(NR_SLAB),
 		global_page_state(NR_FILE_MAPPED),
 		ps.nr_page_table_pages);
 
diff --git a/mm/slab.c b/mm/slab.c
index 0c33820038cb..5dcfb9044801 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1507,7 +1507,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		atomic_add(nr_pages, &slab_reclaim_pages);
-	add_page_state(nr_slab, nr_pages);
+	add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
 	return page_address(page);
@@ -1522,12 +1522,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
 	while (i--) {
 		BUG_ON(!PageSlab(page));
 		__ClearPageSlab(page);
 		page++;
 	}
-	sub_page_state(nr_slab, nr_freed);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	free_pages((unsigned long)addr, cachep->gfporder);
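
Note: in the mm/slab.c hunks above the accounting now follows the pages themselves: kmem_getpages() credits NR_SLAB in the zone the pages were allocated from, and kmem_freepages() debits that same zone. The sub_zone_page_state() call sits before the while loop rather than after it (where sub_page_state() used to be), presumably because the loop advances the page pointer past the freed block, so page_zone(page) has to be evaluated while page still points at the block's first page.
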
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0960846d649f..d6942436ac97 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1362,7 +1362,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for_each_zone(zone)
 		lru_pages += zone->nr_active + zone->nr_inactive;
 
-	nr_slab = read_page_state(nr_slab);
+	nr_slab = global_page_state(NR_SLAB);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
 		reclaim_state.reclaimed_slab = 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3baf4dffa62a..dc9e69209223 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -398,13 +398,13 @@ static char *vmstat_text[] = {
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
+	"nr_slab",
 
 	/* Page state */
 	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
 	"nr_page_table_pages",
-	"nr_slab",
 
 	"pgpgin",
 	"pgpgout",