diff options
author | Christoph Lameter <clameter@sgi.com> | 2006-09-26 02:31:51 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-26 11:48:51 -0400 |
commit | 972d1a7b140569084439a81265a0f15b74e924e0 (patch) | |
tree | e86e676e407503ef3d98020a88bb925235f11434 | |
parent | 8417bba4b151346ed475fcc923693c9e3be89063 (diff) |
[PATCH] ZVC: Support NR_SLAB_RECLAIMABLE / NR_SLAB_UNRECLAIMABLE
Remove the atomic counter for slab_reclaim_pages and replace the counter
and NR_SLAB with two ZVC counters that account for unreclaimable and
reclaimable slab pages: NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE.
Change the check in vmscan.c to refer to NR_SLAB_RECLAIMABLE. The
intent seems to be to check for slab pages that could be freed.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | arch/i386/mm/pgtable.c | 4 | ||||
-rw-r--r-- | drivers/base/node.c | 9 | ||||
-rw-r--r-- | fs/proc/proc_misc.c | 7 | ||||
-rw-r--r-- | include/linux/mmzone.h | 3 | ||||
-rw-r--r-- | include/linux/slab.h | 2 | ||||
-rw-r--r-- | mm/mmap.c | 2 | ||||
-rw-r--r-- | mm/nommu.c | 2 | ||||
-rw-r--r-- | mm/page_alloc.c | 3 | ||||
-rw-r--r-- | mm/slab.c | 24 | ||||
-rw-r--r-- | mm/slob.c | 4 | ||||
-rw-r--r-- | mm/vmscan.c | 2 | ||||
-rw-r--r-- | mm/vmstat.c | 3 |
12 files changed, 36 insertions, 29 deletions
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c index bd98768d8764..a9f4910a22f8 100644 --- a/arch/i386/mm/pgtable.c +++ b/arch/i386/mm/pgtable.c | |||
@@ -60,7 +60,9 @@ void show_mem(void) | |||
60 | printk(KERN_INFO "%lu pages writeback\n", | 60 | printk(KERN_INFO "%lu pages writeback\n", |
61 | global_page_state(NR_WRITEBACK)); | 61 | global_page_state(NR_WRITEBACK)); |
62 | printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED)); | 62 | printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED)); |
63 | printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB)); | 63 | printk(KERN_INFO "%lu pages slab\n", |
64 | global_page_state(NR_SLAB_RECLAIMABLE) + | ||
65 | global_page_state(NR_SLAB_UNRECLAIMABLE)); | ||
64 | printk(KERN_INFO "%lu pages pagetables\n", | 66 | printk(KERN_INFO "%lu pages pagetables\n", |
65 | global_page_state(NR_PAGETABLE)); | 67 | global_page_state(NR_PAGETABLE)); |
66 | } | 68 | } |
diff --git a/drivers/base/node.c b/drivers/base/node.c index e09f5c2c11ee..001e6f6b9c1b 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -68,7 +68,9 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) | |||
68 | "Node %d PageTables: %8lu kB\n" | 68 | "Node %d PageTables: %8lu kB\n" |
69 | "Node %d NFS_Unstable: %8lu kB\n" | 69 | "Node %d NFS_Unstable: %8lu kB\n" |
70 | "Node %d Bounce: %8lu kB\n" | 70 | "Node %d Bounce: %8lu kB\n" |
71 | "Node %d Slab: %8lu kB\n", | 71 | "Node %d Slab: %8lu kB\n" |
72 | "Node %d SReclaimable: %8lu kB\n" | ||
73 | "Node %d SUnreclaim: %8lu kB\n", | ||
72 | nid, K(i.totalram), | 74 | nid, K(i.totalram), |
73 | nid, K(i.freeram), | 75 | nid, K(i.freeram), |
74 | nid, K(i.totalram - i.freeram), | 76 | nid, K(i.totalram - i.freeram), |
@@ -88,7 +90,10 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) | |||
88 | nid, K(node_page_state(nid, NR_PAGETABLE)), | 90 | nid, K(node_page_state(nid, NR_PAGETABLE)), |
89 | nid, K(node_page_state(nid, NR_UNSTABLE_NFS)), | 91 | nid, K(node_page_state(nid, NR_UNSTABLE_NFS)), |
90 | nid, K(node_page_state(nid, NR_BOUNCE)), | 92 | nid, K(node_page_state(nid, NR_BOUNCE)), |
91 | nid, K(node_page_state(nid, NR_SLAB))); | 93 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + |
94 | node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), | ||
95 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), | ||
96 | nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); | ||
92 | n += hugetlb_report_node_meminfo(nid, buf + n); | 97 | n += hugetlb_report_node_meminfo(nid, buf + n); |
93 | return n; | 98 | return n; |
94 | } | 99 | } |
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index caa0a51560a0..5bbd60896050 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
@@ -170,6 +170,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off, | |||
170 | "AnonPages: %8lu kB\n" | 170 | "AnonPages: %8lu kB\n" |
171 | "Mapped: %8lu kB\n" | 171 | "Mapped: %8lu kB\n" |
172 | "Slab: %8lu kB\n" | 172 | "Slab: %8lu kB\n" |
173 | "SReclaimable: %8lu kB\n" | ||
174 | "SUnreclaim: %8lu kB\n" | ||
173 | "PageTables: %8lu kB\n" | 175 | "PageTables: %8lu kB\n" |
174 | "NFS_Unstable: %8lu kB\n" | 176 | "NFS_Unstable: %8lu kB\n" |
175 | "Bounce: %8lu kB\n" | 177 | "Bounce: %8lu kB\n" |
@@ -197,7 +199,10 @@ static int meminfo_read_proc(char *page, char **start, off_t off, | |||
197 | K(global_page_state(NR_WRITEBACK)), | 199 | K(global_page_state(NR_WRITEBACK)), |
198 | K(global_page_state(NR_ANON_PAGES)), | 200 | K(global_page_state(NR_ANON_PAGES)), |
199 | K(global_page_state(NR_FILE_MAPPED)), | 201 | K(global_page_state(NR_FILE_MAPPED)), |
200 | K(global_page_state(NR_SLAB)), | 202 | K(global_page_state(NR_SLAB_RECLAIMABLE) + |
203 | global_page_state(NR_SLAB_UNRECLAIMABLE)), | ||
204 | K(global_page_state(NR_SLAB_RECLAIMABLE)), | ||
205 | K(global_page_state(NR_SLAB_UNRECLAIMABLE)), | ||
201 | K(global_page_state(NR_PAGETABLE)), | 206 | K(global_page_state(NR_PAGETABLE)), |
202 | K(global_page_state(NR_UNSTABLE_NFS)), | 207 | K(global_page_state(NR_UNSTABLE_NFS)), |
203 | K(global_page_state(NR_BOUNCE)), | 208 | K(global_page_state(NR_BOUNCE)), |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a703527e2b45..08c41b9f92e0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -51,7 +51,8 @@ enum zone_stat_item { | |||
51 | NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. | 51 | NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. |
52 | only modified from process context */ | 52 | only modified from process context */ |
53 | NR_FILE_PAGES, | 53 | NR_FILE_PAGES, |
54 | NR_SLAB, /* Pages used by slab allocator */ | 54 | NR_SLAB_RECLAIMABLE, |
55 | NR_SLAB_UNRECLAIMABLE, | ||
55 | NR_PAGETABLE, /* used for pagetables */ | 56 | NR_PAGETABLE, /* used for pagetables */ |
56 | NR_FILE_DIRTY, | 57 | NR_FILE_DIRTY, |
57 | NR_WRITEBACK, | 58 | NR_WRITEBACK, |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 2f6bef6a98c9..66d6eb78d1c6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -284,8 +284,6 @@ extern kmem_cache_t *fs_cachep; | |||
284 | extern kmem_cache_t *sighand_cachep; | 284 | extern kmem_cache_t *sighand_cachep; |
285 | extern kmem_cache_t *bio_cachep; | 285 | extern kmem_cache_t *bio_cachep; |
286 | 286 | ||
287 | extern atomic_t slab_reclaim_pages; | ||
288 | |||
289 | #endif /* __KERNEL__ */ | 287 | #endif /* __KERNEL__ */ |
290 | 288 | ||
291 | #endif /* _LINUX_SLAB_H */ | 289 | #endif /* _LINUX_SLAB_H */ |
@@ -116,7 +116,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) | |||
116 | * which are reclaimable, under pressure. The dentry | 116 | * which are reclaimable, under pressure. The dentry |
117 | * cache and most inode caches should fall into this | 117 | * cache and most inode caches should fall into this |
118 | */ | 118 | */ |
119 | free += atomic_read(&slab_reclaim_pages); | 119 | free += global_page_state(NR_SLAB_RECLAIMABLE); |
120 | 120 | ||
121 | /* | 121 | /* |
122 | * Leave the last 3% for root | 122 | * Leave the last 3% for root |
diff --git a/mm/nommu.c b/mm/nommu.c index c576df71e3bb..d99dea31e443 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1133,7 +1133,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) | |||
1133 | * which are reclaimable, under pressure. The dentry | 1133 | * which are reclaimable, under pressure. The dentry |
1134 | * cache and most inode caches should fall into this | 1134 | * cache and most inode caches should fall into this |
1135 | */ | 1135 | */ |
1136 | free += atomic_read(&slab_reclaim_pages); | 1136 | free += global_page_state(NR_SLAB_RECLAIMABLE); |
1137 | 1137 | ||
1138 | /* | 1138 | /* |
1139 | * Leave the last 3% for root | 1139 | * Leave the last 3% for root |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5da6bc4e0a6b..47e98423b30d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1304,7 +1304,8 @@ void show_free_areas(void) | |||
1304 | global_page_state(NR_WRITEBACK), | 1304 | global_page_state(NR_WRITEBACK), |
1305 | global_page_state(NR_UNSTABLE_NFS), | 1305 | global_page_state(NR_UNSTABLE_NFS), |
1306 | nr_free_pages(), | 1306 | nr_free_pages(), |
1307 | global_page_state(NR_SLAB), | 1307 | global_page_state(NR_SLAB_RECLAIMABLE) + |
1308 | global_page_state(NR_SLAB_UNRECLAIMABLE), | ||
1308 | global_page_state(NR_FILE_MAPPED), | 1309 | global_page_state(NR_FILE_MAPPED), |
1309 | global_page_state(NR_PAGETABLE)); | 1310 | global_page_state(NR_PAGETABLE)); |
1310 | 1311 | ||
@@ -736,14 +736,6 @@ static DEFINE_MUTEX(cache_chain_mutex); | |||
736 | static struct list_head cache_chain; | 736 | static struct list_head cache_chain; |
737 | 737 | ||
738 | /* | 738 | /* |
739 | * vm_enough_memory() looks at this to determine how many slab-allocated pages | ||
740 | * are possibly freeable under pressure | ||
741 | * | ||
742 | * SLAB_RECLAIM_ACCOUNT turns this on per-slab | ||
743 | */ | ||
744 | atomic_t slab_reclaim_pages; | ||
745 | |||
746 | /* | ||
747 | * chicken and egg problem: delay the per-cpu array allocation | 739 | * chicken and egg problem: delay the per-cpu array allocation |
748 | * until the general caches are up. | 740 | * until the general caches are up. |
749 | */ | 741 | */ |
@@ -1580,8 +1572,11 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |||
1580 | 1572 | ||
1581 | nr_pages = (1 << cachep->gfporder); | 1573 | nr_pages = (1 << cachep->gfporder); |
1582 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | 1574 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) |
1583 | atomic_add(nr_pages, &slab_reclaim_pages); | 1575 | add_zone_page_state(page_zone(page), |
1584 | add_zone_page_state(page_zone(page), NR_SLAB, nr_pages); | 1576 | NR_SLAB_RECLAIMABLE, nr_pages); |
1577 | else | ||
1578 | add_zone_page_state(page_zone(page), | ||
1579 | NR_SLAB_UNRECLAIMABLE, nr_pages); | ||
1585 | for (i = 0; i < nr_pages; i++) | 1580 | for (i = 0; i < nr_pages; i++) |
1586 | __SetPageSlab(page + i); | 1581 | __SetPageSlab(page + i); |
1587 | return page_address(page); | 1582 | return page_address(page); |
@@ -1596,7 +1591,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) | |||
1596 | struct page *page = virt_to_page(addr); | 1591 | struct page *page = virt_to_page(addr); |
1597 | const unsigned long nr_freed = i; | 1592 | const unsigned long nr_freed = i; |
1598 | 1593 | ||
1599 | sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed); | 1594 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) |
1595 | sub_zone_page_state(page_zone(page), | ||
1596 | NR_SLAB_RECLAIMABLE, nr_freed); | ||
1597 | else | ||
1598 | sub_zone_page_state(page_zone(page), | ||
1599 | NR_SLAB_UNRECLAIMABLE, nr_freed); | ||
1600 | while (i--) { | 1600 | while (i--) { |
1601 | BUG_ON(!PageSlab(page)); | 1601 | BUG_ON(!PageSlab(page)); |
1602 | __ClearPageSlab(page); | 1602 | __ClearPageSlab(page); |
@@ -1605,8 +1605,6 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) | |||
1605 | if (current->reclaim_state) | 1605 | if (current->reclaim_state) |
1606 | current->reclaim_state->reclaimed_slab += nr_freed; | 1606 | current->reclaim_state->reclaimed_slab += nr_freed; |
1607 | free_pages((unsigned long)addr, cachep->gfporder); | 1607 | free_pages((unsigned long)addr, cachep->gfporder); |
1608 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | ||
1609 | atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages); | ||
1610 | } | 1608 | } |
1611 | 1609 | ||
1612 | static void kmem_rcu_free(struct rcu_head *head) | 1610 | static void kmem_rcu_free(struct rcu_head *head) |
@@ -339,7 +339,3 @@ void kmem_cache_init(void) | |||
339 | 339 | ||
340 | mod_timer(&slob_timer, jiffies + HZ); | 340 | mod_timer(&slob_timer, jiffies + HZ); |
341 | } | 341 | } |
342 | |||
343 | atomic_t slab_reclaim_pages = ATOMIC_INIT(0); | ||
344 | EXPORT_SYMBOL(slab_reclaim_pages); | ||
345 | |||
diff --git a/mm/vmscan.c b/mm/vmscan.c index 5154c25e8440..349797ba4bac 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1378,7 +1378,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
1378 | for_each_zone(zone) | 1378 | for_each_zone(zone) |
1379 | lru_pages += zone->nr_active + zone->nr_inactive; | 1379 | lru_pages += zone->nr_active + zone->nr_inactive; |
1380 | 1380 | ||
1381 | nr_slab = global_page_state(NR_SLAB); | 1381 | nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); |
1382 | /* If slab caches are huge, it's better to hit them first */ | 1382 | /* If slab caches are huge, it's better to hit them first */ |
1383 | while (nr_slab >= lru_pages) { | 1383 | while (nr_slab >= lru_pages) { |
1384 | reclaim_state.reclaimed_slab = 0; | 1384 | reclaim_state.reclaimed_slab = 0; |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 968c0072e19a..490d8c1a0ded 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -458,7 +458,8 @@ static char *vmstat_text[] = { | |||
458 | "nr_anon_pages", | 458 | "nr_anon_pages", |
459 | "nr_mapped", | 459 | "nr_mapped", |
460 | "nr_file_pages", | 460 | "nr_file_pages", |
461 | "nr_slab", | 461 | "nr_slab_reclaimable", |
462 | "nr_slab_unreclaimable", | ||
462 | "nr_page_table_pages", | 463 | "nr_page_table_pages", |
463 | "nr_dirty", | 464 | "nr_dirty", |
464 | "nr_writeback", | 465 | "nr_writeback", |