author     Christoph Lameter <clameter@sgi.com>      2006-09-26 02:31:51 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-09-26 11:48:51 -0400
commit     972d1a7b140569084439a81265a0f15b74e924e0 (patch)
tree       e86e676e407503ef3d98020a88bb925235f11434 /mm
parent     8417bba4b151346ed475fcc923693c9e3be89063 (diff)
[PATCH] ZVC: Support NR_SLAB_RECLAIMABLE / NR_SLAB_UNRECLAIMABLE
Remove the atomic counter for slab_reclaim_pages and replace the counter
and NR_SLAB with two ZVC counters that account for reclaimable and
unreclaimable slab pages: NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE.
Change the check in vmscan.c to refer to NR_SLAB_RECLAIMABLE.  The
intent seems to be to check for slab pages that could be freed.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
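
[Editorial note: a ZVC is a zoned VM counter, kept per zone and summed on
demand by global_page_state().  A minimal sketch of how a consumer can
recover the old combined NR_SLAB figure after this split; total_slab_pages()
is a hypothetical helper, not part of this patch:]

/* Hypothetical helper: the old NR_SLAB total is now simply the
 * sum of the two new per-zone counters. */
static unsigned long total_slab_pages(void)
{
	return global_page_state(NR_SLAB_RECLAIMABLE) +
	       global_page_state(NR_SLAB_UNRECLAIMABLE);
}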
Diffstat (limited to 'mm')
-rw-r--r--  mm/mmap.c        2
-rw-r--r--  mm/nommu.c       2
-rw-r--r--  mm/page_alloc.c  3
-rw-r--r--  mm/slab.c       24
-rw-r--r--  mm/slob.c        4
-rw-r--r--  mm/vmscan.c      2
-rw-r--r--  mm/vmstat.c      3
7 files changed, 18 insertions, 22 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -116,7 +116,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	 * which are reclaimable, under pressure.  The dentry
 	 * cache and most inode caches should fall into this
 	 */
-	free += atomic_read(&slab_reclaim_pages);
+	free += global_page_state(NR_SLAB_RECLAIMABLE);
 
 	/*
 	 * Leave the last 3% for root
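
[Editorial note: only the reclaimable half feeds the overcommit estimate,
since unreclaimable slab cannot be shrunk under memory pressure.  A
condensed, illustrative sketch of the heuristic around the changed line —
not the verbatim kernel function; count_free_pages() and
count_page_cache_pages() are stand-ins for the real accounting in
__vm_enough_memory():]

static int vm_enough_memory_sketch(long pages)
{
	long free = 0;

	free += count_free_pages();		/* stand-in */
	free += count_page_cache_pages();	/* stand-in */
	/* Reclaimable slab can be dropped under pressure,
	 * so it is treated as nearly free: */
	free += global_page_state(NR_SLAB_RECLAIMABLE);
	free -= free / 32;	/* leave the last 3% for root */
	return free > pages;
}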
diff --git a/mm/nommu.c b/mm/nommu.c
index c576df71e3bb..d99dea31e443 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1133,7 +1133,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	 * which are reclaimable, under pressure.  The dentry
 	 * cache and most inode caches should fall into this
 	 */
-	free += atomic_read(&slab_reclaim_pages);
+	free += global_page_state(NR_SLAB_RECLAIMABLE);
 
 	/*
 	 * Leave the last 3% for root
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5da6bc4e0a6b..47e98423b30d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1304,7 +1304,8 @@ void show_free_areas(void)
 		global_page_state(NR_WRITEBACK),
 		global_page_state(NR_UNSTABLE_NFS),
 		nr_free_pages(),
-		global_page_state(NR_SLAB),
+		global_page_state(NR_SLAB_RECLAIMABLE) +
+			global_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_page_state(NR_FILE_MAPPED),
 		global_page_state(NR_PAGETABLE));
 
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -736,14 +736,6 @@ static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
 /*
- * vm_enough_memory() looks at this to determine how many slab-allocated pages
- * are possibly freeable under pressure
- *
- * SLAB_RECLAIM_ACCOUNT turns this on per-slab
- */
-atomic_t slab_reclaim_pages;
-
-/*
  * chicken and egg problem: delay the per-cpu array allocation
  * until the general caches are up.
  */
@@ -1580,8 +1572,11 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		atomic_add(nr_pages, &slab_reclaim_pages);
-	add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
+		add_zone_page_state(page_zone(page),
+			NR_SLAB_RECLAIMABLE, nr_pages);
+	else
+		add_zone_page_state(page_zone(page),
+			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
 	return page_address(page);
@@ -1596,7 +1591,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
-	sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
+	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+		sub_zone_page_state(page_zone(page),
+				NR_SLAB_RECLAIMABLE, nr_freed);
+	else
+		sub_zone_page_state(page_zone(page),
+				NR_SLAB_UNRECLAIMABLE, nr_freed);
 	while (i--) {
 		BUG_ON(!PageSlab(page));
 		__ClearPageSlab(page);
@@ -1605,8 +1605,6 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	free_pages((unsigned long)addr, cachep->gfporder);
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
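
[Editorial note: which counter a cache's pages land in is decided entirely
by whether the cache was created with SLAB_RECLAIM_ACCOUNT, as the
kmem_getpages() and kmem_freepages() hunks above show.  A minimal sketch of
a cache opting into reclaimable accounting; the cache name and object type
are made up, and the six-argument kmem_cache_create() call follows the
2.6-era signature:]

struct example_obj { char name[64]; };

static struct kmem_cache *example_cachep;

void example_cache_init(void)
{
	/* SLAB_RECLAIM_ACCOUNT routes this cache's pages into
	 * NR_SLAB_RECLAIMABLE rather than NR_SLAB_UNRECLAIMABLE. */
	example_cachep = kmem_cache_create("example_cache",
			sizeof(struct example_obj), 0,
			SLAB_RECLAIM_ACCOUNT, NULL, NULL);
}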
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -339,7 +339,3 @@ void kmem_cache_init(void)
 
 	mod_timer(&slob_timer, jiffies + HZ);
 }
-
-atomic_t slab_reclaim_pages = ATOMIC_INIT(0);
-EXPORT_SYMBOL(slab_reclaim_pages);
-
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5154c25e8440..349797ba4bac 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1378,7 +1378,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for_each_zone(zone)
 		lru_pages += zone->nr_active + zone->nr_inactive;
 
-	nr_slab = global_page_state(NR_SLAB);
+	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
 		reclaim_state.reclaimed_slab = 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 968c0072e19a..490d8c1a0ded 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -458,7 +458,8 @@ static char *vmstat_text[] = {
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
-	"nr_slab",
+	"nr_slab_reclaimable",
+	"nr_slab_unreclaimable",
 	"nr_page_table_pages",
 	"nr_dirty",
 	"nr_writeback",