Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  2
-rw-r--r--  mm/slab.c        4
-rw-r--r--  mm/vmscan.c      2
-rw-r--r--  mm/vmstat.c      2
4 files changed, 5 insertions, 5 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8350720f98a8..a38a11cfb483 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1318,7 +1318,7 @@ void show_free_areas(void)
 		ps.nr_writeback,
 		ps.nr_unstable,
 		nr_free_pages(),
-		ps.nr_slab,
+		global_page_state(NR_SLAB),
 		global_page_state(NR_FILE_MAPPED),
 		ps.nr_page_table_pages);
 
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1507,7 +1507,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		atomic_add(nr_pages, &slab_reclaim_pages);
-	add_page_state(nr_slab, nr_pages);
+	add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
 	return page_address(page);
@@ -1522,12 +1522,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
 	while (i--) {
 		BUG_ON(!PageSlab(page));
 		__ClearPageSlab(page);
 		page++;
 	}
-	sub_page_state(nr_slab, nr_freed);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	free_pages((unsigned long)addr, cachep->gfporder);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0960846d649f..d6942436ac97 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1362,7 +1362,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for_each_zone(zone)
 		lru_pages += zone->nr_active + zone->nr_inactive;
 
-	nr_slab = read_page_state(nr_slab);
+	nr_slab = global_page_state(NR_SLAB);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
 		reclaim_state.reclaimed_slab = 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3baf4dffa62a..dc9e69209223 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -398,13 +398,13 @@ static char *vmstat_text[] = {
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
+	"nr_slab",
 
 	/* Page state */
 	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
 	"nr_page_table_pages",
-	"nr_slab",
 
 	"pgpgin",
 	"pgpgout",