author		Christoph Lameter <clameter@sgi.com>	2007-02-10 04:43:02 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-11 13:51:17 -0500
commit		d23ad42324cc4378132e51f2fc5c9ba6cbe75182
tree		6844416befb3988e432e8f422f3a369e2f760d39
parent		c878538598d1e7ab41ecc0de8894e34e2fdef630
[PATCH] Use ZVC for free_pages
This again simplifies some of the VM counter calculations through the use
of the ZVC consolidated counters.

[michal.k.k.piotrowski@gmail.com: build fix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Michal Piotrowski <michal.k.k.piotrowski@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
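
A minimal sketch of the zoned VM counter (ZVC) scheme this patch relies on is
included below, modeled in plain userspace C: each zone keeps one consolidated
counter per stat item, writers batch small per-CPU deltas and fold them into
the shared counter only once they exceed a threshold, and readers take the
consolidated value as a cheap approximation. The zvc_zone, zvc_mod, zvc_read,
ZVC_THRESHOLD and NR_CPUS names are illustrative assumptions for this sketch
only; the helpers actually used in the diff below are __mod_zone_page_state(),
zone_page_state(), global_page_state() and node_page_state().

/*
 * Illustrative userspace model of a ZVC-style counter (not kernel code).
 * Assumed names: zvc_zone, zvc_mod, zvc_read, ZVC_THRESHOLD, NR_CPUS.
 */
#include <stdatomic.h>
#include <stdio.h>

enum zone_stat_item { NR_FREE_PAGES, NR_ZONE_STAT_ITEMS };

#define NR_CPUS		4
#define ZVC_THRESHOLD	32	/* fold a per-CPU delta once it exceeds this */

struct zvc_zone {
	atomic_long vm_stat[NR_ZONE_STAT_ITEMS];	/* consolidated counters */
	long vm_stat_diff[NR_CPUS][NR_ZONE_STAT_ITEMS];	/* per-CPU deltas */
};

/* Writer side: batch small updates per CPU, fold large ones into the zone. */
static void zvc_mod(struct zvc_zone *z, int cpu, enum zone_stat_item item,
		    long delta)
{
	long *d = &z->vm_stat_diff[cpu][item];

	*d += delta;
	if (*d > ZVC_THRESHOLD || *d < -ZVC_THRESHOLD) {
		atomic_fetch_add(&z->vm_stat[item], *d);
		*d = 0;
	}
}

/* Reader side: the consolidated value, which may lag by the unfolded deltas. */
static long zvc_read(struct zvc_zone *z, enum zone_stat_item item)
{
	return atomic_load(&z->vm_stat[item]);
}

int main(void)
{
	struct zvc_zone zone = { 0 };

	zvc_mod(&zone, 0, NR_FREE_PAGES, 64);		/* e.g. freeing a 2^6 block */
	zvc_mod(&zone, 1, NR_FREE_PAGES, -(1L << 2));	/* e.g. allocating a 2^2 block */

	printf("NR_FREE_PAGES ~= %ld\n", zvc_read(&zone, NR_FREE_PAGES));
	return 0;
}

The trade-off made visible here is the one the patch accepts: writers stay
CPU-local and cheap, while a reader may be off by up to the sum of the unfolded
per-CPU deltas, which is acceptable for watermark checks and statistics.
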
 include/linux/mmzone.h  |  2
 kernel/power/snapshot.c |  4
 kernel/power/swsusp.c   |  5
 mm/highmem.c            |  3
 mm/page_alloc.c         | 37
 mm/vmstat.c             | 20
 6 files changed, 25 insertions(+), 46 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9137d1b9735..824279c7884 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,6 +47,7 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+	NR_FREE_PAGES,
 	NR_INACTIVE,
 	NR_ACTIVE,
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
@@ -157,7 +158,6 @@ enum zone_type {
 
 struct zone {
 	/* Fields commonly accessed by the page allocator */
-	unsigned long		free_pages;
 	unsigned long		pages_min, pages_low, pages_high;
 	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index c024606221c..fc53ad06812 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -591,7 +591,7 @@ static unsigned int count_free_highmem_pages(void)
 
 	for_each_zone(zone)
 		if (populated_zone(zone) && is_highmem(zone))
-			cnt += zone->free_pages;
+			cnt += zone_page_state(zone, NR_FREE_PAGES);
 
 	return cnt;
 }
@@ -869,7 +869,7 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
 	for_each_zone(zone) {
 		meta += snapshot_additional_pages(zone);
 		if (!is_highmem(zone))
-			free += zone->free_pages;
+			free += zone_page_state(zone, NR_FREE_PAGES);
 	}
 
 	nr_pages += count_pages_for_highmem(nr_highmem);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 31aa0390c77..7fb834397a0 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -230,9 +230,10 @@ int swsusp_shrink_memory(void)
 		for_each_zone (zone)
 			if (populated_zone(zone)) {
 				if (is_highmem(zone)) {
-					highmem_size -= zone->free_pages;
+					highmem_size -=
+						zone_page_state(zone, NR_FREE_PAGES);
 				} else {
-					tmp -= zone->free_pages;
+					tmp -= zone_page_state(zone, NR_FREE_PAGES);
 					tmp += zone->lowmem_reserve[ZONE_NORMAL];
 					tmp += snapshot_additional_pages(zone);
 				}
diff --git a/mm/highmem.c b/mm/highmem.c
index 0206e7e5018..51e1c1995fe 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -47,7 +47,8 @@ unsigned int nr_free_highpages (void)
 	unsigned int pages = 0;
 
 	for_each_online_pgdat(pgdat)
-		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+		pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
+			NR_FREE_PAGES);
 
 	return pages;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07c954e5327..ba62d8789f7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -395,7 +395,7 @@ static inline void __free_one_page(struct page *page,
 	VM_BUG_ON(page_idx & (order_size - 1));
 	VM_BUG_ON(bad_range(zone, page));
 
-	zone->free_pages += order_size;
+	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
 	while (order < MAX_ORDER-1) {
 		unsigned long combined_idx;
 		struct free_area *area;
@@ -631,7 +631,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
 		list_del(&page->lru);
 		rmv_page_order(page);
 		area->nr_free--;
-		zone->free_pages -= 1UL << order;
+		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
 		expand(zone, page, order, current_order, area);
 		return page;
 	}
@@ -989,7 +989,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		      int classzone_idx, int alloc_flags)
 {
 	/* free_pages my go negative - that's OK */
-	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+	long min = mark;
+	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
 	int o;
 
 	if (alloc_flags & ALLOC_HIGH)
@@ -1444,13 +1445,7 @@ EXPORT_SYMBOL(free_pages);
  */
 unsigned int nr_free_pages(void)
 {
-	unsigned int sum = 0;
-	struct zone *zone;
-
-	for_each_zone(zone)
-		sum += zone->free_pages;
-
-	return sum;
+	return global_page_state(NR_FREE_PAGES);
 }
 
 EXPORT_SYMBOL(nr_free_pages);
@@ -1458,13 +1453,7 @@ EXPORT_SYMBOL(nr_free_pages);
 #ifdef CONFIG_NUMA
 unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
 {
-	unsigned int sum = 0;
-	enum zone_type i;
-
-	for (i = 0; i < MAX_NR_ZONES; i++)
-		sum += pgdat->node_zones[i].free_pages;
-
-	return sum;
+	return node_page_state(pgdat->node_id, NR_FREE_PAGES);
 }
 #endif
 
@@ -1514,7 +1503,7 @@ void si_meminfo(struct sysinfo *val)
 {
 	val->totalram = totalram_pages;
 	val->sharedram = 0;
-	val->freeram = nr_free_pages();
+	val->freeram = global_page_state(NR_FREE_PAGES);
 	val->bufferram = nr_blockdev_pages();
 	val->totalhigh = totalhigh_pages;
 	val->freehigh = nr_free_highpages();
@@ -1529,10 +1518,11 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 	pg_data_t *pgdat = NODE_DATA(nid);
 
 	val->totalram = pgdat->node_present_pages;
-	val->freeram = nr_free_pages_pgdat(pgdat);
+	val->freeram = node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
-	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
+			NR_FREE_PAGES);
 #else
 	val->totalhigh = 0;
 	val->freehigh = 0;
@@ -1580,13 +1570,13 @@ void show_free_areas(void)
 	get_zone_counts(&active, &inactive, &free);
 
 	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
-		" free:%u slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
+		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
 		active,
 		inactive,
 		global_page_state(NR_FILE_DIRTY),
 		global_page_state(NR_WRITEBACK),
 		global_page_state(NR_UNSTABLE_NFS),
-		nr_free_pages(),
+		global_page_state(NR_FREE_PAGES),
 		global_page_state(NR_SLAB_RECLAIMABLE) +
 			global_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_page_state(NR_FILE_MAPPED),
@@ -1612,7 +1602,7 @@ void show_free_areas(void)
1612 " all_unreclaimable? %s" 1602 " all_unreclaimable? %s"
1613 "\n", 1603 "\n",
1614 zone->name, 1604 zone->name,
1615 K(zone->free_pages), 1605 K(zone_page_state(zone, NR_FREE_PAGES)),
1616 K(zone->pages_min), 1606 K(zone->pages_min),
1617 K(zone->pages_low), 1607 K(zone->pages_low),
1618 K(zone->pages_high), 1608 K(zone->pages_high),
@@ -2675,7 +2665,6 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		spin_lock_init(&zone->lru_lock);
 		zone_seqlock_init(zone);
 		zone->zone_pgdat = pgdat;
-		zone->free_pages = 0;
 
 		zone->prev_priority = DEF_PRIORITY;
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 5462106725d..2386716f175 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -16,30 +16,17 @@
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free, struct pglist_data *pgdat)
 {
-	struct zone *zones = pgdat->node_zones;
-	int i;
-
 	*active = node_page_state(pgdat->node_id, NR_ACTIVE);
 	*inactive = node_page_state(pgdat->node_id, NR_INACTIVE);
-	*free = 0;
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		*free += zones[i].free_pages;
-	}
+	*free = node_page_state(pgdat->node_id, NR_FREE_PAGES);
 }
 
 void get_zone_counts(unsigned long *active,
 			unsigned long *inactive, unsigned long *free)
 {
-	struct pglist_data *pgdat;
-
 	*active = global_page_state(NR_ACTIVE);
 	*inactive = global_page_state(NR_INACTIVE);
-	*free = 0;
-	for_each_online_pgdat(pgdat) {
-		unsigned long l, m, n;
-		__get_zone_counts(&l, &m, &n, pgdat);
-		*free += n;
-	}
+	*free = global_page_state(NR_FREE_PAGES);
 }
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
@@ -454,6 +441,7 @@ const struct seq_operations fragmentation_op = {
 
 static const char * const vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_free_pages",
 	"nr_active",
 	"nr_inactive",
 	"nr_anon_pages",
@@ -534,7 +522,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
534 "\n scanned %lu (a: %lu i: %lu)" 522 "\n scanned %lu (a: %lu i: %lu)"
535 "\n spanned %lu" 523 "\n spanned %lu"
536 "\n present %lu", 524 "\n present %lu",
537 zone->free_pages, 525 zone_page_state(zone, NR_FREE_PAGES),
538 zone->pages_min, 526 zone->pages_min,
539 zone->pages_low, 527 zone->pages_low,
540 zone->pages_high, 528 zone->pages_high,