diff options
author | Christoph Lameter <clameter@sgi.com> | 2007-02-10 04:43:02 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-02-11 13:51:17 -0500 |
commit | d23ad42324cc4378132e51f2fc5c9ba6cbe75182 (patch) | |
tree | 6844416befb3988e432e8f422f3a369e2f760d39 /mm/page_alloc.c | |
parent | c878538598d1e7ab41ecc0de8894e34e2fdef630 (diff) |
[PATCH] Use ZVC for free_pages
This again simplifies some of the VM counter calculations through the use
of the ZVC consolidated counters.
[michal.k.k.piotrowski@gmail.com: build fix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Michal Piotrowski <michal.k.k.piotrowski@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 37 |
1 files changed, 13 insertions, 24 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 07c954e53270..ba62d8789f73 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -395,7 +395,7 @@ static inline void __free_one_page(struct page *page, | |||
395 | VM_BUG_ON(page_idx & (order_size - 1)); | 395 | VM_BUG_ON(page_idx & (order_size - 1)); |
396 | VM_BUG_ON(bad_range(zone, page)); | 396 | VM_BUG_ON(bad_range(zone, page)); |
397 | 397 | ||
398 | zone->free_pages += order_size; | 398 | __mod_zone_page_state(zone, NR_FREE_PAGES, order_size); |
399 | while (order < MAX_ORDER-1) { | 399 | while (order < MAX_ORDER-1) { |
400 | unsigned long combined_idx; | 400 | unsigned long combined_idx; |
401 | struct free_area *area; | 401 | struct free_area *area; |
@@ -631,7 +631,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order) | |||
631 | list_del(&page->lru); | 631 | list_del(&page->lru); |
632 | rmv_page_order(page); | 632 | rmv_page_order(page); |
633 | area->nr_free--; | 633 | area->nr_free--; |
634 | zone->free_pages -= 1UL << order; | 634 | __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); |
635 | expand(zone, page, order, current_order, area); | 635 | expand(zone, page, order, current_order, area); |
636 | return page; | 636 | return page; |
637 | } | 637 | } |
@@ -989,7 +989,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |||
989 | int classzone_idx, int alloc_flags) | 989 | int classzone_idx, int alloc_flags) |
990 | { | 990 | { |
991 | /* free_pages my go negative - that's OK */ | 991 | /* free_pages my go negative - that's OK */ |
992 | long min = mark, free_pages = z->free_pages - (1 << order) + 1; | 992 | long min = mark; |
993 | long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; | ||
993 | int o; | 994 | int o; |
994 | 995 | ||
995 | if (alloc_flags & ALLOC_HIGH) | 996 | if (alloc_flags & ALLOC_HIGH) |
@@ -1444,13 +1445,7 @@ EXPORT_SYMBOL(free_pages); | |||
1444 | */ | 1445 | */ |
1445 | unsigned int nr_free_pages(void) | 1446 | unsigned int nr_free_pages(void) |
1446 | { | 1447 | { |
1447 | unsigned int sum = 0; | 1448 | return global_page_state(NR_FREE_PAGES); |
1448 | struct zone *zone; | ||
1449 | |||
1450 | for_each_zone(zone) | ||
1451 | sum += zone->free_pages; | ||
1452 | |||
1453 | return sum; | ||
1454 | } | 1449 | } |
1455 | 1450 | ||
1456 | EXPORT_SYMBOL(nr_free_pages); | 1451 | EXPORT_SYMBOL(nr_free_pages); |
@@ -1458,13 +1453,7 @@ EXPORT_SYMBOL(nr_free_pages); | |||
1458 | #ifdef CONFIG_NUMA | 1453 | #ifdef CONFIG_NUMA |
1459 | unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) | 1454 | unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) |
1460 | { | 1455 | { |
1461 | unsigned int sum = 0; | 1456 | return node_page_state(pgdat->node_id, NR_FREE_PAGES); |
1462 | enum zone_type i; | ||
1463 | |||
1464 | for (i = 0; i < MAX_NR_ZONES; i++) | ||
1465 | sum += pgdat->node_zones[i].free_pages; | ||
1466 | |||
1467 | return sum; | ||
1468 | } | 1457 | } |
1469 | #endif | 1458 | #endif |
1470 | 1459 | ||
@@ -1514,7 +1503,7 @@ void si_meminfo(struct sysinfo *val) | |||
1514 | { | 1503 | { |
1515 | val->totalram = totalram_pages; | 1504 | val->totalram = totalram_pages; |
1516 | val->sharedram = 0; | 1505 | val->sharedram = 0; |
1517 | val->freeram = nr_free_pages(); | 1506 | val->freeram = global_page_state(NR_FREE_PAGES); |
1518 | val->bufferram = nr_blockdev_pages(); | 1507 | val->bufferram = nr_blockdev_pages(); |
1519 | val->totalhigh = totalhigh_pages; | 1508 | val->totalhigh = totalhigh_pages; |
1520 | val->freehigh = nr_free_highpages(); | 1509 | val->freehigh = nr_free_highpages(); |
@@ -1529,10 +1518,11 @@ void si_meminfo_node(struct sysinfo *val, int nid) | |||
1529 | pg_data_t *pgdat = NODE_DATA(nid); | 1518 | pg_data_t *pgdat = NODE_DATA(nid); |
1530 | 1519 | ||
1531 | val->totalram = pgdat->node_present_pages; | 1520 | val->totalram = pgdat->node_present_pages; |
1532 | val->freeram = nr_free_pages_pgdat(pgdat); | 1521 | val->freeram = node_page_state(nid, NR_FREE_PAGES); |
1533 | #ifdef CONFIG_HIGHMEM | 1522 | #ifdef CONFIG_HIGHMEM |
1534 | val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; | 1523 | val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; |
1535 | val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages; | 1524 | val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], |
1525 | NR_FREE_PAGES); | ||
1536 | #else | 1526 | #else |
1537 | val->totalhigh = 0; | 1527 | val->totalhigh = 0; |
1538 | val->freehigh = 0; | 1528 | val->freehigh = 0; |
@@ -1580,13 +1570,13 @@ void show_free_areas(void) | |||
1580 | get_zone_counts(&active, &inactive, &free); | 1570 | get_zone_counts(&active, &inactive, &free); |
1581 | 1571 | ||
1582 | printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n" | 1572 | printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n" |
1583 | " free:%u slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", | 1573 | " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", |
1584 | active, | 1574 | active, |
1585 | inactive, | 1575 | inactive, |
1586 | global_page_state(NR_FILE_DIRTY), | 1576 | global_page_state(NR_FILE_DIRTY), |
1587 | global_page_state(NR_WRITEBACK), | 1577 | global_page_state(NR_WRITEBACK), |
1588 | global_page_state(NR_UNSTABLE_NFS), | 1578 | global_page_state(NR_UNSTABLE_NFS), |
1589 | nr_free_pages(), | 1579 | global_page_state(NR_FREE_PAGES), |
1590 | global_page_state(NR_SLAB_RECLAIMABLE) + | 1580 | global_page_state(NR_SLAB_RECLAIMABLE) + |
1591 | global_page_state(NR_SLAB_UNRECLAIMABLE), | 1581 | global_page_state(NR_SLAB_UNRECLAIMABLE), |
1592 | global_page_state(NR_FILE_MAPPED), | 1582 | global_page_state(NR_FILE_MAPPED), |
@@ -1612,7 +1602,7 @@ void show_free_areas(void) | |||
1612 | " all_unreclaimable? %s" | 1602 | " all_unreclaimable? %s" |
1613 | "\n", | 1603 | "\n", |
1614 | zone->name, | 1604 | zone->name, |
1615 | K(zone->free_pages), | 1605 | K(zone_page_state(zone, NR_FREE_PAGES)), |
1616 | K(zone->pages_min), | 1606 | K(zone->pages_min), |
1617 | K(zone->pages_low), | 1607 | K(zone->pages_low), |
1618 | K(zone->pages_high), | 1608 | K(zone->pages_high), |
@@ -2675,7 +2665,6 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat, | |||
2675 | spin_lock_init(&zone->lru_lock); | 2665 | spin_lock_init(&zone->lru_lock); |
2676 | zone_seqlock_init(zone); | 2666 | zone_seqlock_init(zone); |
2677 | zone->zone_pgdat = pgdat; | 2667 | zone->zone_pgdat = pgdat; |
2678 | zone->free_pages = 0; | ||
2679 | 2668 | ||
2680 | zone->prev_priority = DEF_PRIORITY; | 2669 | zone->prev_priority = DEF_PRIORITY; |
2681 | 2670 | ||