author    | Jiang Liu <liuj97@gmail.com> | 2013-02-22 19:33:52 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 20:50:14 -0500
commit    | b40da04946aa7b603b2aa4dd479f83b2c9090d96 (patch)
tree      | e4c93fd9375f9c90449ef37f4456fbd3c5a7b6b6 /mm
parent    | f7210e6c4ac795694106c1c5307134d3fc233e88 (diff)
mm: use zone->managed_pages instead of zone->present_pages where appropriate
Now we have zone->managed_pages for "pages managed by the buddy system
in the zone", so replace zone->present_pages with zone->managed_pages
where what the user really wants is the number of allocatable pages.
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Maciej Rutecki <maciej.rutecki@gmail.com>
Cc: Chris Clayton <chris2553@googlemail.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Jianguo Wu <wujianguo@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
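For readers new to the distinction, the three per-zone size counters relate as sketched below. This is an illustrative restatement of the definitions used by the managed_pages series; only the field names and their relationships come from struct zone, while the struct and helper here are made-up stand-ins, not kernel code.

```c
/*
 * Illustrative sketch only -- not kernel code.
 *
 *   spanned_pages = zone_end_pfn - zone_start_pfn   (whole PFN range, holes included)
 *   present_pages = spanned_pages - pages_in_holes  (pages backed by physical memory)
 *   managed_pages = present_pages - reserved_pages  (pages handed to the buddy
 *                                                    allocator, i.e. allocatable)
 */
struct zone_size_sketch {
	unsigned long spanned_pages;	/* whole PFN span of the zone */
	unsigned long present_pages;	/* physically present pages */
	unsigned long managed_pages;	/* pages managed by the buddy system */
};

/*
 * Sizing heuristics (watermarks, per-cpu batch sizes, vmstat thresholds, ...)
 * care about how much memory can actually be allocated, which is why this
 * patch switches them from present_pages to managed_pages.
 */
static inline unsigned long allocatable_pages(const struct zone_size_sketch *z)
{
	return z->managed_pages;
}
```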
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 32
-rw-r--r-- | mm/vmscan.c     | 14
-rw-r--r-- | mm/vmstat.c     | 2
3 files changed, 24 insertions, 24 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a7381be21320..5f73106bd8dd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2808,7 +2808,7 @@ static unsigned int nr_free_zone_pages(int offset)
 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 
 	for_each_zone_zonelist(zone, z, zonelist, offset) {
-		unsigned long size = zone->present_pages;
+		unsigned long size = zone->managed_pages;
 		unsigned long high = high_wmark_pages(zone);
 		if (size > high)
 			sum += size - high;
@@ -2861,7 +2861,7 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 	val->totalram = pgdat->node_present_pages;
 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
-	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
+	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
 			NR_FREE_PAGES);
 #else
@@ -3939,7 +3939,7 @@ static int __meminit zone_batchsize(struct zone *zone)
 	 *
 	 * OK, so we don't know how big the cache is. So guess.
 	 */
-	batch = zone->present_pages / 1024;
+	batch = zone->managed_pages / 1024;
 	if (batch * PAGE_SIZE > 512 * 1024)
 		batch = (512 * 1024) / PAGE_SIZE;
 	batch /= 4;		/* We effectively *= 4 below */
@@ -4023,7 +4023,7 @@ static void __meminit setup_zone_pageset(struct zone *zone)
 
 		if (percpu_pagelist_fraction)
 			setup_pagelist_highmark(pcp,
-				(zone->present_pages /
+				(zone->managed_pages /
 					percpu_pagelist_fraction));
 	}
 }
@@ -5435,8 +5435,8 @@ static void calculate_totalreserve_pages(void)
 			/* we treat the high watermark as reserved pages. */
 			max += high_wmark_pages(zone);
 
-			if (max > zone->present_pages)
-				max = zone->present_pages;
+			if (max > zone->managed_pages)
+				max = zone->managed_pages;
 			reserve_pages += max;
 			/*
 			 * Lowmem reserves are not available to
@@ -5468,7 +5468,7 @@ static void setup_per_zone_lowmem_reserve(void)
 	for_each_online_pgdat(pgdat) {
 		for (j = 0; j < MAX_NR_ZONES; j++) {
 			struct zone *zone = pgdat->node_zones + j;
-			unsigned long present_pages = zone->present_pages;
+			unsigned long managed_pages = zone->managed_pages;
 
 			zone->lowmem_reserve[j] = 0;
 
@@ -5482,9 +5482,9 @@ static void setup_per_zone_lowmem_reserve(void)
 					sysctl_lowmem_reserve_ratio[idx] = 1;
 
 				lower_zone = pgdat->node_zones + idx;
-				lower_zone->lowmem_reserve[j] = present_pages /
+				lower_zone->lowmem_reserve[j] = managed_pages /
 					sysctl_lowmem_reserve_ratio[idx];
-				present_pages += lower_zone->present_pages;
+				managed_pages += lower_zone->managed_pages;
 			}
 		}
 	}
@@ -5503,14 +5503,14 @@ static void __setup_per_zone_wmarks(void)
 	/* Calculate total number of !ZONE_HIGHMEM pages */
 	for_each_zone(zone) {
 		if (!is_highmem(zone))
-			lowmem_pages += zone->present_pages;
+			lowmem_pages += zone->managed_pages;
 	}
 
 	for_each_zone(zone) {
 		u64 tmp;
 
 		spin_lock_irqsave(&zone->lock, flags);
-		tmp = (u64)pages_min * zone->present_pages;
+		tmp = (u64)pages_min * zone->managed_pages;
 		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
 			/*
@@ -5524,7 +5524,7 @@ static void __setup_per_zone_wmarks(void)
 			 */
 			unsigned long min_pages;
 
-			min_pages = zone->present_pages / 1024;
+			min_pages = zone->managed_pages / 1024;
 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
 			zone->watermark[WMARK_MIN] = min_pages;
 		} else {
@@ -5586,7 +5586,7 @@ static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
 	unsigned int gb, ratio;
 
 	/* Zone size in gigabytes */
-	gb = zone->present_pages >> (30 - PAGE_SHIFT);
+	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
 	if (gb)
 		ratio = int_sqrt(10 * gb);
 	else
@@ -5672,7 +5672,7 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
 		return rc;
 
 	for_each_zone(zone)
-		zone->min_unmapped_pages = (zone->present_pages *
+		zone->min_unmapped_pages = (zone->managed_pages *
 				sysctl_min_unmapped_ratio) / 100;
 	return 0;
 }
@@ -5688,7 +5688,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
 		return rc;
 
 	for_each_zone(zone)
-		zone->min_slab_pages = (zone->present_pages *
+		zone->min_slab_pages = (zone->managed_pages *
 				sysctl_min_slab_ratio) / 100;
 	return 0;
 }
@@ -5730,7 +5730,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	for_each_populated_zone(zone) {
 		for_each_possible_cpu(cpu) {
 			unsigned long high;
-			high = zone->present_pages / percpu_pagelist_fraction;
+			high = zone->managed_pages / percpu_pagelist_fraction;
 			setup_pagelist_highmark(
 				per_cpu_ptr(zone->pageset, cpu), high);
 		}
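To make the arithmetic in the __setup_per_zone_wmarks() hunk above concrete, here is a hedged, user-space sketch of how pages_min is split across zones in proportion to managed_pages; the zone sizes and the pages_min value are invented purely for illustration.

```c
#include <stdio.h>

/*
 * Illustration only: redo the proportional split performed by
 * __setup_per_zone_wmarks().  Each non-highmem zone receives a share of
 * pages_min proportional to its managed_pages, mirroring
 * "tmp = (u64)pages_min * zone->managed_pages; do_div(tmp, lowmem_pages);".
 */
int main(void)
{
	unsigned long pages_min = 4096;              /* hypothetical min_free_kbytes, in pages */
	unsigned long managed[] = { 991232, 65536 }; /* hypothetical per-zone managed_pages */
	unsigned long lowmem_pages = 0;
	unsigned int i;

	for (i = 0; i < 2; i++)
		lowmem_pages += managed[i];

	for (i = 0; i < 2; i++) {
		unsigned long long tmp = (unsigned long long)pages_min * managed[i];

		tmp /= lowmem_pages;	/* user-space stand-in for do_div() */
		printf("zone %u: WMARK_MIN ~ %llu pages\n", i, tmp);
	}
	return 0;
}
```

With these made-up numbers the larger zone receives about 94% of pages_min and the smaller zone the remainder, and after this patch that proportionality tracks allocatable rather than merely present memory.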
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4093b99044f6..8fde2fc223d9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2010,7 +2010,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	 * a reasonable chance of completing and allocating the page
 	 */
 	balance_gap = min(low_wmark_pages(zone),
-		(zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+		(zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
 			KSWAPD_ZONE_BALANCE_GAP_RATIO);
 	watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
 	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
@@ -2525,7 +2525,7 @@ static bool zone_balanced(struct zone *zone, int order,
  */
 static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 {
-	unsigned long present_pages = 0;
+	unsigned long managed_pages = 0;
 	unsigned long balanced_pages = 0;
 	int i;
 
@@ -2536,7 +2536,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 		if (!populated_zone(zone))
 			continue;
 
-		present_pages += zone->present_pages;
+		managed_pages += zone->managed_pages;
 
 		/*
 		 * A special case here:
@@ -2546,18 +2546,18 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 		 * they must be considered balanced here as well!
 		 */
 		if (zone->all_unreclaimable) {
-			balanced_pages += zone->present_pages;
+			balanced_pages += zone->managed_pages;
 			continue;
 		}
 
 		if (zone_balanced(zone, order, 0, i))
-			balanced_pages += zone->present_pages;
+			balanced_pages += zone->managed_pages;
 		else if (!order)
 			return false;
 	}
 
 	if (order)
-		return balanced_pages >= (present_pages >> 2);
+		return balanced_pages >= (managed_pages >> 2);
 	else
 		return true;
 }
@@ -2745,7 +2745,7 @@ loop_again:
 			 * of the zone, whichever is smaller.
 			 */
 			balance_gap = min(low_wmark_pages(zone),
-				(zone->present_pages +
+				(zone->managed_pages +
 					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
 				KSWAPD_ZONE_BALANCE_GAP_RATIO);
 			/*
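The pgdat_balanced() hunks above keep kswapd's 25% rule but measure it against buddy-managed pages. A minimal stand-alone restatement of the order > 0 test (a simplification for illustration, not the kernel function itself):

```c
#include <stdbool.h>

/*
 * Simplified restatement of the order > 0 case of pgdat_balanced():
 * the node counts as balanced once at least a quarter of its
 * buddy-managed pages sit in zones whose watermarks are satisfied.
 */
static bool node_quarter_balanced(unsigned long balanced_pages,
				  unsigned long managed_pages)
{
	return balanced_pages >= (managed_pages >> 2);
}
```

Measuring against managed_pages means pages permanently reserved at boot no longer inflate the target kswapd has to reach.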
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9800306c8195..e3475f5fd983 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -142,7 +142,7 @@ int calculate_normal_threshold(struct zone *zone)
 	 * 125		1024		10	16-32 GB	9
 	 */
 
-	mem = zone->present_pages >> (27 - PAGE_SHIFT);
+	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
 
 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 
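Finally, the vmstat.c hunk feeds managed_pages into the per-cpu stat threshold. Below is a rough user-space re-computation of calculate_normal_threshold() for a hypothetical zone (4 GB managed, 4 KB pages, 4 online CPUs); the fls() stand-in is an approximation, and the cap of 125 reflects the maximum threshold shown in the function's sample table.

```c
#include <stdio.h>

/* Crude fls() stand-in: position of the highest set bit, 1-based (0 for 0). */
static int fls_approx(unsigned long x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	const int page_shift = 12;	/* 4 KB pages assumed */
	unsigned long managed_pages = (unsigned long)((4ULL << 30) >> page_shift);
	int cpus = 4;			/* hypothetical CPU count */

	/* mem = zone size in 128 MB units, as in calculate_normal_threshold() */
	unsigned long mem = managed_pages >> (27 - page_shift);
	int threshold = 2 * fls_approx(cpus) * (1 + fls_approx(mem));

	if (threshold > 125)	/* cap taken from the function's sample table */
		threshold = 125;

	printf("mem = %lu (128 MB units), threshold = %d\n", mem, threshold);
	return 0;
}
```

Because the calculation now starts from managed_pages, zones whose memory is largely reserved (and thus never allocatable) end up with correspondingly smaller counter-drift thresholds.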