author      Jiang Liu <liuj97@gmail.com>                       2013-02-22 19:33:52 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>     2013-02-23 20:50:14 -0500
commit      b40da04946aa7b603b2aa4dd479f83b2c9090d96 (patch)
tree        e4c93fd9375f9c90449ef37f4456fbd3c5a7b6b6 /mm/page_alloc.c
parent      f7210e6c4ac795694106c1c5307134d3fc233e88 (diff)
mm: use zone->present_pages instead of zone->managed_pages where appropriate
Now that we have zone->managed_pages for "pages managed by the buddy system
in the zone", replace zone->present_pages with zone->managed_pages wherever
what the caller really wants is the number of allocatable pages.
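For context: spanned_pages covers a zone's whole PFN range including holes,
present_pages subtracts the holes, and managed_pages further subtracts pages
reserved at boot, so spanned_pages >= present_pages >= managed_pages. The toy
program below is a minimal sketch of that nesting (the numbers are
hypothetical and it is not kernel code); it illustrates why calculations
about allocatable memory, such as the watermark and per-cpu batch sizing
touched here, should be based on managed_pages:

  /* Toy illustration of how the struct zone page counters nest.
   * All values are hypothetical; only the identities matter.
   */
  #include <assert.h>

  int main(void)
  {
          /* 1 GiB PFN range with 4 KiB pages */
          unsigned long spanned_pages = 262144;
          /* pages lost to holes in the PFN range */
          unsigned long absent_pages = 4096;
          /* pages reserved at boot, never given to the buddy system */
          unsigned long reserved_pages = 2048;

          unsigned long present_pages = spanned_pages - absent_pages;
          unsigned long managed_pages = present_pages - reserved_pages;

          /* Only managed_pages can actually be allocated, so watermarks,
           * lowmem reserves and per-cpu batches should scale with it.
           */
          assert(spanned_pages >= present_pages);
          assert(present_pages >= managed_pages);
          return 0;
  }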
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Maciej Rutecki <maciej.rutecki@gmail.com>
Cc: Chris Clayton <chris2553@googlemail.com>
Cc: "Rafael J . Wysocki" <rjw@sisk.pl>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Jianguo Wu <wujianguo@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a7381be21320..5f73106bd8dd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2808,7 +2808,7 @@ static unsigned int nr_free_zone_pages(int offset)
         struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 
         for_each_zone_zonelist(zone, z, zonelist, offset) {
-                unsigned long size = zone->present_pages;
+                unsigned long size = zone->managed_pages;
                 unsigned long high = high_wmark_pages(zone);
                 if (size > high)
                         sum += size - high;
@@ -2861,7 +2861,7 @@ void si_meminfo_node(struct sysinfo *val, int nid)
         val->totalram = pgdat->node_present_pages;
         val->freeram = node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
-        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
+        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
         val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
                         NR_FREE_PAGES);
 #else
@@ -3939,7 +3939,7 @@ static int __meminit zone_batchsize(struct zone *zone)
          *
          * OK, so we don't know how big the cache is.  So guess.
          */
-        batch = zone->present_pages / 1024;
+        batch = zone->managed_pages / 1024;
         if (batch * PAGE_SIZE > 512 * 1024)
                 batch = (512 * 1024) / PAGE_SIZE;
         batch /= 4;             /* We effectively *= 4 below */
@@ -4023,7 +4023,7 @@ static void __meminit setup_zone_pageset(struct zone *zone)
 
                 if (percpu_pagelist_fraction)
                         setup_pagelist_highmark(pcp,
-                                (zone->present_pages /
+                                (zone->managed_pages /
                                         percpu_pagelist_fraction));
         }
 }
@@ -5435,8 +5435,8 @@ static void calculate_totalreserve_pages(void)
                         /* we treat the high watermark as reserved pages. */
                         max += high_wmark_pages(zone);
 
-                        if (max > zone->present_pages)
-                                max = zone->present_pages;
+                        if (max > zone->managed_pages)
+                                max = zone->managed_pages;
                         reserve_pages += max;
                         /*
                          * Lowmem reserves are not available to
@@ -5468,7 +5468,7 @@ static void setup_per_zone_lowmem_reserve(void)
         for_each_online_pgdat(pgdat) {
                 for (j = 0; j < MAX_NR_ZONES; j++) {
                         struct zone *zone = pgdat->node_zones + j;
-                        unsigned long present_pages = zone->present_pages;
+                        unsigned long managed_pages = zone->managed_pages;
 
                         zone->lowmem_reserve[j] = 0;
 
@@ -5482,9 +5482,9 @@ static void setup_per_zone_lowmem_reserve(void)
                                         sysctl_lowmem_reserve_ratio[idx] = 1;
 
                                 lower_zone = pgdat->node_zones + idx;
-                                lower_zone->lowmem_reserve[j] = present_pages /
+                                lower_zone->lowmem_reserve[j] = managed_pages /
                                         sysctl_lowmem_reserve_ratio[idx];
-                                present_pages += lower_zone->present_pages;
+                                managed_pages += lower_zone->managed_pages;
                         }
                 }
         }
@@ -5503,14 +5503,14 @@ static void __setup_per_zone_wmarks(void)
         /* Calculate total number of !ZONE_HIGHMEM pages */
         for_each_zone(zone) {
                 if (!is_highmem(zone))
-                        lowmem_pages += zone->present_pages;
+                        lowmem_pages += zone->managed_pages;
         }
 
         for_each_zone(zone) {
                 u64 tmp;
 
                 spin_lock_irqsave(&zone->lock, flags);
-                tmp = (u64)pages_min * zone->present_pages;
+                tmp = (u64)pages_min * zone->managed_pages;
                 do_div(tmp, lowmem_pages);
                 if (is_highmem(zone)) {
                         /*
@@ -5524,7 +5524,7 @@ static void __setup_per_zone_wmarks(void)
                          */
                         unsigned long min_pages;
 
-                        min_pages = zone->present_pages / 1024;
+                        min_pages = zone->managed_pages / 1024;
                         min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
                         zone->watermark[WMARK_MIN] = min_pages;
                 } else {
@@ -5586,7 +5586,7 @@ static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
         unsigned int gb, ratio;
 
         /* Zone size in gigabytes */
-        gb = zone->present_pages >> (30 - PAGE_SHIFT);
+        gb = zone->managed_pages >> (30 - PAGE_SHIFT);
         if (gb)
                 ratio = int_sqrt(10 * gb);
         else
@@ -5672,7 +5672,7 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
                 return rc;
 
         for_each_zone(zone)
-                zone->min_unmapped_pages = (zone->present_pages *
+                zone->min_unmapped_pages = (zone->managed_pages *
                                 sysctl_min_unmapped_ratio) / 100;
         return 0;
 }
@@ -5688,7 +5688,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
                 return rc;
 
         for_each_zone(zone)
-                zone->min_slab_pages = (zone->present_pages *
+                zone->min_slab_pages = (zone->managed_pages *
                                 sysctl_min_slab_ratio) / 100;
         return 0;
 }
@@ -5730,7 +5730,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
         for_each_populated_zone(zone) {
                 for_each_possible_cpu(cpu) {
                         unsigned long high;
-                        high = zone->present_pages / percpu_pagelist_fraction;
+                        high = zone->managed_pages / percpu_pagelist_fraction;
                         setup_pagelist_highmark(
                                 per_cpu_ptr(zone->pageset, cpu), high);
                 }