-rw-r--r--  include/linux/swap.h   1
-rw-r--r--  mm/page_alloc.c       39
2 files changed, 40 insertions, 0 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 54eac8a39a4c..5b1fdf1cff4f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -155,6 +155,7 @@ extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalhigh_pages;
+extern unsigned long totalreserve_pages;
 extern long nr_swap_pages;
 extern unsigned int nr_free_pages(void);
 extern unsigned int nr_free_pages_pgdat(pg_data_t *pgdat);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b8165e037dee..97d6827c7d66 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -51,6 +51,7 @@ nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
 EXPORT_SYMBOL(node_possible_map);
 unsigned long totalram_pages __read_mostly;
 unsigned long totalhigh_pages __read_mostly;
+unsigned long totalreserve_pages __read_mostly;
 long nr_swap_pages;
 int percpu_pagelist_fraction;
 
@@ -2477,6 +2478,38 @@ void __init page_alloc_init(void)
 }
 
 /*
+ * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
+ *	or min_free_kbytes changes.
+ */
+static void calculate_totalreserve_pages(void)
+{
+	struct pglist_data *pgdat;
+	unsigned long reserve_pages = 0;
+	int i, j;
+
+	for_each_online_pgdat(pgdat) {
+		for (i = 0; i < MAX_NR_ZONES; i++) {
+			struct zone *zone = pgdat->node_zones + i;
+			unsigned long max = 0;
+
+			/* Find valid and maximum lowmem_reserve in the zone */
+			for (j = i; j < MAX_NR_ZONES; j++) {
+				if (zone->lowmem_reserve[j] > max)
+					max = zone->lowmem_reserve[j];
+			}
+
+			/* we treat pages_high as reserved pages. */
+			max += zone->pages_high;
+
+			if (max > zone->present_pages)
+				max = zone->present_pages;
+			reserve_pages += max;
+		}
+	}
+	totalreserve_pages = reserve_pages;
+}
+
+/*
  * setup_per_zone_lowmem_reserve - called whenever
  *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
  *	has a correct pages reserved value, so an adequate number of
@@ -2507,6 +2540,9 @@ static void setup_per_zone_lowmem_reserve(void)
 			}
 		}
 	}
+
+	/* update totalreserve_pages */
+	calculate_totalreserve_pages();
 }
 
 /*
@@ -2561,6 +2597,9 @@ void setup_per_zone_pages_min(void)
 		zone->pages_high = zone->pages_min + tmp / 2;
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
+
+	/* update totalreserve_pages */
+	calculate_totalreserve_pages();
 }
 
 /*
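
For reference, below is a rough, standalone C sketch of the arithmetic the new
calculate_totalreserve_pages() performs: for each zone, take the largest
applicable lowmem_reserve[] entry, add the pages_high watermark, clamp the
result to the zone's present_pages, and sum across zones. The struct below is
a stripped-down stand-in for the kernel's struct zone, and the zone sizes in
main() are made-up values for illustration only; this is not kernel code and
not part of the patch.

/*
 * Simplified illustration of the totalreserve_pages calculation.
 * Stand-in types and made-up zone data; the real kernel walks every
 * online pgdat and its node_zones[] array instead.
 */
#include <stdio.h>

#define NR_ZONES 3

struct fake_zone {
	unsigned long lowmem_reserve[NR_ZONES];	/* pages held back for allocations from higher zones */
	unsigned long pages_high;		/* kswapd "high" watermark, treated as reserved */
	unsigned long present_pages;		/* pages physically present in the zone */
};

static unsigned long calc_totalreserve(const struct fake_zone *zones, int nr)
{
	unsigned long reserve_pages = 0;
	int i, j;

	for (i = 0; i < nr; i++) {
		const struct fake_zone *zone = &zones[i];
		unsigned long max = 0;

		/* largest lowmem_reserve that applies to this zone */
		for (j = i; j < nr; j++) {
			if (zone->lowmem_reserve[j] > max)
				max = zone->lowmem_reserve[j];
		}

		/* pages_high counts as reserved as well */
		max += zone->pages_high;

		/* a zone cannot reserve more pages than it has */
		if (max > zone->present_pages)
			max = zone->present_pages;

		reserve_pages += max;
	}
	return reserve_pages;
}

int main(void)
{
	/* hypothetical DMA / Normal / HighMem zones, values in pages */
	struct fake_zone zones[NR_ZONES] = {
		{ { 0, 1000, 2000 },  32,   4096 },
		{ { 0,    0, 8000 }, 256, 229376 },
		{ { 0,    0,    0 }, 128,  32768 },
	};

	printf("totalreserve_pages = %lu\n", calc_totalreserve(zones, NR_ZONES));
	return 0;
}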