Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  127
1 file changed, 59 insertions(+), 68 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59164313167f..bd8e33582d25 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -103,7 +103,7 @@ int min_free_kbytes = 1024;
 
 unsigned long __meminitdata nr_kernel_pages;
 unsigned long __meminitdata nr_all_pages;
-static unsigned long __initdata dma_reserve;
+static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
   /*
@@ -126,16 +126,21 @@ static unsigned long __initdata dma_reserve;
   #endif
 #endif
 
-  struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
-  int __initdata nr_nodemap_entries;
-  unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
-  unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+  struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
+  int __meminitdata nr_nodemap_entries;
+  unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+  unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
   unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
   unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
+#if MAX_NUMNODES > 1
+int nr_node_ids __read_mostly = MAX_NUMNODES;
+EXPORT_SYMBOL(nr_node_ids);
+#endif
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -669,65 +674,28 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	return i;
 }
 
-#if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly = MAX_NUMNODES;
-EXPORT_SYMBOL(nr_node_ids);
-
-/*
- * Figure out the number of possible node ids.
- */
-static void __init setup_nr_node_ids(void)
-{
-	unsigned int node;
-	unsigned int highest = 0;
-
-	for_each_node_mask(node, node_possible_map)
-		highest = node;
-	nr_node_ids = highest + 1;
-}
-#else
-static void __init setup_nr_node_ids(void) {}
-#endif
-
 #ifdef CONFIG_NUMA
 /*
- * Called from the slab reaper to drain pagesets on a particular node that
- * belongs to the currently executing processor.
+ * Called from the vmstat counter updater to drain pagesets of this
+ * currently executing processor on remote nodes after they have
+ * expired.
+ *
  * Note that this function must be called with the thread pinned to
  * a single processor.
  */
-void drain_node_pages(int nodeid)
+void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
-	int i;
-	enum zone_type z;
 	unsigned long flags;
+	int to_drain;
 
-	for (z = 0; z < MAX_NR_ZONES; z++) {
-		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
-		struct per_cpu_pageset *pset;
-
-		if (!populated_zone(zone))
-			continue;
-
-		pset = zone_pcp(zone, smp_processor_id());
-		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
-			struct per_cpu_pages *pcp;
-
-			pcp = &pset->pcp[i];
-			if (pcp->count) {
-				int to_drain;
-
-				local_irq_save(flags);
-				if (pcp->count >= pcp->batch)
-					to_drain = pcp->batch;
-				else
-					to_drain = pcp->count;
-				free_pages_bulk(zone, to_drain, &pcp->list, 0);
-				pcp->count -= to_drain;
-				local_irq_restore(flags);
-			}
-		}
-	}
+	local_irq_save(flags);
+	if (pcp->count >= pcp->batch)
+		to_drain = pcp->batch;
+	else
+		to_drain = pcp->count;
+	free_pages_bulk(zone, to_drain, &pcp->list, 0);
+	pcp->count -= to_drain;
+	local_irq_restore(flags);
 }
 #endif
 
@@ -2148,11 +2116,14 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		if (process_zones(cpu))
 			ret = NOTIFY_BAD;
 		break;
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		free_zone_pagesets(cpu);
 		break;
 	default:
@@ -2179,7 +2150,7 @@ void __init setup_per_cpu_pageset(void)
 
 #endif
 
-static __meminit
+static noinline __init_refok
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
@@ -2267,7 +2238,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
  * Basic iterator support. Return the first range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns first region regardless of node
  */
-static int __init first_active_region_index_in_nid(int nid)
+static int __meminit first_active_region_index_in_nid(int nid)
 {
 	int i;
 
@@ -2282,7 +2253,7 @@ static int __init first_active_region_index_in_nid(int nid)
  * Basic iterator support. Return the next active range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns next region regardles of node
  */
-static int __init next_active_region_index_in_nid(int index, int nid)
+static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
 	for (index = index + 1; index < nr_nodemap_entries; index++)
 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
@@ -2298,7 +2269,7 @@ static int __init next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __init early_pfn_to_nid(unsigned long pfn)
+int __meminit early_pfn_to_nid(unsigned long pfn)
 {
 	int i;
 
@@ -2435,7 +2406,7 @@ static void __init account_node_boundary(unsigned int nid,
  * with no available memory, a warning is printed and the start and end
  * PFNs will be 0.
  */
-void __init get_pfn_range_for_nid(unsigned int nid,
+void __meminit get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn)
 {
 	int i;
@@ -2460,7 +2431,7 @@ void __init get_pfn_range_for_nid(unsigned int nid,
  * Return the number of pages a zone spans in a node, including holes
  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  */
-unsigned long __init zone_spanned_pages_in_node(int nid,
+unsigned long __meminit zone_spanned_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long *ignored)
 {
@@ -2488,7 +2459,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __init __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
@@ -2548,7 +2519,7 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 }
 
 /* Return the number of page frames in holes in a zone on a node */
-unsigned long __init zone_absent_pages_in_node(int nid,
+unsigned long __meminit zone_absent_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long *ignored)
 {
@@ -2584,7 +2555,7 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
 
 #endif
 
-static void __init calculate_node_totalpages(struct pglist_data *pgdat,
+static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	unsigned long realtotalpages, totalpages = 0;
@@ -2692,7 +2663,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 	}
 }
 
-static void __init alloc_node_mem_map(struct pglist_data *pgdat)
+static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 {
 	/* Skip empty nodes */
 	if (!pgdat->node_spanned_pages)
@@ -2718,7 +2689,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 		map = alloc_bootmem_node(pgdat, size);
 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 	}
-#ifdef CONFIG_FLATMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
 	/*
 	 * With no DISCONTIG, the global mem_map is just set as node 0's
 	 */
@@ -2747,6 +2718,26 @@ void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
 }
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+
+#if MAX_NUMNODES > 1
+/*
+ * Figure out the number of possible node ids.
+ */
+static void __init setup_nr_node_ids(void)
+{
+	unsigned int node;
+	unsigned int highest = 0;
+
+	for_each_node_mask(node, node_possible_map)
+		highest = node;
+	nr_node_ids = highest + 1;
+}
+#else
+static inline void setup_nr_node_ids(void)
+{
+}
+#endif
+
 /**
  * add_active_range - Register a range of PFNs backed by physical memory
  * @nid: The node ID the range resides on
@@ -3012,7 +3003,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 {
 	int cpu = (unsigned long)hcpu;
 
-	if (action == CPU_DEAD) {
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		local_irq_disable();
 		__drain_pages(cpu);
 		vm_events_fold_cpu(cpu);