diff options
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 23 |
1 file changed, 12 insertions, 11 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 30d5093a099d..e0f2cdf9d8b1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1153,10 +1153,10 @@ again: | |||
1153 | * properly detect and handle allocation failures. | 1153 | * properly detect and handle allocation failures. |
1154 | * | 1154 | * |
1155 | * We most definitely don't want callers attempting to | 1155 | * We most definitely don't want callers attempting to |
1156 | * allocate greater than single-page units with | 1156 | * allocate greater than order-1 page units with |
1157 | * __GFP_NOFAIL. | 1157 | * __GFP_NOFAIL. |
1158 | */ | 1158 | */ |
1159 | WARN_ON_ONCE(order > 0); | 1159 | WARN_ON_ONCE(order > 1); |
1160 | } | 1160 | } |
1161 | spin_lock_irqsave(&zone->lock, flags); | 1161 | spin_lock_irqsave(&zone->lock, flags); |
1162 | page = __rmqueue(zone, order, migratetype); | 1162 | page = __rmqueue(zone, order, migratetype); |
@@ -3026,7 +3026,7 @@ bad: | |||
3026 | if (dzone == zone) | 3026 | if (dzone == zone) |
3027 | break; | 3027 | break; |
3028 | kfree(zone_pcp(dzone, cpu)); | 3028 | kfree(zone_pcp(dzone, cpu)); |
3029 | zone_pcp(dzone, cpu) = NULL; | 3029 | zone_pcp(dzone, cpu) = &boot_pageset[cpu]; |
3030 | } | 3030 | } |
3031 | return -ENOMEM; | 3031 | return -ENOMEM; |
3032 | } | 3032 | } |
@@ -3041,7 +3041,7 @@ static inline void free_zone_pagesets(int cpu) | |||
3041 | /* Free per_cpu_pageset if it is slab allocated */ | 3041 | /* Free per_cpu_pageset if it is slab allocated */ |
3042 | if (pset != &boot_pageset[cpu]) | 3042 | if (pset != &boot_pageset[cpu]) |
3043 | kfree(pset); | 3043 | kfree(pset); |
3044 | zone_pcp(zone, cpu) = NULL; | 3044 | zone_pcp(zone, cpu) = &boot_pageset[cpu]; |
3045 | } | 3045 | } |
3046 | } | 3046 | } |
3047 | 3047 | ||
@@ -4032,6 +4032,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) | |||
4032 | int i, nid; | 4032 | int i, nid; |
4033 | unsigned long usable_startpfn; | 4033 | unsigned long usable_startpfn; |
4034 | unsigned long kernelcore_node, kernelcore_remaining; | 4034 | unsigned long kernelcore_node, kernelcore_remaining; |
4035 | /* save the state before borrow the nodemask */ | ||
4036 | nodemask_t saved_node_state = node_states[N_HIGH_MEMORY]; | ||
4035 | unsigned long totalpages = early_calculate_totalpages(); | 4037 | unsigned long totalpages = early_calculate_totalpages(); |
4036 | int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); | 4038 | int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); |
4037 | 4039 | ||
@@ -4059,7 +4061,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) | |||
4059 | 4061 | ||
4060 | /* If kernelcore was not specified, there is no ZONE_MOVABLE */ | 4062 | /* If kernelcore was not specified, there is no ZONE_MOVABLE */ |
4061 | if (!required_kernelcore) | 4063 | if (!required_kernelcore) |
4062 | return; | 4064 | goto out; |
4063 | 4065 | ||
4064 | /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ | 4066 | /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ |
4065 | find_usable_zone_for_movable(); | 4067 | find_usable_zone_for_movable(); |
@@ -4158,6 +4160,10 @@ restart: | |||
4158 | for (nid = 0; nid < MAX_NUMNODES; nid++) | 4160 | for (nid = 0; nid < MAX_NUMNODES; nid++) |
4159 | zone_movable_pfn[nid] = | 4161 | zone_movable_pfn[nid] = |
4160 | roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); | 4162 | roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); |
4163 | |||
4164 | out: | ||
4165 | /* restore the node_state */ | ||
4166 | node_states[N_HIGH_MEMORY] = saved_node_state; | ||
4161 | } | 4167 | } |
4162 | 4168 | ||
4163 | /* Any regular memory on that node ? */ | 4169 | /* Any regular memory on that node ? */ |
@@ -4242,11 +4248,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) | |||
4242 | early_node_map[i].start_pfn, | 4248 | early_node_map[i].start_pfn, |
4243 | early_node_map[i].end_pfn); | 4249 | early_node_map[i].end_pfn); |
4244 | 4250 | ||
4245 | /* | ||
4246 | * find_zone_movable_pfns_for_nodes/early_calculate_totalpages init | ||
4247 | * that node_mask, clear it at first | ||
4248 | */ | ||
4249 | nodes_clear(node_states[N_HIGH_MEMORY]); | ||
4250 | /* Initialise every node */ | 4251 | /* Initialise every node */ |
4251 | mminit_verify_pageflags_layout(); | 4252 | mminit_verify_pageflags_layout(); |
4252 | setup_nr_node_ids(); | 4253 | setup_nr_node_ids(); |
@@ -4659,7 +4660,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, | |||
4659 | ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); | 4660 | ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); |
4660 | if (!write || (ret == -EINVAL)) | 4661 | if (!write || (ret == -EINVAL)) |
4661 | return ret; | 4662 | return ret; |
4662 | for_each_zone(zone) { | 4663 | for_each_populated_zone(zone) { |
4663 | for_each_online_cpu(cpu) { | 4664 | for_each_online_cpu(cpu) { |
4664 | unsigned long high; | 4665 | unsigned long high; |
4665 | high = zone->present_pages / percpu_pagelist_fraction; | 4666 | high = zone->present_pages / percpu_pagelist_fraction; |