Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 37 +++++++++++++++----------------------
 1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aecc9cdfdfce..caa92689aac9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ again:
		 * properly detect and handle allocation failures.
		 *
		 * We most definitely don't want callers attempting to
-		 * allocate greater than single-page units with
+		 * allocate greater than order-1 page units with
		 * __GFP_NOFAIL.
		 */
-		WARN_ON_ONCE(order > 0);
+		WARN_ON_ONCE(order > 1);
	}
	spin_lock_irqsave(&zone->lock, flags);
	page = __rmqueue(zone, order, migratetype);
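
Note on the relaxed check: order-1 allocations under __GFP_NOFAIL evidently exist in-tree, so only order > 1 is treated as a caller bug now. A minimal sketch of the pattern being policed, with a hypothetical caller that is not part of this patch:

	/*
	 * Hypothetical caller: order 1 (two contiguous pages) under
	 * __GFP_NOFAIL no longer trips the WARN_ON_ONCE() above;
	 * order >= 2 still does, because the allocator cannot promise
	 * that a larger contiguous block will ever become available.
	 */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 1);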
@@ -1666,7 +1666,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
			preferred_zone, migratetype);

		if (!page && gfp_mask & __GFP_NOFAIL)
-			congestion_wait(WRITE, HZ/50);
+			congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));

	return page;
@@ -1831,7 +1831,7 @@ rebalance:
	pages_reclaimed += did_some_progress;
	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
		/* Wait for some write requests to complete then retry */
-		congestion_wait(WRITE, HZ/50);
+		congestion_wait(BLK_RW_ASYNC, HZ/50);
		goto rebalance;
	}

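Both congestion_wait() hunks are the same interface fix: the first argument names a backing-dev congestion direction (BLK_RW_SYNC or BLK_RW_ASYNC, from linux/backing-dev.h), not a READ/WRITE request type, so passing WRITE only kept working by coincidence of value. The retry idiom used by both call sites, reduced to its shape (a sketch, not the exact code above):

	do {
		page = alloc_pages(gfp_mask, order);
		if (!page)
			/* back off HZ/50 jiffies (~20 ms) until async
			 * (writeback) congestion clears, then retry */
			congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));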
@@ -1983,7 +1983,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

-		split_page(virt_to_page(addr), order);
+		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
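
The cast fixes a type mismatch: addr is an unsigned long, while virt_to_page() wants a pointer on some architectures. For context, alloc_pages_exact() rounds the request up to a power-of-two order, split_page()s the block, and frees the tail pages past PAGE_ALIGN(size), which is what the loop above is doing. A hedged usage sketch with an arbitrary size:

	/*
	 * Three pages' worth (12 KB with 4 KB pages): the allocator takes
	 * an order-2 block (16 KB), splits it, and frees the unused tail
	 * page that a plain alloc_pages(gfp, 2) would have kept pinned.
	 */
	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);
	if (buf)
		free_pages_exact(buf, 3 * PAGE_SIZE);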
@@ -4032,6 +4032,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
+	/* save the state before borrowing the nodemask */
+	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

@@ -4059,7 +4061,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)

	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
	if (!required_kernelcore)
-		return;
+		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	find_usable_zone_for_movable();
@@ -4158,6 +4160,10 @@ restart:
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+
+out:
+	/* restore the node_state */
+	node_states[N_HIGH_MEMORY] = saved_node_state;
}

/* Any regular memory on that node ? */
@@ -4242,11 +4248,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
				early_node_map[i].start_pfn,
				early_node_map[i].end_pfn);

-	/*
-	 * find_zone_movable_pfns_for_nodes/early_calculate_totalpages init
-	 * that node_mask, clear it at first
-	 */
-	nodes_clear(node_states[N_HIGH_MEMORY]);
	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
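
The four ZONE_MOVABLE hunks are one logical change: find_zone_movable_pfns_for_nodes() borrows node_states[N_HIGH_MEMORY] as scratch state (early_calculate_totalpages() repopulates it), so it now saves the mask on entry and restores it on every exit path, with the early return rewritten as goto out; the caller-side nodes_clear() workaround in free_area_init_nodes() then becomes unnecessary. The save/borrow/restore shape, sketched with a hypothetical early-exit condition:

	nodemask_t saved = node_states[N_HIGH_MEMORY];	/* save on entry */

	/* ... borrow the mask as scratch state for the calculation ... */

	if (nothing_to_do)	/* hypothetical condition */
		goto out;	/* never a bare return past the save */

	/* ... more work that may also bail out via goto out ... */
out:
	node_states[N_HIGH_MEMORY] = saved;	/* restored on all paths */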
@@ -4744,8 +4745,10 @@ void *__init alloc_large_system_hash(const char *tablename,
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
-			if (get_order(size) < MAX_ORDER)
+			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
+				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);
4751 4754
@@ -4763,16 +4766,6 @@ void *__init alloc_large_system_hash(const char *tablename,
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

-	/*
-	 * If hashdist is set, the table allocation is done with __vmalloc()
-	 * which invokes the kmemleak_alloc() callback. This function may also
-	 * be called before the slab and kmemleak are initialised when
-	 * kmemleak simply buffers the request to be executed later
-	 * (GFP_ATOMIC flag ignored in this case).
-	 */
-	if (!hashdist)
-		kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
	return table;
}

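The last two hunks pair the kmemleak annotation with the one allocation path that needs it: __vmalloc() (the hashdist case) already reports to kmemleak through its own hook, and the old unconditional call at the end could run with table == NULL or cover paths kmemleak tracks by itself, so the registration now sits directly after alloc_pages_exact(). The pairing shape, sketched with a hypothetical buffer:

	/*
	 * Register otherwise-untracked memory at its allocation site.
	 * min_count = 1 means "report as a leak if scanning finds fewer
	 * than one reference"; the gfp flags mirror the allocation's.
	 * kmemleak_alloc() ignores a NULL pointer, so no check is needed.
	 */
	buf = alloc_pages_exact(size, GFP_ATOMIC);
	kmemleak_alloc(buf, size, 1, GFP_ATOMIC);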