Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index af982f7cdb2a..9eb9eb928285 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -268,13 +268,14 @@ void prep_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	struct page *p = page + 1;
 
 	set_compound_page_dtor(page, free_compound_page);
 	set_compound_order(page, order);
 	__SetPageHead(page);
-	for (i = 1; i < nr_pages; i++) {
-		struct page *p = page + i;
-
+	for (i = 1; i < nr_pages; i++, p++) {
+		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
+			p = pfn_to_page(page_to_pfn(page) + i);
 		__SetPageTail(p);
 		p->first_page = page;
 	}
@@ -284,6 +285,7 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	struct page *p = page + 1;
 
 	if (unlikely(compound_order(page) != order))
 		bad_page(page);
@@ -291,8 +293,9 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 	if (unlikely(!PageHead(page)))
 		bad_page(page);
 	__ClearPageHead(page);
-	for (i = 1; i < nr_pages; i++) {
-		struct page *p = page + i;
+	for (i = 1; i < nr_pages; i++, p++) {
+		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
+			p = pfn_to_page(page_to_pfn(page) + i);
 
 		if (unlikely(!PageTail(p) |
 				(p->first_page != page)))
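
Note on the two compound-page hunks above: struct page entries are not guaranteed to be contiguous beyond a MAX_ORDER_NR_PAGES-aligned block (for example with SPARSEMEM), so the new loops advance with p++ inside a block and re-derive the pointer from the pfn whenever i crosses a block boundary. The user-space sketch below illustrates only that iteration pattern; the tiny MAX_ORDER_NR_PAGES value, the two-block stand-in for mem_map and the local pfn_to_page()/page_to_pfn() helpers are invented for illustration and are not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>

#define MAX_ORDER_NR_PAGES 4	/* tiny stand-in value so the boundary shows up */

struct page {
	unsigned long pfn;	/* cut-down stand-in for the real struct page */
	int tail;
};

/* Two separately allocated blocks standing in for a non-contiguous mem_map. */
static struct page *blocks[2];

static struct page *pfn_to_page(unsigned long pfn)
{
	return &blocks[pfn / MAX_ORDER_NR_PAGES][pfn % MAX_ORDER_NR_PAGES];
}

static unsigned long page_to_pfn(struct page *page)
{
	return page->pfn;
}

/*
 * Mirrors the loop shape used in the hunks above: p++ while inside one
 * MAX_ORDER_NR_PAGES block, pfn_to_page() when crossing into the next one
 * (the kernel's unlikely() hint is dropped here).
 */
static void mark_tails(struct page *head, unsigned int order)
{
	int nr_pages = 1 << order;
	struct page *p = head + 1;
	int i;

	for (i = 1; i < nr_pages; i++, p++) {
		if ((i & (MAX_ORDER_NR_PAGES - 1)) == 0)
			p = pfn_to_page(page_to_pfn(head) + i);
		p->tail = 1;
	}
}

int main(void)
{
	unsigned long pfn;
	int b, i;

	for (b = 0; b < 2; b++) {
		blocks[b] = calloc(MAX_ORDER_NR_PAGES, sizeof(struct page));
		for (i = 0; i < MAX_ORDER_NR_PAGES; i++)
			blocks[b][i].pfn = (unsigned long)b * MAX_ORDER_NR_PAGES + i;
	}

	mark_tails(pfn_to_page(0), 3);	/* order-3 page spanning both blocks */

	for (pfn = 0; pfn < 2 * MAX_ORDER_NR_PAGES; pfn++)
		printf("pfn %lu tail=%d\n", pfn, pfn_to_page(pfn)->tail);

	for (b = 0; b < 2; b++)
		free(blocks[b]);
	return 0;
}

With order 3 and a block size of 4, the walk marks pfns 1-7 as tails and re-derives the pointer once, at i == 4.
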
@@ -694,6 +697,9 @@ static int move_freepages(struct zone *zone,
 #endif
 
 	for (page = start_page; page <= end_page;) {
+		/* Make sure we are not inadvertently changing nodes */
+		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
 			continue;
@@ -2516,6 +2522,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 			continue;
 		page = pfn_to_page(pfn);
 
+		/* Watch out for overlapping nodes */
+		if (page_to_nid(page) != zone_to_nid(zone))
+			continue;
+
 		/* Blocks with reserved pages will never free, skip them. */
 		if (PageReserved(page))
 			continue;
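
Note on the two node checks above: pfn ranges belonging to different nodes can overlap or interleave, so code walking a zone's pfn span cannot assume every valid page in it belongs to the zone's node; move_freepages() asserts this with VM_BUG_ON(), while setup_zone_migrate_reserve() skips pages from other nodes. The user-space sketch below shows the skip pattern only; the node layout, the cut-down struct zone and the array standing in for page_to_nid() are invented for illustration.

#include <stdio.h>

#define NR_PFNS 16

/* Invented interleaved layout: node 1 owns pfns 4-7 inside node 0's span. */
static const int page_nid[NR_PFNS] = {
	0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
};

/* Cut-down stand-in for the zone: a node id plus the pfn span it covers. */
struct zone {
	int nid;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static void walk_zone(const struct zone *zone)
{
	unsigned long pfn;

	for (pfn = zone->start_pfn; pfn < zone->end_pfn; pfn++) {
		/* Watch out for overlapping nodes, as in the hunk above. */
		if (page_nid[pfn] != zone->nid) {
			printf("pfn %lu: node %d page, skipping\n",
			       pfn, page_nid[pfn]);
			continue;
		}
		printf("pfn %lu: node %d page, processing\n", pfn, zone->nid);
	}
}

int main(void)
{
	struct zone node0_zone = { .nid = 0, .start_pfn = 0, .end_pfn = NR_PFNS };

	walk_zone(&node0_zone);
	return 0;
}
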
@@ -3942,7 +3952,7 @@ static void check_for_regular_memory(pg_data_t *pgdat)
 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 {
 	unsigned long nid;
-	enum zone_type i;
+	int i;
 
 	/* Sort early_node_map as initialisation assumes it is sorted */
 	sort_node_map();
@@ -4064,7 +4074,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };
+struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
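
Note on the final hunk: contig_page_data points at bootmem_node_data, which lives in init data, so without an annotation modpost reports a section-mismatch warning; __refdata marks the cross-section reference as intentional. The sketch below imitates that pattern with user-space stand-ins for the kernel's __initdata/__refdata annotations from <linux/init.h>; the variable names are invented for illustration.

#include <stdio.h>

/* User-space imitations of the kernel's section annotations. */
#define __initdata __attribute__((__section__(".init.data")))
#define __refdata  __attribute__((__section__(".ref.data")))

/* Stand-in for data that the kernel would discard after boot. */
static int boot_table[4] __initdata = { 1, 2, 3, 4 };

/*
 * A persistent object that deliberately points into init data. In the
 * kernel, marking it __refdata documents the reference and stops modpost
 * from warning about it (the contig_page_data -> bootmem_node_data case).
 */
static struct {
	int *table;
} early_config __refdata = { .table = boot_table };

int main(void)
{
	printf("first boot_table entry via early_config: %d\n",
	       early_config.table[0]);
	return 0;
}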