Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  184
1 file changed, 118 insertions(+), 66 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e4092704c1a9..90c1439549fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -104,19 +104,24 @@ gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
  * only be modified with pm_mutex held, unless the suspend/hibernate code is
  * guaranteed not to run in parallel with that modification).
  */
-void set_gfp_allowed_mask(gfp_t mask)
+
+static gfp_t saved_gfp_mask;
+
+void pm_restore_gfp_mask(void)
 {
         WARN_ON(!mutex_is_locked(&pm_mutex));
-        gfp_allowed_mask = mask;
+        if (saved_gfp_mask) {
+                gfp_allowed_mask = saved_gfp_mask;
+                saved_gfp_mask = 0;
+        }
 }
 
-gfp_t clear_gfp_allowed_mask(gfp_t mask)
+void pm_restrict_gfp_mask(void)
 {
-        gfp_t ret = gfp_allowed_mask;
-
         WARN_ON(!mutex_is_locked(&pm_mutex));
-        gfp_allowed_mask &= ~mask;
-        return ret;
+        WARN_ON(saved_gfp_mask);
+        saved_gfp_mask = gfp_allowed_mask;
+        gfp_allowed_mask &= ~GFP_IOFS;
 }
 #endif /* CONFIG_PM_SLEEP */
 
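The hunk above keeps the saved GFP mask inside page_alloc.c: pm_restrict_gfp_mask() stashes gfp_allowed_mask and clears GFP_IOFS so I/O- and FS-backed allocations are blocked during suspend, and pm_restore_gfp_mask() only undoes a restriction that is actually pending. A minimal userspace model of that save/restore pattern (the mask values and the call sites are illustrative assumptions, not taken from this diff):

#include <assert.h>
#include <stdio.h>

/* Model of gfp_allowed_mask handling; the bit values are illustrative. */
#define GFP_BOOT_MASK 0xffu
#define GFP_IOFS      0xc0u   /* stands in for __GFP_IO | __GFP_FS */

static unsigned int gfp_allowed_mask = GFP_BOOT_MASK;
static unsigned int saved_gfp_mask;

static void pm_restrict_gfp_mask(void)
{
        assert(!saved_gfp_mask);          /* stands in for WARN_ON() */
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~GFP_IOFS;    /* no I/O or FS allocations from here on */
}

static void pm_restore_gfp_mask(void)
{
        if (saved_gfp_mask) {             /* restore only if a restriction is pending */
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

int main(void)
{
        pm_restrict_gfp_mask();           /* e.g. before writing a hibernation image */
        printf("restricted: %#x\n", gfp_allowed_mask);
        pm_restore_gfp_mask();            /* safe to call even if never restricted */
        printf("restored:   %#x\n", gfp_allowed_mask);
        return 0;
}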
@@ -352,6 +357,7 @@ void prep_compound_page(struct page *page, unsigned long order)
         }
 }
 
+/* update __split_huge_page_refcount if you change this function */
 static int destroy_compound_page(struct page *page, unsigned long order)
 {
         int i;
@@ -421,18 +427,10 @@ static inline void rmv_page_order(struct page *page)
  *
  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  */
-static inline struct page *
-__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
-{
-        unsigned long buddy_idx = page_idx ^ (1 << order);
-
-        return page + (buddy_idx - page_idx);
-}
-
 static inline unsigned long
-__find_combined_index(unsigned long page_idx, unsigned int order)
+__find_buddy_index(unsigned long page_idx, unsigned int order)
 {
-        return (page_idx & ~(1 << order));
+        return page_idx ^ (1 << order);
 }
 
 /*
@@ -443,8 +441,8 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
  * (c) a page and its buddy have the same order &&
  * (d) a page and its buddy are in the same zone.
  *
- * For recording whether a page is in the buddy system, we use PG_buddy.
- * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set ->_mapcount -2.
+ * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
  *
  * For recording page's order, we use page_private(page).
  */
@@ -477,7 +475,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * as necessary, plus some accounting needed to play nicely with other
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with PG_buddy. Page's
+ * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other. That is, if we allocate a small block, and both were
@@ -494,6 +492,7 @@ static inline void __free_one_page(struct page *page,
 {
         unsigned long page_idx;
         unsigned long combined_idx;
+        unsigned long uninitialized_var(buddy_idx);
         struct page *buddy;
 
         if (unlikely(PageCompound(page)))
@@ -508,7 +507,8 @@ static inline void __free_one_page(struct page *page,
         VM_BUG_ON(bad_range(zone, page));
 
         while (order < MAX_ORDER-1) {
-                buddy = __page_find_buddy(page, page_idx, order);
+                buddy_idx = __find_buddy_index(page_idx, order);
+                buddy = page + (buddy_idx - page_idx);
                 if (!page_is_buddy(page, buddy, order))
                         break;
 
@@ -516,7 +516,7 @@ static inline void __free_one_page(struct page *page,
                 list_del(&buddy->lru);
                 zone->free_area[order].nr_free--;
                 rmv_page_order(buddy);
-                combined_idx = __find_combined_index(page_idx, order);
+                combined_idx = buddy_idx & page_idx;
                 page = page + (combined_idx - page_idx);
                 page_idx = combined_idx;
                 order++;
@@ -533,9 +533,10 @@ static inline void __free_one_page(struct page *page,
          */
         if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
                 struct page *higher_page, *higher_buddy;
-                combined_idx = __find_combined_index(page_idx, order);
-                higher_page = page + combined_idx - page_idx;
-                higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
+                combined_idx = buddy_idx & page_idx;
+                higher_page = page + (combined_idx - page_idx);
+                buddy_idx = __find_buddy_index(combined_idx, order + 1);
+                higher_buddy = page + (buddy_idx - combined_idx);
                 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                         list_add_tail(&page->lru,
                                 &zone->free_area[order].free_list[migratetype]);
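With the buddy index cached in buddy_idx, the merge step derives the combined (parent) index as buddy_idx & page_idx instead of recomputing it from scratch. A standalone sketch of the index arithmetic, with illustrative numbers:

#include <assert.h>
#include <stdio.h>

/* Buddy-index arithmetic used above; a standalone model, not kernel code. */
static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
        return page_idx ^ (1UL << order);       /* flip the order-th bit */
}

int main(void)
{
        unsigned long page_idx = 12;            /* 0b1100, start of an order-2 block */
        unsigned int order = 2;

        unsigned long buddy_idx = find_buddy_index(page_idx, order);   /* 8 */
        unsigned long combined_idx = buddy_idx & page_idx;             /* 8: start of the merged order-3 block */

        assert(buddy_idx == 8 && combined_idx == 8);
        /* The removed helper computed the same value as page_idx & ~(1 << order). */
        assert(combined_idx == (page_idx & ~(1UL << order)));
        printf("buddy=%lu combined=%lu\n", buddy_idx, combined_idx);
        return 0;
}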
@@ -646,13 +647,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
         trace_mm_page_free_direct(page, order);
         kmemcheck_free_shadow(page, order);
 
-        for (i = 0; i < (1 << order); i++) {
-                struct page *pg = page + i;
-
-                if (PageAnon(pg))
-                        pg->mapping = NULL;
-                bad += free_pages_check(pg);
-        }
+        if (PageAnon(page))
+                page->mapping = NULL;
+        for (i = 0; i < (1 << order); i++)
+                bad += free_pages_check(page + i);
         if (bad)
                 return false;
 
@@ -1455,24 +1453,24 @@ static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 
 /*
- * Return 1 if free pages are above 'mark'. This takes into account the order
+ * Return true if free pages are above 'mark'. This takes into account the order
  * of the allocation.
  */
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-                      int classzone_idx, int alloc_flags)
+static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+                      int classzone_idx, int alloc_flags, long free_pages)
 {
         /* free_pages my go negative - that's OK */
         long min = mark;
-        long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
         int o;
 
+        free_pages -= (1 << order) + 1;
         if (alloc_flags & ALLOC_HIGH)
                 min -= min / 2;
         if (alloc_flags & ALLOC_HARDER)
                 min -= min / 4;
 
         if (free_pages <= min + z->lowmem_reserve[classzone_idx])
-                return 0;
+                return false;
         for (o = 0; o < order; o++) {
                 /* At the next order, this order's pages become unavailable */
                 free_pages -= z->free_area[o].nr_free << o;
@@ -1481,9 +1479,28 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                 min >>= 1;
 
                 if (free_pages <= min)
-                        return 0;
+                        return false;
         }
-        return 1;
+        return true;
+}
+
+bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+                      int classzone_idx, int alloc_flags)
+{
+        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+                                        zone_page_state(z, NR_FREE_PAGES));
+}
+
+bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+                      int classzone_idx, int alloc_flags)
+{
+        long free_pages = zone_page_state(z, NR_FREE_PAGES);
+
+        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
+                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
+
+        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+                                                                free_pages);
 }
 
 #ifdef CONFIG_NUMA
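zone_watermark_ok() and zone_watermark_ok_safe() now share one core check; the _safe variant re-reads an exact per-cpu snapshot when the cheap NR_FREE_PAGES counter sits below the drift mark. A rough standalone model of the core loop (lowmem_reserve and the ALLOC_* adjustments are omitted; the numbers are illustrative, not taken from any real zone):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11

/* Model of __zone_watermark_ok(): nr_free[o] counts free blocks of each order. */
static bool watermark_ok(long free_pages, const unsigned long nr_free[MAX_ORDER],
                         int order, long mark)
{
        long min = mark;
        int o;

        free_pages -= (1 << order) + 1;         /* discount the requested block, as in the hunk above */
        if (free_pages <= min)
                return false;
        for (o = 0; o < order; o++) {
                /* Blocks below the requested order cannot satisfy it. */
                free_pages -= nr_free[o] << o;
                min >>= 1;                      /* relax the mark for higher orders */
                if (free_pages <= min)
                        return false;
        }
        return true;
}

int main(void)
{
        unsigned long nr_free[MAX_ORDER] = { 260, 20, 5 };      /* order-0, order-1, order-2 blocks */
        long free_pages = 260 + 20 * 2 + 5 * 4;                 /* 320 free pages in total */

        printf("order-0 ok: %d\n", watermark_ok(free_pages, nr_free, 0, 128));  /* 1 */
        printf("order-2 ok: %d\n", watermark_ok(free_pages, nr_free, 2, 128));  /* 0: too few high-order pages */
        return 0;
}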
@@ -1788,15 +1805,18 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
         struct zonelist *zonelist, enum zone_type high_zoneidx,
         nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-        int migratetype, unsigned long *did_some_progress)
+        int migratetype, unsigned long *did_some_progress,
+        bool sync_migration)
 {
         struct page *page;
 
         if (!order || compaction_deferred(preferred_zone))
                 return NULL;
 
+        current->flags |= PF_MEMALLOC;
         *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-                                                nodemask);
+                                                nodemask, sync_migration);
+        current->flags &= ~PF_MEMALLOC;
         if (*did_some_progress != COMPACT_SKIPPED) {
 
                 /* Page migration frees to the PCP lists but we want merging */
@@ -1832,7 +1852,8 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
         struct zonelist *zonelist, enum zone_type high_zoneidx,
         nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-        int migratetype, unsigned long *did_some_progress)
+        int migratetype, unsigned long *did_some_progress,
+        bool sync_migration)
 {
         return NULL;
 }
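Compaction in the slowpath becomes a two-pass affair: the first attempt migrates asynchronously, and only the retry after direct reclaim may block on synchronous migration. A toy model of that ordering, with stand-in functions rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the slowpath ordering introduced above; try_compact() and
 * try_reclaim() are stand-ins, not kernel functions. */
static bool try_compact(int order, bool sync)
{
        printf("compact order-%d (%s)\n", order, sync ? "sync" : "async");
        return false;                   /* pretend nothing suitable was freed */
}

static bool try_reclaim(int order)
{
        printf("direct reclaim for order-%d\n", order);
        return false;
}

int main(void)
{
        int order = 9;                  /* e.g. a huge-page sized request */
        bool sync_migration = false;

        if (try_compact(order, sync_migration))         /* first pass: async */
                return 0;
        sync_migration = true;          /* later passes may block on migration */

        if (try_reclaim(order))
                return 0;

        /* High-order requests may not loop back, so compaction runs once more. */
        try_compact(order, sync_migration);
        return 0;
}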
@@ -1847,23 +1868,22 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 {
         struct page *page = NULL;
         struct reclaim_state reclaim_state;
-        struct task_struct *p = current;
         bool drained = false;
 
         cond_resched();
 
         /* We now go into synchronous reclaim */
         cpuset_memory_pressure_bump();
-        p->flags |= PF_MEMALLOC;
+        current->flags |= PF_MEMALLOC;
         lockdep_set_current_reclaim_state(gfp_mask);
         reclaim_state.reclaimed_slab = 0;
-        p->reclaim_state = &reclaim_state;
+        current->reclaim_state = &reclaim_state;
 
         *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
-        p->reclaim_state = NULL;
+        current->reclaim_state = NULL;
         lockdep_clear_current_reclaim_state();
-        p->flags &= ~PF_MEMALLOC;
+        current->flags &= ~PF_MEMALLOC;
 
         cond_resched();
 
@@ -1915,19 +1935,19 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
 
 static inline
 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
-                                                enum zone_type high_zoneidx)
+                                                enum zone_type high_zoneidx,
+                                                enum zone_type classzone_idx)
 {
         struct zoneref *z;
         struct zone *zone;
 
         for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
-                wakeup_kswapd(zone, order);
+                wakeup_kswapd(zone, order, classzone_idx);
 }
 
 static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-        struct task_struct *p = current;
         int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
         const gfp_t wait = gfp_mask & __GFP_WAIT;
 
@@ -1943,18 +1963,23 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
         alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
         if (!wait) {
-                alloc_flags |= ALLOC_HARDER;
+                /*
+                 * Not worth trying to allocate harder for
+                 * __GFP_NOMEMALLOC even if it can't schedule.
+                 */
+                if (!(gfp_mask & __GFP_NOMEMALLOC))
+                        alloc_flags |= ALLOC_HARDER;
                 /*
                  * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
                  * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
                  */
                 alloc_flags &= ~ALLOC_CPUSET;
-        } else if (unlikely(rt_task(p)) && !in_interrupt())
+        } else if (unlikely(rt_task(current)) && !in_interrupt())
                 alloc_flags |= ALLOC_HARDER;
 
         if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
                 if (!in_interrupt() &&
-                    ((p->flags & PF_MEMALLOC) ||
+                    ((current->flags & PF_MEMALLOC) ||
                      unlikely(test_thread_flag(TIF_MEMDIE))))
                         alloc_flags |= ALLOC_NO_WATERMARKS;
         }
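gfp_to_alloc_flags() now skips ALLOC_HARDER for atomic __GFP_NOMEMALLOC requests and reads current directly instead of caching it in p. A simplified userspace model of the flag derivation (the flag values are illustrative and the ALLOC_NO_WATERMARKS handling is left out):

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of gfp_to_alloc_flags(); not the kernel's flag values. */
#define __GFP_WAIT        0x10u
#define __GFP_HIGH        0x20u
#define __GFP_NOMEMALLOC  0x10000u

#define ALLOC_WMARK_MIN   0x1u
#define ALLOC_HARDER      0x10u
#define ALLOC_HIGH        0x20u
#define ALLOC_CPUSET      0x40u

static unsigned int alloc_flags_for(unsigned int gfp_mask, bool rt_task)
{
        unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

        if (gfp_mask & __GFP_HIGH)
                alloc_flags |= ALLOC_HIGH;

        if (!(gfp_mask & __GFP_WAIT)) {
                /* Atomic path: allocate harder unless __GFP_NOMEMALLOC,
                 * and ignore cpuset constraints rather than fail. */
                if (!(gfp_mask & __GFP_NOMEMALLOC))
                        alloc_flags |= ALLOC_HARDER;
                alloc_flags &= ~ALLOC_CPUSET;
        } else if (rt_task) {
                alloc_flags |= ALLOC_HARDER;
        }
        return alloc_flags;
}

int main(void)
{
        /* GFP_ATOMIC is roughly __GFP_HIGH without __GFP_WAIT. */
        printf("GFP_ATOMIC-like alloc flags: %#x\n", alloc_flags_for(__GFP_HIGH, false));
        return 0;
}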
@@ -1973,7 +1998,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         int alloc_flags;
         unsigned long pages_reclaimed = 0;
         unsigned long did_some_progress;
-        struct task_struct *p = current;
+        bool sync_migration = false;
 
         /*
          * In the slowpath, we sanity check order to avoid ever trying to
@@ -1998,7 +2023,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                 goto nopage;
 
 restart:
-        wake_all_kswapd(order, zonelist, high_zoneidx);
+        if (!(gfp_mask & __GFP_NO_KSWAPD))
+                wake_all_kswapd(order, zonelist, high_zoneidx,
+                                                zone_idx(preferred_zone));
 
         /*
          * OK, we're below the kswapd watermark and have kicked background
@@ -2029,21 +2056,26 @@ rebalance:
                 goto nopage;
 
         /* Avoid recursion of direct reclaim */
-        if (p->flags & PF_MEMALLOC)
+        if (current->flags & PF_MEMALLOC)
                 goto nopage;
 
         /* Avoid allocations with no watermarks from looping endlessly */
         if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
                 goto nopage;
 
-        /* Try direct compaction */
+        /*
+         * Try direct compaction. The first pass is asynchronous. Subsequent
+         * attempts after direct reclaim are synchronous
+         */
         page = __alloc_pages_direct_compact(gfp_mask, order,
                                         zonelist, high_zoneidx,
                                         nodemask,
                                         alloc_flags, preferred_zone,
-                                        migratetype, &did_some_progress);
+                                        migratetype, &did_some_progress,
+                                        sync_migration);
         if (page)
                 goto got_pg;
+        sync_migration = true;
 
         /* Try direct reclaim and then allocating */
         page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2097,13 +2129,27 @@ rebalance:
                 /* Wait for some write requests to complete then retry */
                 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
                 goto rebalance;
+        } else {
+                /*
+                 * High-order allocations do not necessarily loop after
+                 * direct reclaim and reclaim/compaction depends on compaction
+                 * being called after reclaim so call directly if necessary
+                 */
+                page = __alloc_pages_direct_compact(gfp_mask, order,
+                                        zonelist, high_zoneidx,
+                                        nodemask,
+                                        alloc_flags, preferred_zone,
+                                        migratetype, &did_some_progress,
+                                        sync_migration);
+                if (page)
+                        goto got_pg;
         }
 
 nopage:
         if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
                 printk(KERN_WARNING "%s: page allocation failure."
                         " order:%d, mode:0x%x\n",
-                        p->comm, order, gfp_mask);
+                        current->comm, order, gfp_mask);
                 dump_stack();
                 show_mem();
         }
@@ -2437,7 +2483,7 @@ void show_free_areas(void)
2437 " all_unreclaimable? %s" 2483 " all_unreclaimable? %s"
2438 "\n", 2484 "\n",
2439 zone->name, 2485 zone->name,
2440 K(zone_nr_free_pages(zone)), 2486 K(zone_page_state(zone, NR_FREE_PAGES)),
2441 K(min_wmark_pages(zone)), 2487 K(min_wmark_pages(zone)),
2442 K(low_wmark_pages(zone)), 2488 K(low_wmark_pages(zone)),
2443 K(high_wmark_pages(zone)), 2489 K(high_wmark_pages(zone)),
@@ -2580,9 +2626,16 @@ static int __parse_numa_zonelist_order(char *s)
 
 static __init int setup_numa_zonelist_order(char *s)
 {
-        if (s)
-                return __parse_numa_zonelist_order(s);
-        return 0;
+        int ret;
+
+        if (!s)
+                return 0;
+
+        ret = __parse_numa_zonelist_order(s);
+        if (ret == 0)
+                strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
+
+        return ret;
 }
 early_param("numa_zonelist_order", setup_numa_zonelist_order);
 
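setup_numa_zonelist_order() now copies the boot-parameter string into numa_zonelist_order only after __parse_numa_zonelist_order() accepts it, and propagates the parse result. A small userspace model of that validate-then-store pattern (accepted strings, buffer size, and snprintf standing in for strlcpy are illustrative assumptions):

#include <stdio.h>
#include <string.h>

#define ORDER_LEN 16
static char numa_zonelist_order[ORDER_LEN] = "default";

/* Stand-in for __parse_numa_zonelist_order(): 0 on success, -1 on error. */
static int parse_order(const char *s)
{
        return (!strcmp(s, "d") || !strcmp(s, "default") ||
                !strcmp(s, "n") || !strcmp(s, "node") ||
                !strcmp(s, "z") || !strcmp(s, "zone")) ? 0 : -1;
}

static int setup_order(const char *s)
{
        int ret;

        if (!s)
                return 0;

        ret = parse_order(s);
        if (ret == 0)   /* persist only a string that parsed successfully */
                snprintf(numa_zonelist_order, sizeof(numa_zonelist_order), "%s", s);

        return ret;
}

int main(void)
{
        setup_order("zone");
        printf("order now: %s\n", numa_zonelist_order);         /* "zone" */
        setup_order("bogus");
        printf("order still: %s\n", numa_zonelist_order);       /* unchanged */
        return 0;
}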
@@ -4009,7 +4062,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
         zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
 }
 #else
-static void inline setup_usemap(struct pglist_data *pgdat,
+static inline void setup_usemap(struct pglist_data *pgdat,
                                 struct zone *zone, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
@@ -5512,7 +5565,6 @@ static struct trace_print_flags pageflag_names[] = {
         {1UL << PG_swapcache,           "swapcache"     },
         {1UL << PG_mappedtodisk,        "mappedtodisk"  },
         {1UL << PG_reclaim,             "reclaim"       },
-        {1UL << PG_buddy,               "buddy"         },
         {1UL << PG_swapbacked,          "swapbacked"    },
         {1UL << PG_unevictable,         "unevictable"   },
 #ifdef CONFIG_MMU
@@ -5560,7 +5612,7 @@ void dump_page(struct page *page)
 {
         printk(KERN_ALERT
                "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
-                page, page_count(page), page_mapcount(page),
+                page, atomic_read(&page->_count), page_mapcount(page),
                 page->mapping, page->index);
         dump_page_flags(page->flags);
 }