Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 165
 1 file changed, 106 insertions(+), 59 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff7e15872398..90c1439549fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -357,6 +357,7 @@ void prep_compound_page(struct page *page, unsigned long order)
 	}
 }
 
+/* update __split_huge_page_refcount if you change this function */
 static int destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
@@ -426,18 +427,10 @@ static inline void rmv_page_order(struct page *page)
  *
  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  */
-static inline struct page *
-__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
-{
-	unsigned long buddy_idx = page_idx ^ (1 << order);
-
-	return page + (buddy_idx - page_idx);
-}
-
 static inline unsigned long
-__find_combined_index(unsigned long page_idx, unsigned int order)
+__find_buddy_index(unsigned long page_idx, unsigned int order)
 {
-	return (page_idx & ~(1 << order));
+	return page_idx ^ (1 << order);
 }
 
 /*
@@ -448,8 +441,8 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
  * (c) a page and its buddy have the same order &&
  * (d) a page and its buddy are in the same zone.
  *
- * For recording whether a page is in the buddy system, we use PG_buddy.
- * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set ->_mapcount -2.
+ * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
  *
  * For recording page's order, we use page_private(page).
  */
@@ -482,7 +475,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * as necessary, plus some accounting needed to play nicely with other
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with PG_buddy. Page's
+ * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other. That is, if we allocate a small block, and both were
@@ -499,6 +492,7 @@ static inline void __free_one_page(struct page *page,
 {
 	unsigned long page_idx;
 	unsigned long combined_idx;
+	unsigned long uninitialized_var(buddy_idx);
 	struct page *buddy;
 
 	if (unlikely(PageCompound(page)))
@@ -513,7 +507,8 @@ static inline void __free_one_page(struct page *page,
 	VM_BUG_ON(bad_range(zone, page));
 
 	while (order < MAX_ORDER-1) {
-		buddy = __page_find_buddy(page, page_idx, order);
+		buddy_idx = __find_buddy_index(page_idx, order);
+		buddy = page + (buddy_idx - page_idx);
 		if (!page_is_buddy(page, buddy, order))
 			break;
 
@@ -521,7 +516,7 @@ static inline void __free_one_page(struct page *page,
 		list_del(&buddy->lru);
 		zone->free_area[order].nr_free--;
 		rmv_page_order(buddy);
-		combined_idx = __find_combined_index(page_idx, order);
+		combined_idx = buddy_idx & page_idx;
 		page = page + (combined_idx - page_idx);
 		page_idx = combined_idx;
 		order++;
@@ -538,9 +533,10 @@ static inline void __free_one_page(struct page *page,
 	 */
 	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
 		struct page *higher_page, *higher_buddy;
-		combined_idx = __find_combined_index(page_idx, order);
-		higher_page = page + combined_idx - page_idx;
-		higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
+		combined_idx = buddy_idx & page_idx;
+		higher_page = page + (combined_idx - page_idx);
+		buddy_idx = __find_buddy_index(combined_idx, order + 1);
+		higher_buddy = page + (buddy_idx - combined_idx);
 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
 			list_add_tail(&page->lru,
 				&zone->free_area[order].free_list[migratetype]);
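
The index arithmetic above is compact: a block's buddy differs from it only in bit 'order' of the page index, and the merged block starts at whichever index has that bit clear, i.e. buddy_idx & page_idx. A standalone sketch of the same arithmetic follows; it is a hypothetical userspace demo with made-up names, not part of this patch.

/* Standalone illustration of the buddy-index math used above.
 * Hypothetical userspace demo, not kernel code.
 */
#include <stdio.h>

static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	/* The buddy differs only in bit 'order' of the index. */
	return page_idx ^ (1UL << order);
}

int main(void)
{
	unsigned long page_idx = 12;	/* order-2 block (4 pages) at index 12 */
	unsigned int order = 2;
	unsigned long buddy_idx = find_buddy_index(page_idx, order);	/* 12 ^ 4 = 8 */
	/* The merged order-3 block starts at the lower of the two indexes,
	 * i.e. the common prefix with bit 'order' cleared: buddy_idx & page_idx.
	 */
	unsigned long combined_idx = buddy_idx & page_idx;		/* 8 & 12 = 8 */

	printf("buddy of %lu at order %u is %lu, merged block starts at %lu\n",
	       page_idx, order, buddy_idx, combined_idx);
	return 0;
}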
@@ -651,13 +647,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 	trace_mm_page_free_direct(page, order);
 	kmemcheck_free_shadow(page, order);
 
-	for (i = 0; i < (1 << order); i++) {
-		struct page *pg = page + i;
-
-		if (PageAnon(pg))
-			pg->mapping = NULL;
-		bad += free_pages_check(pg);
-	}
+	if (PageAnon(page))
+		page->mapping = NULL;
+	for (i = 0; i < (1 << order); i++)
+		bad += free_pages_check(page + i);
 	if (bad)
 		return false;
 
@@ -1460,24 +1453,24 @@ static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 
 /*
- * Return 1 if free pages are above 'mark'. This takes into account the order
+ * Return true if free pages are above 'mark'. This takes into account the order
  * of the allocation.
  */
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-		      int classzone_idx, int alloc_flags)
+static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags, long free_pages)
 {
 	/* free_pages my go negative - that's OK */
 	long min = mark;
-	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
 	int o;
 
+	free_pages -= (1 << order) + 1;
 	if (alloc_flags & ALLOC_HIGH)
 		min -= min / 2;
 	if (alloc_flags & ALLOC_HARDER)
 		min -= min / 4;
 
 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
-		return 0;
+		return false;
 	for (o = 0; o < order; o++) {
 		/* At the next order, this order's pages become unavailable */
 		free_pages -= z->free_area[o].nr_free << o;
@@ -1486,9 +1479,28 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		min >>= 1;
 
 		if (free_pages <= min)
-			return 0;
+			return false;
 	}
-	return 1;
+	return true;
+}
+
+bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags)
+{
+	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+					zone_page_state(z, NR_FREE_PAGES));
+}
+
+bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags)
+{
+	long free_pages = zone_page_state(z, NR_FREE_PAGES);
+
+	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
+		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
+
+	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+								free_pages);
 }
 
 #ifdef CONFIG_NUMA
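
zone_watermark_ok_safe() above only pays for the exact counter read when the cheap per-cpu estimate has already fallen below the zone's drift mark, where per-cpu error could flip the watermark decision. A minimal sketch of that pattern follows; the struct and helper names are invented for illustration, and only the structure mirrors the kernel change.

/* Sketch of the "cheap estimate first, exact read near the mark" pattern
 * used by zone_watermark_ok_safe(). All names here are hypothetical.
 */
#include <stdbool.h>

struct counter {
	long cached;		/* cheap, possibly stale per-cpu sum */
	long exact;		/* expensive, fully folded-in value */
	long drift_mark;	/* below this, the cached value cannot be trusted */
};

static long read_cached(const struct counter *c) { return c->cached; }
static long read_exact(const struct counter *c)  { return c->exact;  }

bool counter_above_mark_safe(const struct counter *c, long mark)
{
	long value = read_cached(c);

	/* Only fall back to the expensive read when the estimate is low
	 * enough that per-cpu drift could change the answer. */
	if (c->drift_mark && value < c->drift_mark)
		value = read_exact(c);

	return value > mark;
}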
@@ -1793,15 +1805,18 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	struct page *page;
 
 	if (!order || compaction_deferred(preferred_zone))
 		return NULL;
 
+	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-						nodemask);
+						nodemask, sync_migration);
+	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
 		/* Page migration frees to the PCP lists but we want merging */
@@ -1837,7 +1852,8 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	return NULL;
 }
@@ -1852,23 +1868,22 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 {
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	struct task_struct *p = current;
 	bool drained = false;
 
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
-	p->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
-	p->reclaim_state = &reclaim_state;
+	current->reclaim_state = &reclaim_state;
 
 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
-	p->reclaim_state = NULL;
+	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
-	p->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
 
@@ -1920,19 +1935,19 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
 
 static inline
 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
-						enum zone_type high_zoneidx)
+						enum zone_type high_zoneidx,
+						enum zone_type classzone_idx)
 {
 	struct zoneref *z;
 	struct zone *zone;
 
 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
-		wakeup_kswapd(zone, order);
+		wakeup_kswapd(zone, order, classzone_idx);
 }
 
 static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-	struct task_struct *p = current;
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 	const gfp_t wait = gfp_mask & __GFP_WAIT;
 
@@ -1948,18 +1963,23 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
 	if (!wait) {
-		alloc_flags |= ALLOC_HARDER;
+		/*
+		 * Not worth trying to allocate harder for
+		 * __GFP_NOMEMALLOC even if it can't schedule.
+		 */
+		if (!(gfp_mask & __GFP_NOMEMALLOC))
+			alloc_flags |= ALLOC_HARDER;
 		/*
 		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(p)) && !in_interrupt())
+	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
 		if (!in_interrupt() &&
-		    ((p->flags & PF_MEMALLOC) ||
+		    ((current->flags & PF_MEMALLOC) ||
 		     unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
@@ -1978,7 +1998,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int alloc_flags;
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
-	struct task_struct *p = current;
+	bool sync_migration = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2003,7 +2023,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	wake_all_kswapd(order, zonelist, high_zoneidx);
+	if (!(gfp_mask & __GFP_NO_KSWAPD))
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+						zone_idx(preferred_zone));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2034,21 +2056,26 @@ rebalance:
 		goto nopage;
 
 	/* Avoid recursion of direct reclaim */
-	if (p->flags & PF_MEMALLOC)
+	if (current->flags & PF_MEMALLOC)
 		goto nopage;
 
 	/* Avoid allocations with no watermarks from looping endlessly */
 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
 		goto nopage;
 
-	/* Try direct compaction */
+	/*
+	 * Try direct compaction. The first pass is asynchronous. Subsequent
+	 * attempts after direct reclaim are synchronous
+	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order,
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					migratetype, &did_some_progress,
+					sync_migration);
 	if (page)
 		goto got_pg;
+	sync_migration = true;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2102,13 +2129,27 @@ rebalance:
 		/* Wait for some write requests to complete then retry */
 		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
 		goto rebalance;
+	} else {
+		/*
+		 * High-order allocations do not necessarily loop after
+		 * direct reclaim and reclaim/compaction depends on compaction
+		 * being called after reclaim so call directly if necessary
+		 */
+		page = __alloc_pages_direct_compact(gfp_mask, order,
+					zonelist, high_zoneidx,
+					nodemask,
+					alloc_flags, preferred_zone,
+					migratetype, &did_some_progress,
+					sync_migration);
+		if (page)
+			goto got_pg;
 	}
 
 nopage:
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
 		printk(KERN_WARNING "%s: page allocation failure."
 			" order:%d, mode:0x%x\n",
-			p->comm, order, gfp_mask);
+			current->comm, order, gfp_mask);
 		dump_stack();
 		show_mem();
 	}
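
Taken together, the two hunks above arrange compaction around direct reclaim: one asynchronous pass first, synchronous passes after reclaim, and one last synchronous attempt for allocations that will not loop. A compressed control-flow sketch follows; every helper in it is a hypothetical stand-in, not a kernel function.

/*
 * Control-flow sketch of the async-then-sync compaction ordering added in
 * this patch. All helpers here are hypothetical stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stddef.h>

struct page { int dummy; };

static struct page *try_compact(unsigned int order, bool sync_migration)
{
	(void)order; (void)sync_migration;
	return NULL;	/* stub: pretend compaction found nothing */
}

static struct page *try_reclaim(unsigned int order)
{
	(void)order;
	return NULL;	/* stub: pretend reclaim found nothing */
}

static bool should_retry(unsigned int order)
{
	(void)order;
	return false;	/* stub: high-order allocations may give up here */
}

struct page *slowpath_sketch(unsigned int order)
{
	bool sync_migration = false;	/* first compaction pass is asynchronous */
	struct page *page;

	page = try_compact(order, sync_migration);
	if (page)
		return page;
	sync_migration = true;		/* later passes may block on page migration */

	page = try_reclaim(order);
	if (page)
		return page;

	if (should_retry(order))
		return slowpath_sketch(order);	/* the real code loops via "rebalance:" */

	/*
	 * An allocation that will not retry still gets one synchronous
	 * compaction attempt: reclaim alone may free enough memory without
	 * producing a contiguous block of the requested order.
	 */
	return try_compact(order, sync_migration);
}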
@@ -2442,7 +2483,7 @@ void show_free_areas(void)
 			" all_unreclaimable? %s"
 			"\n",
 			zone->name,
-			K(zone_nr_free_pages(zone)),
+			K(zone_page_state(zone, NR_FREE_PAGES)),
 			K(min_wmark_pages(zone)),
 			K(low_wmark_pages(zone)),
 			K(high_wmark_pages(zone)),
@@ -2585,9 +2626,16 @@ static int __parse_numa_zonelist_order(char *s)
 
 static __init int setup_numa_zonelist_order(char *s)
 {
-	if (s)
-		return __parse_numa_zonelist_order(s);
-	return 0;
+	int ret;
+
+	if (!s)
+		return 0;
+
+	ret = __parse_numa_zonelist_order(s);
+	if (ret == 0)
+		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
+
+	return ret;
 }
 early_param("numa_zonelist_order", setup_numa_zonelist_order);
 
@@ -4014,7 +4062,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
 	zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
 }
 #else
-static void inline setup_usemap(struct pglist_data *pgdat,
+static inline void setup_usemap(struct pglist_data *pgdat,
 				struct zone *zone, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
@@ -5517,7 +5565,6 @@ static struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_swapcache,		"swapcache"	},
 	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
 	{1UL << PG_reclaim,		"reclaim"	},
-	{1UL << PG_buddy,		"buddy"		},
 	{1UL << PG_swapbacked,		"swapbacked"	},
 	{1UL << PG_unevictable,		"unevictable"	},
 #ifdef CONFIG_MMU
@@ -5565,7 +5612,7 @@ void dump_page(struct page *page)
 {
 	printk(KERN_ALERT
 	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
-		page, page_count(page), page_mapcount(page),
+		page, atomic_read(&page->_count), page_mapcount(page),
 		page->mapping, page->index);
 	dump_page_flags(page->flags);
 }