Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	152
1 file changed, 103 insertions, 49 deletions

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 79ac4afc908c..6da667274df5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -153,9 +153,9 @@ static unsigned long __meminitdata dma_reserve;
 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
-unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_kernelcore;
 static unsigned long __initdata required_movablecore;
-unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
@@ -264,7 +264,7 @@ static void free_compound_page(struct page *page)
 	__free_pages_ok(page, compound_order(page));
 }
 
-static void prep_compound_page(struct page *page, unsigned long order)
+void prep_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
@@ -432,8 +432,9 @@ static inline void __free_one_page(struct page *page,
 
 		buddy = __page_find_buddy(page, page_idx, order);
 		if (!page_is_buddy(page, buddy, order))
-			break;		/* Move the buddy up one level. */
+			break;
 
+		/* Our buddy is free, merge with it and move up one order. */
 		list_del(&buddy->lru);
 		zone->free_area[order].nr_free--;
 		rmv_page_order(buddy);
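
Context for the merge loop touched above: a buddy pair at a given order occupies one 2^(order+1)-aligned block, and the two halves differ only in bit `order` of the page index within the zone. A minimal sketch of the lookup, assuming the usual XOR scheme rather than quoting __page_find_buddy() verbatim:

    /* The buddy of the block at page_idx differs only in bit 'order',
     * so a single XOR yields its index in the zone's mem_map. */
    static inline unsigned long find_buddy_index(unsigned long page_idx,
                                                 unsigned int order)
    {
            return page_idx ^ (1UL << order);
    }
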
@@ -532,7 +533,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void __free_pages_bootmem(struct page *page, unsigned int order)
+void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	if (order == 0) {
 		__ClearPageReserved(page);
@@ -673,9 +674,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
+static int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
 			  int migratetype)
 {
 	struct page *page;
 	unsigned long order;
@@ -714,7 +715,8 @@ int move_freepages(struct zone *zone,
 	return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
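
The comment above notes that move_freepages() takes unaligned endpoints and that move_freepages_block() supplies the alignment. A sketch of that alignment step, assuming the usual pageblock rounding (not the verbatim function body):

    /* Round the page's pfn down to its pageblock, then span one block. */
    start_pfn = page_to_pfn(page) & ~(pageblock_nr_pages - 1);
    end_pfn = start_pfn + pageblock_nr_pages - 1;
    start_page = pfn_to_page(start_pfn);
    end_page = pfn_to_page(end_pfn);
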
@@ -1429,7 +1431,7 @@ try_next_zone:
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-static struct page *
+struct page *
 __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 			struct zonelist *zonelist, nodemask_t *nodemask)
 {
@@ -1632,22 +1634,7 @@ nopage:
 got_pg:
 	return page;
 }
-
-struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist)
-{
-	return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
-}
-
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist, nodemask_t *nodemask)
-{
-	return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
-}
-
-EXPORT_SYMBOL(__alloc_pages);
+EXPORT_SYMBOL(__alloc_pages_internal);
 
 /*
  * Common helper functions.
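
With the out-of-line wrappers deleted and __alloc_pages_internal() exported, callers presumably get thin static-inline wrappers in a header instead (placement assumed, e.g. include/linux/gfp.h; the bodies are exactly the ones removed above):

    static inline struct page *
    __alloc_pages(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist)
    {
            return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
    }

    static inline struct page *
    __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                    struct zonelist *zonelist, nodemask_t *nodemask)
    {
            return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
    }
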
@@ -1711,6 +1698,59 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+/**
+ * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * This function is similar to alloc_pages(), except that it allocates the
+ * minimum number of pages to satisfy the request.  alloc_pages() can only
+ * allocate memory in power-of-two pages.
+ *
+ * This function is also limited by MAX_ORDER.
+ *
+ * Memory allocated by this function must be released by free_pages_exact().
+ */
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
+{
+	unsigned int order = get_order(size);
+	unsigned long addr;
+
+	addr = __get_free_pages(gfp_mask, order);
+	if (addr) {
+		unsigned long alloc_end = addr + (PAGE_SIZE << order);
+		unsigned long used = addr + PAGE_ALIGN(size);
+
+		split_page(virt_to_page(addr), order);
+		while (used < alloc_end) {
+			free_page(used);
+			used += PAGE_SIZE;
+		}
+	}
+
+	return (void *)addr;
+}
+EXPORT_SYMBOL(alloc_pages_exact);
+
+/**
+ * free_pages_exact - release memory allocated via alloc_pages_exact()
+ * @virt: the value returned by alloc_pages_exact.
+ * @size: size of allocation, same value as passed to alloc_pages_exact().
+ *
+ * Release the memory allocated by a previous call to alloc_pages_exact.
+ */
+void free_pages_exact(void *virt, size_t size)
+{
+	unsigned long addr = (unsigned long)virt;
+	unsigned long end = addr + PAGE_ALIGN(size);
+
+	while (addr < end) {
+		free_page(addr);
+		addr += PAGE_SIZE;
+	}
+}
+EXPORT_SYMBOL(free_pages_exact);
+
 static unsigned int nr_free_zone_pages(int offset)
 {
 	struct zoneref *z;
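
A hypothetical caller of the new pair, to illustrate the contract (buffer size and error handling invented for the example): a 100 KB request is rounded up to an order-5 block (128 KB with 4 KB pages), the block is split, the unused 28 KB tail is freed immediately, and the same size must be passed back on release.

    /* Hypothetical driver snippet using the new API. */
    void *buf = alloc_pages_exact(100 * 1024, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    /* ... use the physically-contiguous buffer ... */
    free_pages_exact(buf, 100 * 1024);  /* size must match the allocation */
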
@@ -2352,6 +2392,7 @@ void build_all_zonelists(void)
 
 	if (system_state == SYSTEM_BOOTING) {
 		__build_all_zonelists(NULL);
+		mminit_verify_zonelist();
 		cpuset_init_current_mems_allowed();
 	} else {
 		/* we have to stop all cpus to guarantee there is no user
@@ -2534,6 +2575,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		}
 		page = pfn_to_page(pfn);
 		set_page_links(page, zone, nid, pfn);
+		mminit_verify_page_links(page, zone, nid, pfn);
 		init_page_count(page);
 		reset_page_mapcount(page);
 		SetPageReserved(page);
@@ -2611,7 +2653,7 @@ static int zone_batchsize(struct zone *zone)
 	return batch;
 }
 
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
 	struct per_cpu_pages *pcp;
 
@@ -2836,6 +2878,12 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
 	zone->zone_start_pfn = zone_start_pfn;
 
+	mminit_dprintk(MMINIT_TRACE, "memmap_init",
+			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
+			pgdat->node_id,
+			(unsigned long)zone_idx(zone),
+			zone_start_pfn, (zone_start_pfn + size));
+
 	zone_init_free_lists(zone);
 
 	return 0;
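
mminit_dprintk() is new infrastructure whose definition is not part of this file. A plausible shape for such a level-gated debug macro, assuming an mminit_loglevel knob (a sketch only, not the actual mm/internal.h definition):

    extern int mminit_loglevel;

    /* Print only when the message's level is below the configured loglevel. */
    #define mminit_dprintk(level, prefix, fmt, arg...)                      \
    do {                                                                    \
            if ((level) < mminit_loglevel)                                  \
                    printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg);    \
    } while (0)
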
@@ -2975,7 +3023,8 @@ void __init sparse_memory_present_with_active_regions(int nid)
 void __init push_node_boundaries(unsigned int nid,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
-	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+			"Entering push_node_boundaries(%u, %lu, %lu)\n",
 		nid, start_pfn, end_pfn);
 
 	/* Initialise the boundary for this node if necessary */
@@ -2993,7 +3042,8 @@ void __init push_node_boundaries(unsigned int nid,
 static void __meminit account_node_boundary(unsigned int nid,
 		unsigned long *start_pfn, unsigned long *end_pfn)
 {
-	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+			"Entering account_node_boundary(%u, %lu, %lu)\n",
 		nid, *start_pfn, *end_pfn);
 
 	/* Return if boundary information has not been provided */
@@ -3050,7 +3100,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
  * assumption is made that zones within a node are ordered in monotonic
  * increasing memory addresses so that the "highest" populated zone is used
  */
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
 {
 	int zone_index;
 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3076,7 +3126,7 @@ void __init find_usable_zone_for_movable(void)
  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
  * zones within a node are in order of monotonically increasing memory addresses
  */
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
 					unsigned long zone_type,
 					unsigned long node_start_pfn,
 					unsigned long node_end_pfn,
@@ -3137,7 +3187,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
@@ -3368,8 +3418,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
 		if (realsize >= memmap_pages) {
 			realsize -= memmap_pages;
-			printk(KERN_DEBUG
-				"  %s zone: %lu pages used for memmap\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+				"%s zone: %lu pages used for memmap\n",
 				zone_names[j], memmap_pages);
 		} else
 			printk(KERN_WARNING
@@ -3379,7 +3429,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		/* Account for reserved pages */
 		if (j == 0 && realsize > dma_reserve) {
 			realsize -= dma_reserve;
-			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+				"%s zone: %lu pages reserved\n",
 				zone_names[0], dma_reserve);
 		}
 
@@ -3464,10 +3515,11 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
-void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
-		unsigned long *zones_size, unsigned long node_start_pfn,
-		unsigned long *zholes_size)
+void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
+		unsigned long node_start_pfn, unsigned long *zholes_size)
 {
+	pg_data_t *pgdat = NODE_DATA(nid);
+
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
@@ -3520,10 +3572,13 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
 	int i;
 
-	printk(KERN_DEBUG "Entering add_active_range(%d, %#lx, %#lx) "
-			  "%d entries of %d used\n",
-			  nid, start_pfn, end_pfn,
-			  nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+	mminit_dprintk(MMINIT_TRACE, "memory_register",
+			"Entering add_active_range(%d, %#lx, %#lx) "
+			"%d entries of %d used\n",
+			nid, start_pfn, end_pfn,
+			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+
+	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
 
 	/* Merge with existing active regions if possible */
 	for (i = 0; i < nr_nodemap_entries; i++) {
@@ -3669,7 +3724,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(int nid)
+static unsigned long __init find_min_pfn_for_node(int nid)
 {
 	int i;
 	unsigned long min_pfn = ULONG_MAX;
@@ -3741,7 +3796,7 @@ static unsigned long __init early_calculate_totalpages(void)
  * memory. When they don't, some nodes will have more kernelcore than
  * others
  */
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 {
 	int i, nid;
 	unsigned long usable_startpfn;
@@ -3957,10 +4012,11 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 						early_node_map[i].end_pfn);
 
 	/* Initialise every node */
+	mminit_verify_pageflags_layout();
 	setup_nr_node_ids();
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
-		free_area_init_node(nid, pgdat, NULL,
+		free_area_init_node(nid, NULL,
 				find_min_pfn_for_node(nid), NULL);
 
 		/* Any memory on that node */
@@ -4025,15 +4081,13 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-static bootmem_data_t contig_bootmem_data;
-struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
-
+struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
 void __init free_area_init(unsigned long *zones_size)
 {
-	free_area_init_node(0, NODE_DATA(0), zones_size,
+	free_area_init_node(0, zones_size,
 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 