Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	 8
-rw-r--r--	mm/highmem.c	14
-rw-r--r--	mm/mempolicy.c	 6
-rw-r--r--	mm/mempool.c	 2
-rw-r--r--	mm/page_alloc.c	35
-rw-r--r--	mm/shmem.c	 4
-rw-r--r--	mm/slab.c	 8
-rw-r--r--	mm/vmscan.c	 8
8 files changed, 46 insertions(+), 39 deletions(-)
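Every change below is the same mechanical substitution: allocation-flag parameters declared as int or unsigned int become the dedicated gfp_t type, so that sparse can reject code that mixes GFP masks with ordinary integers. The real typedef lives in the gfp/types headers, outside this diff; what follows is a minimal sketch of the sparse "restricted type" pattern it relies on (whether via __bitwise or a similar annotation at this point in history — the exact spellings here are assumptions):

/* Under sparse (__CHECKER__), __bitwise makes gfp_t a restricted
 * type: mixing it with plain integers, or with a different restricted
 * type, draws a warning. Under gcc it is an ordinary unsigned int. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise gfp_t;

#define __GFP_DMA	((__force gfp_t)0x01u)	/* illustrative values */
#define __GFP_HIGHMEM	((__force gfp_t)0x02u)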
diff --git a/mm/filemap.c b/mm/filemap.c
index b5346576e58d..1c31b2fd2ca5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -377,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
  * This function does not add the page to the LRU. The caller must do that.
  */
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-		pgoff_t offset, int gfp_mask)
+		pgoff_t offset, gfp_t gfp_mask)
 {
 	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 
@@ -401,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL(add_to_page_cache);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-		pgoff_t offset, int gfp_mask)
+		pgoff_t offset, gfp_t gfp_mask)
 {
 	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
 	if (ret == 0)
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
  * memory exhaustion.
  */
 struct page *find_or_create_page(struct address_space *mapping,
-		unsigned long index, unsigned int gfp_mask)
+		unsigned long index, gfp_t gfp_mask)
 {
 	struct page *page, *cached_page = NULL;
 	int err;
@@ -683,7 +683,7 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
 	struct page *page = find_get_page(mapping, index);
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 
 	if (page) {
 		if (!TestSetPageLocked(page))
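In add_to_page_cache() the gfp mask sits right next to a pgoff_t, so before this change swapping the last two arguments compiled without complaint. A hypothetical caller, reusing names from the hunks above, illustrates what the stronger type buys:

/* Both calls compiled cleanly when gfp_mask was a plain int; with
 * gfp_t, sparse flags the first with "incorrect type in argument". */
error = add_to_page_cache(page, mapping, GFP_KERNEL, offset);	/* swapped - now caught */
error = add_to_page_cache(page, mapping, offset, GFP_KERNEL);	/* correct */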
diff --git a/mm/highmem.c b/mm/highmem.c
index 90e1861e2da0..ce2e7e8bbfa7 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,11 +30,9 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
 {
-	unsigned int gfp = gfp_mask | (unsigned int) (long) data;
-
-	return alloc_page(gfp);
+	return alloc_page(gfp_mask | GFP_DMA);
 }
 
 static void page_pool_free(void *page, void *data)
@@ -51,6 +49,12 @@ static void page_pool_free(void *page, void *data)
  * n means that there are (n-1) current users of it.
  */
 #ifdef CONFIG_HIGHMEM
+
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+{
+	return alloc_page(gfp_mask);
+}
+
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -267,7 +271,7 @@ int init_emergency_isa_pool(void)
 	if (isa_page_pool)
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
 	if (!isa_page_pool)
 		BUG();
 
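The highmem.c change is the one place where the conversion is more than a type swap: the old code smuggled __GFP_DMA through mempool's opaque void *data pointer, a trick no typed interface can express, so the ISA pool now gets its own allocator that hard-codes the flag. A minimal sketch of the mempool callback contract being relied on here (my_pool_alloc, my_pool_free and POOL_SIZE are illustrative names; signatures as in this kernel era):

/* mempool_create() stores 'data' and passes it back verbatim to the
 * callbacks; nothing typed can flow through it, hence the dedicated
 * ISA variant above instead of flags hidden in a pointer. */
static void *my_pool_alloc(gfp_t gfp_mask, void *data)
{
	return alloc_page(gfp_mask);	/* no flags smuggled via data */
}

static void my_pool_free(void *element, void *data)
{
	__free_page(element);
}

pool = mempool_create(POOL_SIZE, my_pool_alloc, my_pool_free, NULL);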
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 37af443eb094..1d5c64df1653 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -700,7 +700,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
 	case MPOL_BIND:
 		/* Lower zones don't get a policy applied */
 		/* Careful: current->mems_allowed might have moved */
-		if ((gfp & GFP_ZONEMASK) >= policy_zone)
+		if (gfp_zone(gfp) >= policy_zone)
 			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
 				return policy->v.zonelist;
 		/*FALL THROUGH*/
@@ -712,7 +712,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
 		nd = 0;
 		BUG();
 	}
-	return NODE_DATA(nd)->node_zonelists + (gfp & GFP_ZONEMASK);
+	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
 }
 
 /* Do dynamic interleaving for a process */
@@ -757,7 +757,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned ni
 	struct page *page;
 
 	BUG_ON(!node_online(nid));
-	zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK);
+	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
 	page = __alloc_pages(gfp, order, zl);
 	if (page && page_zone(page) == zl->zones[0]) {
 		zone_pcp(zl->zones[0],get_cpu())->interleave_hit++;
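gfp_zone() replaces the open-coded (gfp & GFP_ZONEMASK) in three places. Beyond being shorter, it is where the one deliberate type escape now lives: a zonelist index must be a plain int, and with gfp_t restricted that conversion needs an explicit cast. Roughly what the helper does (the real definition is in include/linux/gfp.h, outside this diff):

/* Strip everything but the zone-selection bits and hand them back as
 * an ordinary int, suitable for indexing node_zonelists[]. */
static inline int gfp_zone(gfp_t gfp)
{
	return (__force int)(gfp & GFP_ZONEMASK);
}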
diff --git a/mm/mempool.c b/mm/mempool.c
index 9e377ea700b2..1a99b80480d3 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -205,7 +205,7 @@ void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 	void *element;
 	unsigned long flags;
 	wait_queue_t wait;
-	unsigned int gfp_temp;
+	gfp_t gfp_temp;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cc1fe2672a31..94c864eac9c4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -734,7 +734,7 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
  * of the allocation.
  */
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-		      int classzone_idx, int can_try_harder, int gfp_high)
+		      int classzone_idx, int can_try_harder, gfp_t gfp_high)
 {
 	/* free_pages my go negative - that's OK */
 	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
@@ -777,7 +777,7 @@ struct page * fastcall
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
-	const int wait = gfp_mask & __GFP_WAIT;
+	const gfp_t wait = gfp_mask & __GFP_WAIT;
 	struct zone **zones, *z;
 	struct page *page;
 	struct reclaim_state reclaim_state;
@@ -996,7 +996,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
 	 * get_zeroed_page() returns a 32-bit address, which cannot represent
 	 * a highmem page
 	 */
-	BUG_ON(gfp_mask & __GFP_HIGHMEM);
+	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
 
 	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
 	if (page)
@@ -1089,7 +1089,7 @@ static unsigned int nr_free_zone_pages(int offset)
  */
 unsigned int nr_free_buffer_pages(void)
 {
-	return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
+	return nr_free_zone_pages(gfp_zone(GFP_USER));
 }
 
 /*
@@ -1097,7 +1097,7 @@ unsigned int nr_free_buffer_pages(void)
  */
 unsigned int nr_free_pagecache_pages(void)
 {
-	return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
+	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
 }
 
 #ifdef CONFIG_HIGHMEM
@@ -1428,6 +1428,16 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
 	return j;
 }
 
+static inline int highest_zone(int zone_bits)
+{
+	int res = ZONE_NORMAL;
+	if (zone_bits & (__force int)__GFP_HIGHMEM)
+		res = ZONE_HIGHMEM;
+	if (zone_bits & (__force int)__GFP_DMA)
+		res = ZONE_DMA;
+	return res;
+}
+
 #ifdef CONFIG_NUMA
 #define MAX_NODE_LOAD (num_online_nodes())
 static int __initdata node_load[MAX_NUMNODES];
@@ -1524,11 +1534,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
 			zonelist = pgdat->node_zonelists + i;
 			for (j = 0; zonelist->zones[j] != NULL; j++);
 
-			k = ZONE_NORMAL;
-			if (i & __GFP_HIGHMEM)
-				k = ZONE_HIGHMEM;
-			if (i & __GFP_DMA)
-				k = ZONE_DMA;
+			k = highest_zone(i);
 
 			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
 			zonelist->zones[j] = NULL;
@@ -1549,12 +1555,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
 		zonelist = pgdat->node_zonelists + i;
 
 		j = 0;
-		k = ZONE_NORMAL;
-		if (i & __GFP_HIGHMEM)
-			k = ZONE_HIGHMEM;
-		if (i & __GFP_DMA)
-			k = ZONE_DMA;
-
+		k = highest_zone(i);
 		j = build_zonelists_node(pgdat, zonelist, j, k);
 		/*
 		 * Now we build the zonelist so that it contains the zones
@@ -1750,6 +1751,8 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
 	struct per_cpu_pages *pcp;
 
+	memset(p, 0, sizeof(*p));
+
 	pcp = &p->pcp[0];		/* hot */
 	pcp->count = 0;
 	pcp->low = 2 * batch;
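Two details in page_alloc.c follow from the same typing rules. The new highest_zone() takes a plain int because its argument is a zonelist index, not a gfp mask, so the __GFP_* constants are force-cast down to int before the bitwise tests. And BUG_ON() gains an explicit != 0, presumably so the macro sees an ordinary int: comparing a restricted-type expression against zero yields a plain integer, which keeps sparse quiet regardless of how a given architecture's BUG_ON() consumes its condition. Under the definitions sketched earlier:

/* A gfp_t cannot silently become an int; the conversion must be
 * spelled out with __force: */
int zone_bits;

zone_bits = __GFP_HIGHMEM;		/* sparse warns */
zone_bits = (__force int)__GFP_HIGHMEM;	/* explicit, accepted */

The setup_pageset() memset is the one hunk here not about types: it zeroes the whole per-cpu structure before the individual fields are set up.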
diff --git a/mm/shmem.c b/mm/shmem.c
index ea064d89cda9..55e04a0734c1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ enum sgp_type {
 static int shmem_getpage(struct inode *inode, unsigned long idx,
 			 struct page **pagep, enum sgp_type sgp, int *type);
 
-static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
+static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 {
 	/*
 	 * The above definition of ENTRIES_PER_PAGE, and the use of
@@ -898,7 +898,7 @@ struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
 }
 
 static struct page *
-shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
+shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
 		 unsigned long idx)
 {
 	struct vm_area_struct pvma;
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,7 +386,7 @@ struct kmem_cache_s {
 	unsigned int		gfporder;
 
 	/* force GFP flags, e.g. GFP_DMA */
-	unsigned int		gfpflags;
+	gfp_t			gfpflags;
 
 	size_t			colour;		/* cache colouring range */
 	unsigned int		colour_off;	/* colour offset */
@@ -2117,7 +2117,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
 	slabp->free = 0;
 }
 
-static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
 {
 	if (flags & SLAB_DMA) {
 		if (!(cachep->gfpflags & GFP_DMA))
@@ -2152,7 +2152,7 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 	struct slab	*slabp;
 	void		*objp;
 	size_t		 offset;
-	unsigned int	 local_flags;
+	gfp_t		 local_flags;
 	unsigned long	 ctor_flags;
 	struct kmem_list3 *l3;
 
@@ -2546,7 +2546,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
+static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct list_head *entry;
 	struct slab *slabp;
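Note that kmem_flagcheck() can take gfp_t and still test SLAB_DMA: in this era the allocation-time SLAB_* flags were straight aliases of the corresponding GFP_* masks, so gfp_t expressions could mix them freely. Recalled from the period's include/linux/slab.h, so treat the exact list as an assumption:

/* Allocation-time SLAB_* flags simply renamed the GFP_* masks: */
#define SLAB_ATOMIC	GFP_ATOMIC
#define SLAB_KERNEL	GFP_KERNEL
#define SLAB_DMA	GFP_DMA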
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 64f9570cff56..843c87d1e61f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,7 +70,7 @@ struct scan_control {
 	unsigned int priority;
 
 	/* This context's GFP mask */
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 
 	int may_writepage;
 
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
@@ -926,7 +926,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
  * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 {
 	int priority;
 	int ret = 0;
@@ -1338,7 +1338,7 @@ module_init(kswapd_init)
 /*
  * Try to free up some pages from this zone through reclaim.
  */
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
 	struct scan_control sc;
 	int nr_pages = 1 << order;
