author     Jeff Garzik <jgarzik@pobox.com>  2005-10-28 12:29:23 -0400
committer  Jeff Garzik <jgarzik@pobox.com>  2005-10-28 12:29:23 -0400
commit     7a9f8f93d2dad38f30fbc79d8a1e6517373aa4b6 (patch)
tree       9116e5bde860d00685c5b6eee7be5ba9899aabb9 /mm
parent     972c26bdd6b58e7534473c4f7928584578cf43f4 (diff)
parent     5fadd053d9bb4345ec6f405d24db4e7eb49cf81e (diff)

Merge branch 'master'
Diffstat (limited to 'mm')
-rw-r--r--   mm/bootmem.c     | 31
-rw-r--r--   mm/filemap.c     |  8
-rw-r--r--   mm/highmem.c     | 14
-rw-r--r--   mm/hugetlb.c     | 57
-rw-r--r--   mm/memory.c      |  4
-rw-r--r--   mm/mempolicy.c   |  6
-rw-r--r--   mm/mempool.c     |  2
-rw-r--r--   mm/page_alloc.c  | 35
-rw-r--r--   mm/shmem.c       |  4
-rw-r--r--   mm/slab.c        |  8
-rw-r--r--   mm/vmscan.c      |  8
11 files changed, 112 insertions(+), 65 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index c1330cc19783..a58699b6579e 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -154,10 +154,10 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
  */
 static void * __init
 __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
-        unsigned long align, unsigned long goal)
+        unsigned long align, unsigned long goal, unsigned long limit)
 {
         unsigned long offset, remaining_size, areasize, preferred;
-        unsigned long i, start = 0, incr, eidx;
+        unsigned long i, start = 0, incr, eidx, end_pfn = bdata->node_low_pfn;
         void *ret;
 
         if(!size) {
@@ -166,7 +166,14 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
         }
         BUG_ON(align & (align-1));
 
-        eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
+        if (limit && bdata->node_boot_start >= limit)
+                return NULL;
+
+        limit >>=PAGE_SHIFT;
+        if (limit && end_pfn > limit)
+                end_pfn = limit;
+
+        eidx = end_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
         offset = 0;
         if (align &&
             (bdata->node_boot_start & (align - 1UL)) != 0)
@@ -178,11 +185,12 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
          * first, then we try to allocate lower pages.
          */
         if (goal && (goal >= bdata->node_boot_start) &&
-            ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
+            ((goal >> PAGE_SHIFT) < end_pfn)) {
                 preferred = goal - bdata->node_boot_start;
 
                 if (bdata->last_success >= preferred)
-                        preferred = bdata->last_success;
+                        if (!limit || (limit && limit > bdata->last_success))
+                                preferred = bdata->last_success;
         } else
                 preferred = 0;
 
@@ -382,14 +390,15 @@ unsigned long __init free_all_bootmem (void)
         return(free_all_bootmem_core(NODE_DATA(0)));
 }
 
-void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
+void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal,
+                                     unsigned long limit)
 {
         pg_data_t *pgdat = pgdat_list;
         void *ptr;
 
         for_each_pgdat(pgdat)
                 if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
-                                                align, goal)))
+                                                align, goal, limit)))
                         return(ptr);
 
         /*
@@ -400,14 +409,16 @@ void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned
         return NULL;
 }
 
-void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
+
+void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align,
+                                          unsigned long goal, unsigned long limit)
 {
         void *ptr;
 
-        ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
+        ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, limit);
         if (ptr)
                 return (ptr);
 
-        return __alloc_bootmem(size, align, goal);
+        return __alloc_bootmem_limit(size, align, goal, limit);
 }
 
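The bootmem.c change above threads a physical-address ceiling through __alloc_bootmem_core(): a non-zero limit makes the allocator skip any node that starts at or above the cap and clips end_pfn so no returned block reaches past it. Below is a minimal sketch of how an early-boot caller might use the new entry point; the helper early_table_alloc() and the 16 MiB cap are illustrative assumptions, and the prototype is assumed to live in <linux/bootmem.h> (that header sits outside the mm/-only diffstat shown here). Only the __alloc_bootmem_limit() signature is taken from this diff.

#include <linux/bootmem.h>  /* assumed home of the __alloc_bootmem_limit() prototype */
#include <linux/init.h>

/* Hypothetical helper: grab boot memory that must stay below the 16 MiB
 * ISA DMA boundary.  size, align, goal and limit are all byte values;
 * limit acts as an upper bound on the physical address of the block. */
static void * __init early_table_alloc(unsigned long bytes)
{
        return __alloc_bootmem_limit(bytes, PAGE_SIZE, 0, 16UL * 1024 * 1024);
}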
diff --git a/mm/filemap.c b/mm/filemap.c
index b5346576e58d..1c31b2fd2ca5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -377,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
  * This function does not add the page to the LRU.  The caller must do that.
  */
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-                pgoff_t offset, int gfp_mask)
+                pgoff_t offset, gfp_t gfp_mask)
 {
         int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 
@@ -401,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL(add_to_page_cache);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-                                pgoff_t offset, int gfp_mask)
+                                pgoff_t offset, gfp_t gfp_mask)
 {
         int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
         if (ret == 0)
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
  * memory exhaustion.
  */
 struct page *find_or_create_page(struct address_space *mapping,
-                unsigned long index, unsigned int gfp_mask)
+                unsigned long index, gfp_t gfp_mask)
 {
         struct page *page, *cached_page = NULL;
         int err;
@@ -683,7 +683,7 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
         struct page *page = find_get_page(mapping, index);
-        unsigned int gfp_mask;
+        gfp_t gfp_mask;
 
         if (page) {
                 if (!TestSetPageLocked(page))
diff --git a/mm/highmem.c b/mm/highmem.c
index 90e1861e2da0..ce2e7e8bbfa7 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,11 +30,9 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
 {
-        unsigned int gfp = gfp_mask | (unsigned int) (long) data;
-
-        return alloc_page(gfp);
+        return alloc_page(gfp_mask | GFP_DMA);
 }
 
 static void page_pool_free(void *page, void *data)
@@ -51,6 +49,12 @@ static void page_pool_free(void *page, void *data)
  * n means that there are (n-1) current users of it.
  */
 #ifdef CONFIG_HIGHMEM
+
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+{
+        return alloc_page(gfp_mask);
+}
+
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -267,7 +271,7 @@ int init_emergency_isa_pool(void)
         if (isa_page_pool)
                 return 0;
 
-        isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
+        isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
         if (!isa_page_pool)
                 BUG();
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 901ac523a1c3..61d380678030 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -274,21 +274,22 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 {
         pte_t *src_pte, *dst_pte, entry;
         struct page *ptepage;
-        unsigned long addr = vma->vm_start;
-        unsigned long end = vma->vm_end;
+        unsigned long addr;
 
-        while (addr < end) {
+        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                 dst_pte = huge_pte_alloc(dst, addr);
                 if (!dst_pte)
                         goto nomem;
+                spin_lock(&src->page_table_lock);
                 src_pte = huge_pte_offset(src, addr);
-                BUG_ON(!src_pte || pte_none(*src_pte)); /* prefaulted */
-                entry = *src_pte;
-                ptepage = pte_page(entry);
-                get_page(ptepage);
-                add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-                set_huge_pte_at(dst, addr, dst_pte, entry);
-                addr += HPAGE_SIZE;
+                if (src_pte && !pte_none(*src_pte)) {
+                        entry = *src_pte;
+                        ptepage = pte_page(entry);
+                        get_page(ptepage);
+                        add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
+                        set_huge_pte_at(dst, addr, dst_pte, entry);
+                }
+                spin_unlock(&src->page_table_lock);
         }
         return 0;
 
@@ -323,8 +324,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
                 page = pte_page(pte);
                 put_page(page);
+                add_mm_counter(mm, rss, - (HPAGE_SIZE / PAGE_SIZE));
         }
-        add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
         flush_tlb_range(vma, start, end);
 }
 
@@ -393,6 +394,28 @@ out:
         return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully. Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                        unsigned long address, int write_access)
+{
+        int ret = VM_FAULT_SIGBUS;
+        pte_t *pte;
+
+        spin_lock(&mm->page_table_lock);
+        pte = huge_pte_offset(mm, address);
+        if (pte && !pte_none(*pte))
+                ret = VM_FAULT_MINOR;
+        spin_unlock(&mm->page_table_lock);
+        return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page **pages, struct vm_area_struct **vmas,
                         unsigned long *position, int *length, int i)
@@ -403,6 +426,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
         BUG_ON(!is_vm_hugetlb_page(vma));
 
         vpfn = vaddr/PAGE_SIZE;
+        spin_lock(&mm->page_table_lock);
         while (vaddr < vma->vm_end && remainder) {
 
                 if (pages) {
@@ -415,8 +439,13 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                          * indexing below to work. */
                         pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
 
-                        /* hugetlb should be locked, and hence, prefaulted */
-                        WARN_ON(!pte || pte_none(*pte));
+                        /* the hugetlb file might have been truncated */
+                        if (!pte || pte_none(*pte)) {
+                                remainder = 0;
+                                if (!i)
+                                        i = -EFAULT;
+                                break;
+                        }
 
                         page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
 
@@ -434,7 +463,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 --remainder;
                 ++i;
         }
-
+        spin_unlock(&mm->page_table_lock);
         *length = remainder;
         *position = vaddr;
 
diff --git a/mm/memory.c b/mm/memory.c
index ae8161f1f459..1db40e935e55 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2045,8 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 
         inc_page_state(pgfault);
 
-        if (is_vm_hugetlb_page(vma))
-                return VM_FAULT_SIGBUS; /* mapping truncation does this. */
+        if (unlikely(is_vm_hugetlb_page(vma)))
+                return hugetlb_fault(mm, vma, address, write_access);
 
         /*
          * We need the page table lock to synchronize with kswapd
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 37af443eb094..1d5c64df1653 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -700,7 +700,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
         case MPOL_BIND:
                 /* Lower zones don't get a policy applied */
                 /* Careful: current->mems_allowed might have moved */
-                if ((gfp & GFP_ZONEMASK) >= policy_zone)
+                if (gfp_zone(gfp) >= policy_zone)
                         if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
                                 return policy->v.zonelist;
                 /*FALL THROUGH*/
@@ -712,7 +712,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
                 nd = 0;
                 BUG();
         }
-        return NODE_DATA(nd)->node_zonelists + (gfp & GFP_ZONEMASK);
+        return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
 }
 
 /* Do dynamic interleaving for a process */
@@ -757,7 +757,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned ni
         struct page *page;
 
         BUG_ON(!node_online(nid));
-        zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK);
+        zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
         page = __alloc_pages(gfp, order, zl);
         if (page && page_zone(page) == zl->zones[0]) {
                 zone_pcp(zl->zones[0],get_cpu())->interleave_hit++;
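The mempolicy.c hunks above, together with the page_alloc.c hunks further down, replace the open-coded (gfp & GFP_ZONEMASK) arithmetic with the gfp_zone() helper. For reference, a rough sketch of what that helper reduces to at this point in the tree; the real definition lives in include/linux/gfp.h, which is outside the mm/-only diffstat, so treat the body below as an assumption rather than the committed code.

/* Approximate shape of gfp_zone() circa 2.6.14: keep only the zone-selector
 * bits of the allocation flags and return a plain int zone index suitable
 * for indexing node_zonelists[]. */
static inline int gfp_zone(gfp_t gfp)
{
        return (__force int)(gfp & (__force gfp_t)GFP_ZONEMASK);
}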
diff --git a/mm/mempool.c b/mm/mempool.c
index 9e377ea700b2..1a99b80480d3 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -205,7 +205,7 @@ void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
         void *element;
         unsigned long flags;
         wait_queue_t wait;
-        unsigned int gfp_temp;
+        gfp_t gfp_temp;
 
         might_sleep_if(gfp_mask & __GFP_WAIT);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cc1fe2672a31..94c864eac9c4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -734,7 +734,7 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
  * of the allocation.
  */
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-                      int classzone_idx, int can_try_harder, int gfp_high)
+                      int classzone_idx, int can_try_harder, gfp_t gfp_high)
 {
         /* free_pages my go negative - that's OK */
         long min = mark, free_pages = z->free_pages - (1 << order) + 1;
@@ -777,7 +777,7 @@ struct page * fastcall
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
                 struct zonelist *zonelist)
 {
-        const int wait = gfp_mask & __GFP_WAIT;
+        const gfp_t wait = gfp_mask & __GFP_WAIT;
         struct zone **zones, *z;
         struct page *page;
         struct reclaim_state reclaim_state;
@@ -996,7 +996,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
          * get_zeroed_page() returns a 32-bit address, which cannot represent
          * a highmem page
          */
-        BUG_ON(gfp_mask & __GFP_HIGHMEM);
+        BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
 
         page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
         if (page)
@@ -1089,7 +1089,7 @@ static unsigned int nr_free_zone_pages(int offset)
  */
 unsigned int nr_free_buffer_pages(void)
 {
-        return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
+        return nr_free_zone_pages(gfp_zone(GFP_USER));
 }
 
 /*
@@ -1097,7 +1097,7 @@ unsigned int nr_free_buffer_pages(void)
  */
 unsigned int nr_free_pagecache_pages(void)
 {
-        return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
+        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
 }
 
 #ifdef CONFIG_HIGHMEM
@@ -1428,6 +1428,16 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
         return j;
 }
 
+static inline int highest_zone(int zone_bits)
+{
+        int res = ZONE_NORMAL;
+        if (zone_bits & (__force int)__GFP_HIGHMEM)
+                res = ZONE_HIGHMEM;
+        if (zone_bits & (__force int)__GFP_DMA)
+                res = ZONE_DMA;
+        return res;
+}
+
 #ifdef CONFIG_NUMA
 #define MAX_NODE_LOAD (num_online_nodes())
 static int __initdata node_load[MAX_NUMNODES];
@@ -1524,11 +1534,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
                 zonelist = pgdat->node_zonelists + i;
                 for (j = 0; zonelist->zones[j] != NULL; j++);
 
-                k = ZONE_NORMAL;
-                if (i & __GFP_HIGHMEM)
-                        k = ZONE_HIGHMEM;
-                if (i & __GFP_DMA)
-                        k = ZONE_DMA;
+                k = highest_zone(i);
 
                 j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
                 zonelist->zones[j] = NULL;
@@ -1549,12 +1555,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
                 zonelist = pgdat->node_zonelists + i;
 
                 j = 0;
-                k = ZONE_NORMAL;
-                if (i & __GFP_HIGHMEM)
-                        k = ZONE_HIGHMEM;
-                if (i & __GFP_DMA)
-                        k = ZONE_DMA;
-
+                k = highest_zone(i);
                 j = build_zonelists_node(pgdat, zonelist, j, k);
                 /*
                  * Now we build the zonelist so that it contains the zones
@@ -1750,6 +1751,8 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
         struct per_cpu_pages *pcp;
 
+        memset(p, 0, sizeof(*p));
+
         pcp = &p->pcp[0];               /* hot */
         pcp->count = 0;
         pcp->low = 2 * batch;
diff --git a/mm/shmem.c b/mm/shmem.c
index ea064d89cda9..55e04a0734c1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ enum sgp_type {
 static int shmem_getpage(struct inode *inode, unsigned long idx,
                          struct page **pagep, enum sgp_type sgp, int *type);
 
-static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
+static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 {
         /*
          * The above definition of ENTRIES_PER_PAGE, and the use of
@@ -898,7 +898,7 @@ struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
 }
 
 static struct page *
-shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
+shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
                  unsigned long idx)
 {
         struct vm_area_struct pvma;
diff --git a/mm/slab.c b/mm/slab.c
index d05c678bceb3..d30423f167a2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,7 +386,7 @@ struct kmem_cache_s {
         unsigned int            gfporder;
 
         /* force GFP flags, e.g. GFP_DMA */
-        unsigned int            gfpflags;
+        gfp_t                   gfpflags;
 
         size_t                  colour;         /* cache colouring range */
         unsigned int            colour_off;     /* colour offset */
@@ -2117,7 +2117,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
         slabp->free = 0;
 }
 
-static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
 {
         if (flags & SLAB_DMA) {
                 if (!(cachep->gfpflags & GFP_DMA))
@@ -2152,7 +2152,7 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
         struct slab     *slabp;
         void            *objp;
         size_t           offset;
-        unsigned int     local_flags;
+        gfp_t            local_flags;
         unsigned long    ctor_flags;
         struct kmem_list3 *l3;
 
@@ -2546,7 +2546,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
+static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
         struct list_head *entry;
         struct slab *slabp;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 64f9570cff56..843c87d1e61f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,7 +70,7 @@ struct scan_control {
         unsigned int priority;
 
         /* This context's GFP mask */
-        unsigned int gfp_mask;
+        gfp_t gfp_mask;
 
         int may_writepage;
 
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                         unsigned long lru_pages)
 {
         struct shrinker *shrinker;
@@ -926,7 +926,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 {
         int priority;
         int ret = 0;
@@ -1338,7 +1338,7 @@ module_init(kswapd_init)
 /*
  * Try to free up some pages from this zone through reclaim.
  */
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
         struct scan_control sc;
         int nr_pages = 1 << order;