Diffstat (limited to 'mm')
-rw-r--r--   mm/bootmem.c      31
-rw-r--r--   mm/fremap.c        3
-rw-r--r--   mm/highmem.c       2
-rw-r--r--   mm/hugetlb.c      57
-rw-r--r--   mm/madvise.c      11
-rw-r--r--   mm/memory.c        4
-rw-r--r--   mm/mempolicy.c     8
-rw-r--r--   mm/mempool.c       6
-rw-r--r--   mm/nommu.c         3
-rw-r--r--   mm/oom_kill.c      2
-rw-r--r--   mm/page_alloc.c   14
-rw-r--r--   mm/page_io.c       2
-rw-r--r--   mm/shmem.c         3
-rw-r--r--   mm/slab.c         34
-rw-r--r--   mm/swap_state.c    2
-rw-r--r--   mm/vmalloc.c       4
-rw-r--r--   mm/vmscan.c       13
17 files changed, 121 insertions, 78 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index c1330cc19783..a58699b6579e 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -154,10 +154,10 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
  */
 static void * __init
 __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
-                unsigned long align, unsigned long goal)
+                unsigned long align, unsigned long goal, unsigned long limit)
 {
         unsigned long offset, remaining_size, areasize, preferred;
-        unsigned long i, start = 0, incr, eidx;
+        unsigned long i, start = 0, incr, eidx, end_pfn = bdata->node_low_pfn;
         void *ret;

         if(!size) {
@@ -166,7 +166,14 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
         }
         BUG_ON(align & (align-1));

-        eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
+        if (limit && bdata->node_boot_start >= limit)
+                return NULL;
+
+        limit >>=PAGE_SHIFT;
+        if (limit && end_pfn > limit)
+                end_pfn = limit;
+
+        eidx = end_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
         offset = 0;
         if (align &&
             (bdata->node_boot_start & (align - 1UL)) != 0)
@@ -178,11 +185,12 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
          * first, then we try to allocate lower pages.
          */
         if (goal && (goal >= bdata->node_boot_start) &&
-            ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
+            ((goal >> PAGE_SHIFT) < end_pfn)) {
                 preferred = goal - bdata->node_boot_start;

                 if (bdata->last_success >= preferred)
-                        preferred = bdata->last_success;
+                        if (!limit || (limit && limit > bdata->last_success))
+                                preferred = bdata->last_success;
         } else
                 preferred = 0;

@@ -382,14 +390,15 @@ unsigned long __init free_all_bootmem (void)
         return(free_all_bootmem_core(NODE_DATA(0)));
 }

-void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
+void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal,
+                                     unsigned long limit)
 {
         pg_data_t *pgdat = pgdat_list;
         void *ptr;

         for_each_pgdat(pgdat)
                 if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
-                                                align, goal)))
+                                                align, goal, limit)))
                         return(ptr);

         /*
@@ -400,14 +409,16 @@ void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned
         return NULL;
 }

-void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
+
+void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align,
+                                          unsigned long goal, unsigned long limit)
 {
         void *ptr;

-        ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
+        ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, limit);
         if (ptr)
                 return (ptr);

-        return __alloc_bootmem(size, align, goal);
+        return __alloc_bootmem_limit(size, align, goal, limit);
 }

diff --git a/mm/fremap.c b/mm/fremap.c
index 3235fb77c133..ab23a0673c35 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -89,6 +89,9 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
         size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
         if (!page->mapping || page->index >= size)
                 goto err_unlock;
+        err = -ENOMEM;
+        if (page_mapcount(page) > INT_MAX/2)
+                goto err_unlock;

         zap_pte(mm, vma, addr, pte);

diff --git a/mm/highmem.c b/mm/highmem.c
index 400911599468..90e1861e2da0 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,7 +30,7 @@

 static mempool_t *page_pool, *isa_page_pool;

-static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
 {
         unsigned int gfp = gfp_mask | (unsigned int) (long) data;

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 901ac523a1c3..61d380678030 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -274,21 +274,22 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 {
         pte_t *src_pte, *dst_pte, entry;
         struct page *ptepage;
-        unsigned long addr = vma->vm_start;
-        unsigned long end = vma->vm_end;
+        unsigned long addr;

-        while (addr < end) {
+        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                 dst_pte = huge_pte_alloc(dst, addr);
                 if (!dst_pte)
                         goto nomem;
+                spin_lock(&src->page_table_lock);
                 src_pte = huge_pte_offset(src, addr);
-                BUG_ON(!src_pte || pte_none(*src_pte)); /* prefaulted */
-                entry = *src_pte;
-                ptepage = pte_page(entry);
-                get_page(ptepage);
-                add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-                set_huge_pte_at(dst, addr, dst_pte, entry);
-                addr += HPAGE_SIZE;
+                if (src_pte && !pte_none(*src_pte)) {
+                        entry = *src_pte;
+                        ptepage = pte_page(entry);
+                        get_page(ptepage);
+                        add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
+                        set_huge_pte_at(dst, addr, dst_pte, entry);
+                }
+                spin_unlock(&src->page_table_lock);
         }
         return 0;

@@ -323,8 +324,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,

                 page = pte_page(pte);
                 put_page(page);
+                add_mm_counter(mm, rss, - (HPAGE_SIZE / PAGE_SIZE));
         }
-        add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
         flush_tlb_range(vma, start, end);
 }

@@ -393,6 +394,28 @@ out:
         return ret;
 }

+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully. Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                        unsigned long address, int write_access)
+{
+        int ret = VM_FAULT_SIGBUS;
+        pte_t *pte;
+
+        spin_lock(&mm->page_table_lock);
+        pte = huge_pte_offset(mm, address);
+        if (pte && !pte_none(*pte))
+                ret = VM_FAULT_MINOR;
+        spin_unlock(&mm->page_table_lock);
+        return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page **pages, struct vm_area_struct **vmas,
                         unsigned long *position, int *length, int i)
@@ -403,6 +426,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
         BUG_ON(!is_vm_hugetlb_page(vma));

         vpfn = vaddr/PAGE_SIZE;
+        spin_lock(&mm->page_table_lock);
         while (vaddr < vma->vm_end && remainder) {

                 if (pages) {
@@ -415,8 +439,13 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                  * indexing below to work. */
                 pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

-                /* hugetlb should be locked, and hence, prefaulted */
-                WARN_ON(!pte || pte_none(*pte));
+                /* the hugetlb file might have been truncated */
+                if (!pte || pte_none(*pte)) {
+                        remainder = 0;
+                        if (!i)
+                                i = -EFAULT;
+                        break;
+                }

                 page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

@@ -434,7 +463,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 --remainder;
                 ++i;
         }
-
+        spin_unlock(&mm->page_table_lock);
         *length = remainder;
         *position = vaddr;

diff --git a/mm/madvise.c b/mm/madvise.c
index 4454936f87d1..20e075d1c64c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -83,6 +83,9 @@ static long madvise_willneed(struct vm_area_struct * vma,
 {
         struct file *file = vma->vm_file;

+        if (!file)
+                return -EBADF;
+
         if (file->f_mapping->a_ops->get_xip_page) {
                 /* no bad return value, but ignore advice */
                 return 0;
@@ -141,11 +144,7 @@ static long
 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
                 unsigned long start, unsigned long end, int behavior)
 {
-        struct file *filp = vma->vm_file;
-        long error = -EBADF;
-
-        if (!filp)
-                goto out;
+        long error;

         switch (behavior) {
         case MADV_NORMAL:
@@ -166,8 +165,6 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
                 error = -EINVAL;
                 break;
         }
-
-out:
         return error;
 }

diff --git a/mm/memory.c b/mm/memory.c
index ae8161f1f459..1db40e935e55 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2045,8 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,

         inc_page_state(pgfault);

-        if (is_vm_hugetlb_page(vma))
-                return VM_FAULT_SIGBUS;  /* mapping truncation does this. */
+        if (unlikely(is_vm_hugetlb_page(vma)))
+                return hugetlb_fault(mm, vma, address, write_access);

         /*
          * We need the page table lock to synchronize with kswapd
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9033f0859aa8..37af443eb094 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -687,7 +687,7 @@ get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned lo
 }

 /* Return a zonelist representing a mempolicy */
-static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy)
+static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
 {
         int nd;

@@ -751,7 +751,7 @@ static unsigned offset_il_node(struct mempolicy *pol,

 /* Allocate a page in interleaved policy.
    Own path because it needs to do special accounting. */
-static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid)
+static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid)
 {
         struct zonelist *zl;
         struct page *page;
@@ -789,7 +789,7 @@ static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned or
  * Should be called with the mm_sem of the vma hold.
  */
 struct page *
-alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 {
         struct mempolicy *pol = get_vma_policy(current, vma, addr);

@@ -832,7 +832,7 @@ alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned l
  * 1) it's ok to take cpuset_sem (can WAIT), and
  * 2) allocating for current task (not interrupt).
  */
-struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order)
+struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
         struct mempolicy *pol = current->mempolicy;

diff --git a/mm/mempool.c b/mm/mempool.c
index 65f2957b8d51..9e377ea700b2 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(mempool_create_node);
  * while this function is running. mempool_alloc() & mempool_free()
  * might be called (eg. from IRQ contexts) while this function executes.
  */
-int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
 {
         void *element;
         void **new_elements;
@@ -200,7 +200,7 @@ EXPORT_SYMBOL(mempool_destroy);
  * *never* fails when called from process contexts. (it might
  * fail if called from an IRQ context.)
  */
-void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
+void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 {
         void *element;
         unsigned long flags;
@@ -276,7 +276,7 @@ EXPORT_SYMBOL(mempool_free);
 /*
  * A commonly used alloc and free fn.
  */
-void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data)
+void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
 {
         kmem_cache_t *mem = (kmem_cache_t *) pool_data;
         return kmem_cache_alloc(mem, gfp_mask);
diff --git a/mm/nommu.c b/mm/nommu.c
index 064d70442895..0ef241ae3763 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -157,8 +157,7 @@ void vfree(void *addr)
         kfree(addr);
 }

-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
-                        pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
         /*
          * kmalloc doesn't like __GFP_HIGHMEM for some reason
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ac3bf33e5370..d348b9035955 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -263,7 +263,7 @@ static struct mm_struct *oom_kill_process(struct task_struct *p)
  * OR try to be smart about which process to kill. Note that we
  * don't have to be perfect here, we just have to be good.
  */
-void out_of_memory(unsigned int __nocast gfp_mask, int order)
+void out_of_memory(gfp_t gfp_mask, int order)
 {
         struct mm_struct *mm = NULL;
         task_t * p;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ae2903339e71..e1d3d77f4aee 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -671,7 +671,7 @@ void fastcall free_cold_page(struct page *page)
         free_hot_cold_page(page, 1);
 }

-static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
+static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 {
         int i;

@@ -686,7 +686,7 @@ static inline void prep_zero_page(struct page *page, int order, unsigned int __n
  * or two.
  */
 static struct page *
-buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags)
+buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 {
         unsigned long flags;
         struct page *page = NULL;
@@ -761,7 +761,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 }

 static inline int
-should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
+should_reclaim_zone(struct zone *z, gfp_t gfp_mask)
 {
         if (!z->reclaim_pages)
                 return 0;
@@ -774,7 +774,7 @@ should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page * fastcall
-__alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
                 struct zonelist *zonelist)
 {
         const int wait = gfp_mask & __GFP_WAIT;
@@ -977,7 +977,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
  * Common helper functions.
  */
-fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
         struct page * page;
         page = alloc_pages(gfp_mask, order);
@@ -988,7 +988,7 @@ fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned

 EXPORT_SYMBOL(__get_free_pages);

-fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask)
+fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
         struct page * page;

@@ -1750,6 +1750,8 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
         struct per_cpu_pages *pcp;

+        memset(p, 0, sizeof(*p));
+
         pcp = &p->pcp[0];               /* hot */
         pcp->count = 0;
         pcp->low = 2 * batch;
diff --git a/mm/page_io.c b/mm/page_io.c
index 2e605a19ce57..330e00d6db00 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -19,7 +19,7 @@
 #include <linux/writeback.h>
 #include <asm/pgtable.h>

-static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index,
+static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
                 struct page *page, bio_end_io_t end_io)
 {
         struct bio *bio;
diff --git a/mm/shmem.c b/mm/shmem.c
index 1f7aeb210c7b..ea064d89cda9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -921,8 +921,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
 }

 static inline struct page *
-shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info,
-                 unsigned long idx)
+shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
 {
         return alloc_page(gfp | __GFP_ZERO);
 }
diff --git a/mm/slab.c b/mm/slab.c
index 5cbbdfa6dd0e..d05c678bceb3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -650,8 +650,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
         return cachep->array[smp_processor_id()];
 }

-static inline kmem_cache_t *__find_general_cachep(size_t size,
-                                                unsigned int __nocast gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
         struct cache_sizes *csizep = malloc_sizes;

@@ -675,8 +674,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
         return csizep->cs_cachep;
 }

-kmem_cache_t *kmem_find_general_cachep(size_t size,
-                unsigned int __nocast gfpflags)
+kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
         return __find_general_cachep(size, gfpflags);
 }
@@ -1185,7 +1183,7 @@ __initcall(cpucache_init);
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
         struct page *page;
         void *addr;
@@ -2048,7 +2046,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);

 /* Get the memory for a slab management obj. */
 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
-                                        int colour_off, unsigned int __nocast local_flags)
+                                        int colour_off, gfp_t local_flags)
 {
         struct slab *slabp;

@@ -2149,7 +2147,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  * Grow (by 1) the number of slabs within a cache. This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
         struct slab *slabp;
         void *objp;
@@ -2356,7 +2354,7 @@ bad:
 #define check_slabp(x,y) do { } while(0)
 #endif

-static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 {
         int batchcount;
         struct kmem_list3 *l3;
@@ -2456,7 +2454,7 @@ alloc_done:
 }

 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 {
         might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2467,7 +2465,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
 #if DEBUG
 static void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-                        unsigned int __nocast flags, void *objp, void *caller)
+                        gfp_t flags, void *objp, void *caller)
 {
         if (!objp)
                 return objp;
@@ -2510,7 +2508,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif

-static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
         void* objp;
         struct array_cache *ac;
@@ -2528,7 +2526,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast
         return objp;
 }

-static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
         unsigned long save_flags;
         void* objp;
@@ -2787,7 +2785,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
  * Allocate an object from this cache. The flags are only relevant
  * if the cache has no available objects.
  */
-void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
         return __cache_alloc(cachep, flags);
 }
@@ -2848,7 +2846,7 @@ out:
  * New and improved: it will now make sure that the object gets
 * put on the correct node list so that there is no false sharing.
 */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
         unsigned long save_flags;
         void *ptr;
@@ -2875,7 +2873,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);

-void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
         kmem_cache_t *cachep;

@@ -2908,7 +2906,7 @@ EXPORT_SYMBOL(kmalloc_node);
 * platforms. For example, on i386, it means that the memory must come
 * from the first 16MB.
 */
-void *__kmalloc(size_t size, unsigned int __nocast flags)
+void *__kmalloc(size_t size, gfp_t flags)
 {
         kmem_cache_t *cachep;

@@ -2997,7 +2995,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 */
-void *kzalloc(size_t size, unsigned int __nocast flags)
+void *kzalloc(size_t size, gfp_t flags)
 {
         void *ret = kmalloc(size, flags);
         if (ret)
@@ -3603,7 +3601,7 @@ unsigned int ksize(const void *objp)
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
-char *kstrdup(const char *s, unsigned int __nocast gfp)
+char *kstrdup(const char *s, gfp_t gfp)
 {
         size_t len;
         char *buf;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index adbc2b426c2f..132164f7d0a7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -68,7 +68,7 @@ void show_swap_cache_info(void)
  * but sets SwapCache flag and private instead of mapping and index.
  */
 static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
-                unsigned int __nocast gfp_mask)
+                gfp_t gfp_mask)
 {
         int error;

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 13c3d82968ae..1150229b6366 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -395,7 +395,7 @@ void *vmap(struct page **pages, unsigned int count,

 EXPORT_SYMBOL(vmap);

-void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot)
+void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 {
         struct page **pages;
         unsigned int nr_pages, array_size, i;
@@ -446,7 +446,7 @@ fail:
  * allocator with @gfp_mask flags. Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
         struct vm_struct *area;

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0ea71e887bb6..64f9570cff56 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -511,10 +511,11 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                  * PageDirty _after_ making sure that the page is freeable and
                  * not in use by anybody. (pagecache + us == 2)
                  */
-                if (page_count(page) != 2 || PageDirty(page)) {
-                        write_unlock_irq(&mapping->tree_lock);
-                        goto keep_locked;
-                }
+                if (unlikely(page_count(page) != 2))
+                        goto cannot_free;
+                smp_rmb();
+                if (unlikely(PageDirty(page)))
+                        goto cannot_free;

 #ifdef CONFIG_SWAP
                 if (PageSwapCache(page)) {
@@ -538,6 +539,10 @@ free_it:
                         __pagevec_release_nonlru(&freed_pvec);
                 continue;

+cannot_free:
+                write_unlock_irq(&mapping->tree_lock);
+                goto keep_locked;
+
 activate_locked:
                 SetPageActive(page);
                 pgactivate++;