Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig       |   4
-rw-r--r--   mm/bootmem.c     |  45
-rw-r--r--   mm/fremap.c      |   3
-rw-r--r--   mm/highmem.c     |   2
-rw-r--r--   mm/hugetlb.c     |  57
-rw-r--r--   mm/madvise.c     |  11
-rw-r--r--   mm/memory.c      |   4
-rw-r--r--   mm/mempolicy.c   |   8
-rw-r--r--   mm/mempool.c     |   6
-rw-r--r--   mm/mmap.c        |   2
-rw-r--r--   mm/mprotect.c    |   3
-rw-r--r--   mm/mremap.c      |   6
-rw-r--r--   mm/nommu.c       |   3
-rw-r--r--   mm/oom_kill.c    |   2
-rw-r--r--   mm/page_alloc.c  |  12
-rw-r--r--   mm/page_io.c     |   2
-rw-r--r--   mm/shmem.c       |   3
-rw-r--r--   mm/slab.c        | 100
-rw-r--r--   mm/swap_state.c  |   2
-rw-r--r--   mm/swapfile.c    |   1
-rw-r--r--   mm/vmalloc.c     |   4
-rw-r--r--   mm/vmscan.c      |  13
22 files changed, 170 insertions, 123 deletions
diff --git a/mm/Kconfig b/mm/Kconfig index 4e9937ac3529..391ffc54d136 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -29,7 +29,7 @@ config FLATMEM_MANUAL | |||
| 29 | If unsure, choose this option (Flat Memory) over any other. | 29 | If unsure, choose this option (Flat Memory) over any other. |
| 30 | 30 | ||
| 31 | config DISCONTIGMEM_MANUAL | 31 | config DISCONTIGMEM_MANUAL |
| 32 | bool "Discontigious Memory" | 32 | bool "Discontiguous Memory" |
| 33 | depends on ARCH_DISCONTIGMEM_ENABLE | 33 | depends on ARCH_DISCONTIGMEM_ENABLE |
| 34 | help | 34 | help |
| 35 | This option provides enhanced support for discontiguous | 35 | This option provides enhanced support for discontiguous |
| @@ -52,7 +52,7 @@ config SPARSEMEM_MANUAL | |||
| 52 | memory hotplug systems. This is normal. | 52 | memory hotplug systems. This is normal. |
| 53 | 53 | ||
| 54 | For many other systems, this will be an alternative to | 54 | For many other systems, this will be an alternative to |
| 55 | "Discontigious Memory". This option provides some potential | 55 | "Discontiguous Memory". This option provides some potential |
| 56 | performance benefits, along with decreased code complexity, | 56 | performance benefits, along with decreased code complexity, |
| 57 | but it is newer, and more experimental. | 57 | but it is newer, and more experimental. |
| 58 | 58 | ||
diff --git a/mm/bootmem.c b/mm/bootmem.c index 8ec4e4c2a179..a58699b6579e 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
| @@ -61,17 +61,9 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat, | |||
| 61 | { | 61 | { |
| 62 | bootmem_data_t *bdata = pgdat->bdata; | 62 | bootmem_data_t *bdata = pgdat->bdata; |
| 63 | unsigned long mapsize = ((end - start)+7)/8; | 63 | unsigned long mapsize = ((end - start)+7)/8; |
| 64 | static struct pglist_data *pgdat_last; | 64 | |
| 65 | 65 | pgdat->pgdat_next = pgdat_list; | |
| 66 | pgdat->pgdat_next = NULL; | 66 | pgdat_list = pgdat; |
| 67 | /* Add new nodes last so that bootmem always starts | ||
| 68 | searching in the first nodes, not the last ones */ | ||
| 69 | if (pgdat_last) | ||
| 70 | pgdat_last->pgdat_next = pgdat; | ||
| 71 | else { | ||
| 72 | pgdat_list = pgdat; | ||
| 73 | pgdat_last = pgdat; | ||
| 74 | } | ||
| 75 | 67 | ||
| 76 | mapsize = ALIGN(mapsize, sizeof(long)); | 68 | mapsize = ALIGN(mapsize, sizeof(long)); |
| 77 | bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT); | 69 | bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT); |
| @@ -162,10 +154,10 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, | |||
| 162 | */ | 154 | */ |
| 163 | static void * __init | 155 | static void * __init |
| 164 | __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size, | 156 | __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size, |
| 165 | unsigned long align, unsigned long goal) | 157 | unsigned long align, unsigned long goal, unsigned long limit) |
| 166 | { | 158 | { |
| 167 | unsigned long offset, remaining_size, areasize, preferred; | 159 | unsigned long offset, remaining_size, areasize, preferred; |
| 168 | unsigned long i, start = 0, incr, eidx; | 160 | unsigned long i, start = 0, incr, eidx, end_pfn = bdata->node_low_pfn; |
| 169 | void *ret; | 161 | void *ret; |
| 170 | 162 | ||
| 171 | if(!size) { | 163 | if(!size) { |
| @@ -174,7 +166,14 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size, | |||
| 174 | } | 166 | } |
| 175 | BUG_ON(align & (align-1)); | 167 | BUG_ON(align & (align-1)); |
| 176 | 168 | ||
| 177 | eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT); | 169 | if (limit && bdata->node_boot_start >= limit) |
| 170 | return NULL; | ||
| 171 | |||
| 172 | limit >>=PAGE_SHIFT; | ||
| 173 | if (limit && end_pfn > limit) | ||
| 174 | end_pfn = limit; | ||
| 175 | |||
| 176 | eidx = end_pfn - (bdata->node_boot_start >> PAGE_SHIFT); | ||
| 178 | offset = 0; | 177 | offset = 0; |
| 179 | if (align && | 178 | if (align && |
| 180 | (bdata->node_boot_start & (align - 1UL)) != 0) | 179 | (bdata->node_boot_start & (align - 1UL)) != 0) |
| @@ -186,11 +185,12 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size, | |||
| 186 | * first, then we try to allocate lower pages. | 185 | * first, then we try to allocate lower pages. |
| 187 | */ | 186 | */ |
| 188 | if (goal && (goal >= bdata->node_boot_start) && | 187 | if (goal && (goal >= bdata->node_boot_start) && |
| 189 | ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) { | 188 | ((goal >> PAGE_SHIFT) < end_pfn)) { |
| 190 | preferred = goal - bdata->node_boot_start; | 189 | preferred = goal - bdata->node_boot_start; |
| 191 | 190 | ||
| 192 | if (bdata->last_success >= preferred) | 191 | if (bdata->last_success >= preferred) |
| 193 | preferred = bdata->last_success; | 192 | if (!limit || (limit && limit > bdata->last_success)) |
| 193 | preferred = bdata->last_success; | ||
| 194 | } else | 194 | } else |
| 195 | preferred = 0; | 195 | preferred = 0; |
| 196 | 196 | ||
| @@ -390,14 +390,15 @@ unsigned long __init free_all_bootmem (void) | |||
| 390 | return(free_all_bootmem_core(NODE_DATA(0))); | 390 | return(free_all_bootmem_core(NODE_DATA(0))); |
| 391 | } | 391 | } |
| 392 | 392 | ||
| 393 | void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal) | 393 | void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, |
| 394 | unsigned long limit) | ||
| 394 | { | 395 | { |
| 395 | pg_data_t *pgdat = pgdat_list; | 396 | pg_data_t *pgdat = pgdat_list; |
| 396 | void *ptr; | 397 | void *ptr; |
| 397 | 398 | ||
| 398 | for_each_pgdat(pgdat) | 399 | for_each_pgdat(pgdat) |
| 399 | if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, | 400 | if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, |
| 400 | align, goal))) | 401 | align, goal, limit))) |
| 401 | return(ptr); | 402 | return(ptr); |
| 402 | 403 | ||
| 403 | /* | 404 | /* |
| @@ -408,14 +409,16 @@ void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned | |||
| 408 | return NULL; | 409 | return NULL; |
| 409 | } | 410 | } |
| 410 | 411 | ||
| 411 | void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) | 412 | |
| 413 | void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, | ||
| 414 | unsigned long goal, unsigned long limit) | ||
| 412 | { | 415 | { |
| 413 | void *ptr; | 416 | void *ptr; |
| 414 | 417 | ||
| 415 | ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal); | 418 | ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, limit); |
| 416 | if (ptr) | 419 | if (ptr) |
| 417 | return (ptr); | 420 | return (ptr); |
| 418 | 421 | ||
| 419 | return __alloc_bootmem(size, align, goal); | 422 | return __alloc_bootmem_limit(size, align, goal, limit); |
| 420 | } | 423 | } |
| 421 | 424 | ||
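The bootmem.c hunks above teach __alloc_bootmem_core() about an upper physical-address limit and add __alloc_bootmem_limit()/__alloc_bootmem_node_limit() wrappers that pass it down. A minimal standalone sketch of just the clamping step shown in the hunk; PAGE_SHIFT is stubbed here for illustration (the kernel takes it from asm/page.h), and the helper name is made up for this example:

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL	/* illustrative; the real value is per-arch */

/*
 * Mirrors the clamping in __alloc_bootmem_core(): a limit of 0 means
 * "no limit"; otherwise the node's end pfn is capped so the bitmap
 * scan never considers frames above the limit.
 */
static unsigned long clamp_end_pfn(unsigned long node_boot_start,
				   unsigned long node_low_pfn,
				   unsigned long limit)
{
	unsigned long end_pfn = node_low_pfn;

	if (limit && node_boot_start >= limit)
		return 0;		/* whole node lies above the limit */

	limit >>= PAGE_SHIFT;		/* bytes -> page frame number */
	if (limit && end_pfn > limit)
		end_pfn = limit;
	return end_pfn;
}

int main(void)
{
	/* node spanning 16MB..1GB, capped at 512MB: prints 131072 */
	printf("%lu\n", clamp_end_pfn(16UL << 20,
				      (1UL << 30) >> PAGE_SHIFT,
				      512UL << 20));
	return 0;
}
```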
diff --git a/mm/fremap.c b/mm/fremap.c index 3235fb77c133..ab23a0673c35 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
| @@ -89,6 +89,9 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 89 | size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | 89 | size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
| 90 | if (!page->mapping || page->index >= size) | 90 | if (!page->mapping || page->index >= size) |
| 91 | goto err_unlock; | 91 | goto err_unlock; |
| 92 | err = -ENOMEM; | ||
| 93 | if (page_mapcount(page) > INT_MAX/2) | ||
| 94 | goto err_unlock; | ||
| 92 | 95 | ||
| 93 | zap_pte(mm, vma, addr, pte); | 96 | zap_pte(mm, vma, addr, pte); |
| 94 | 97 | ||
diff --git a/mm/highmem.c b/mm/highmem.c index 400911599468..90e1861e2da0 100644 --- a/mm/highmem.c +++ b/mm/highmem.c | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | 30 | ||
| 31 | static mempool_t *page_pool, *isa_page_pool; | 31 | static mempool_t *page_pool, *isa_page_pool; |
| 32 | 32 | ||
| 33 | static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data) | 33 | static void *page_pool_alloc(gfp_t gfp_mask, void *data) |
| 34 | { | 34 | { |
| 35 | unsigned int gfp = gfp_mask | (unsigned int) (long) data; | 35 | unsigned int gfp = gfp_mask | (unsigned int) (long) data; |
| 36 | 36 | ||
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 901ac523a1c3..61d380678030 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -274,21 +274,22 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, | |||
| 274 | { | 274 | { |
| 275 | pte_t *src_pte, *dst_pte, entry; | 275 | pte_t *src_pte, *dst_pte, entry; |
| 276 | struct page *ptepage; | 276 | struct page *ptepage; |
| 277 | unsigned long addr = vma->vm_start; | 277 | unsigned long addr; |
| 278 | unsigned long end = vma->vm_end; | ||
| 279 | 278 | ||
| 280 | while (addr < end) { | 279 | for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { |
| 281 | dst_pte = huge_pte_alloc(dst, addr); | 280 | dst_pte = huge_pte_alloc(dst, addr); |
| 282 | if (!dst_pte) | 281 | if (!dst_pte) |
| 283 | goto nomem; | 282 | goto nomem; |
| 283 | spin_lock(&src->page_table_lock); | ||
| 284 | src_pte = huge_pte_offset(src, addr); | 284 | src_pte = huge_pte_offset(src, addr); |
| 285 | BUG_ON(!src_pte || pte_none(*src_pte)); /* prefaulted */ | 285 | if (src_pte && !pte_none(*src_pte)) { |
| 286 | entry = *src_pte; | 286 | entry = *src_pte; |
| 287 | ptepage = pte_page(entry); | 287 | ptepage = pte_page(entry); |
| 288 | get_page(ptepage); | 288 | get_page(ptepage); |
| 289 | add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); | 289 | add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); |
| 290 | set_huge_pte_at(dst, addr, dst_pte, entry); | 290 | set_huge_pte_at(dst, addr, dst_pte, entry); |
| 291 | addr += HPAGE_SIZE; | 291 | } |
| 292 | spin_unlock(&src->page_table_lock); | ||
| 292 | } | 293 | } |
| 293 | return 0; | 294 | return 0; |
| 294 | 295 | ||
| @@ -323,8 +324,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | |||
| 323 | 324 | ||
| 324 | page = pte_page(pte); | 325 | page = pte_page(pte); |
| 325 | put_page(page); | 326 | put_page(page); |
| 327 | add_mm_counter(mm, rss, - (HPAGE_SIZE / PAGE_SIZE)); | ||
| 326 | } | 328 | } |
| 327 | add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT)); | ||
| 328 | flush_tlb_range(vma, start, end); | 329 | flush_tlb_range(vma, start, end); |
| 329 | } | 330 | } |
| 330 | 331 | ||
| @@ -393,6 +394,28 @@ out: | |||
| 393 | return ret; | 394 | return ret; |
| 394 | } | 395 | } |
| 395 | 396 | ||
| 397 | /* | ||
| 398 | * On ia64 at least, it is possible to receive a hugetlb fault from a | ||
| 399 | * stale zero entry left in the TLB from earlier hardware prefetching. | ||
| 400 | * Low-level arch code should already have flushed the stale entry as | ||
| 401 | * part of its fault handling, but we do need to accept this minor fault | ||
| 402 | * and return successfully. Whereas the "normal" case is that this is | ||
| 403 | * an access to a hugetlb page which has been truncated off since mmap. | ||
| 404 | */ | ||
| 405 | int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | ||
| 406 | unsigned long address, int write_access) | ||
| 407 | { | ||
| 408 | int ret = VM_FAULT_SIGBUS; | ||
| 409 | pte_t *pte; | ||
| 410 | |||
| 411 | spin_lock(&mm->page_table_lock); | ||
| 412 | pte = huge_pte_offset(mm, address); | ||
| 413 | if (pte && !pte_none(*pte)) | ||
| 414 | ret = VM_FAULT_MINOR; | ||
| 415 | spin_unlock(&mm->page_table_lock); | ||
| 416 | return ret; | ||
| 417 | } | ||
| 418 | |||
| 396 | int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | 419 | int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 397 | struct page **pages, struct vm_area_struct **vmas, | 420 | struct page **pages, struct vm_area_struct **vmas, |
| 398 | unsigned long *position, int *length, int i) | 421 | unsigned long *position, int *length, int i) |
| @@ -403,6 +426,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 403 | BUG_ON(!is_vm_hugetlb_page(vma)); | 426 | BUG_ON(!is_vm_hugetlb_page(vma)); |
| 404 | 427 | ||
| 405 | vpfn = vaddr/PAGE_SIZE; | 428 | vpfn = vaddr/PAGE_SIZE; |
| 429 | spin_lock(&mm->page_table_lock); | ||
| 406 | while (vaddr < vma->vm_end && remainder) { | 430 | while (vaddr < vma->vm_end && remainder) { |
| 407 | 431 | ||
| 408 | if (pages) { | 432 | if (pages) { |
| @@ -415,8 +439,13 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 415 | * indexing below to work. */ | 439 | * indexing below to work. */ |
| 416 | pte = huge_pte_offset(mm, vaddr & HPAGE_MASK); | 440 | pte = huge_pte_offset(mm, vaddr & HPAGE_MASK); |
| 417 | 441 | ||
| 418 | /* hugetlb should be locked, and hence, prefaulted */ | 442 | /* the hugetlb file might have been truncated */ |
| 419 | WARN_ON(!pte || pte_none(*pte)); | 443 | if (!pte || pte_none(*pte)) { |
| 444 | remainder = 0; | ||
| 445 | if (!i) | ||
| 446 | i = -EFAULT; | ||
| 447 | break; | ||
| 448 | } | ||
| 420 | 449 | ||
| 421 | page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; | 450 | page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; |
| 422 | 451 | ||
| @@ -434,7 +463,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 434 | --remainder; | 463 | --remainder; |
| 435 | ++i; | 464 | ++i; |
| 436 | } | 465 | } |
| 437 | 466 | spin_unlock(&mm->page_table_lock); | |
| 438 | *length = remainder; | 467 | *length = remainder; |
| 439 | *position = vaddr; | 468 | *position = vaddr; |
| 440 | 469 | ||
diff --git a/mm/madvise.c b/mm/madvise.c index 4454936f87d1..20e075d1c64c 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
| @@ -83,6 +83,9 @@ static long madvise_willneed(struct vm_area_struct * vma, | |||
| 83 | { | 83 | { |
| 84 | struct file *file = vma->vm_file; | 84 | struct file *file = vma->vm_file; |
| 85 | 85 | ||
| 86 | if (!file) | ||
| 87 | return -EBADF; | ||
| 88 | |||
| 86 | if (file->f_mapping->a_ops->get_xip_page) { | 89 | if (file->f_mapping->a_ops->get_xip_page) { |
| 87 | /* no bad return value, but ignore advice */ | 90 | /* no bad return value, but ignore advice */ |
| 88 | return 0; | 91 | return 0; |
| @@ -141,11 +144,7 @@ static long | |||
| 141 | madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, | 144 | madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, |
| 142 | unsigned long start, unsigned long end, int behavior) | 145 | unsigned long start, unsigned long end, int behavior) |
| 143 | { | 146 | { |
| 144 | struct file *filp = vma->vm_file; | 147 | long error; |
| 145 | long error = -EBADF; | ||
| 146 | |||
| 147 | if (!filp) | ||
| 148 | goto out; | ||
| 149 | 148 | ||
| 150 | switch (behavior) { | 149 | switch (behavior) { |
| 151 | case MADV_NORMAL: | 150 | case MADV_NORMAL: |
| @@ -166,8 +165,6 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, | |||
| 166 | error = -EINVAL; | 165 | error = -EINVAL; |
| 167 | break; | 166 | break; |
| 168 | } | 167 | } |
| 169 | |||
| 170 | out: | ||
| 171 | return error; | 168 | return error; |
| 172 | } | 169 | } |
| 173 | 170 | ||
diff --git a/mm/memory.c b/mm/memory.c index ae8161f1f459..1db40e935e55 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -2045,8 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma, | |||
| 2045 | 2045 | ||
| 2046 | inc_page_state(pgfault); | 2046 | inc_page_state(pgfault); |
| 2047 | 2047 | ||
| 2048 | if (is_vm_hugetlb_page(vma)) | 2048 | if (unlikely(is_vm_hugetlb_page(vma))) |
| 2049 | return VM_FAULT_SIGBUS; /* mapping truncation does this. */ | 2049 | return hugetlb_fault(mm, vma, address, write_access); |
| 2050 | 2050 | ||
| 2051 | /* | 2051 | /* |
| 2052 | * We need the page table lock to synchronize with kswapd | 2052 | * We need the page table lock to synchronize with kswapd |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 9033f0859aa8..37af443eb094 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -687,7 +687,7 @@ get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned lo | |||
| 687 | } | 687 | } |
| 688 | 688 | ||
| 689 | /* Return a zonelist representing a mempolicy */ | 689 | /* Return a zonelist representing a mempolicy */ |
| 690 | static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy) | 690 | static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy) |
| 691 | { | 691 | { |
| 692 | int nd; | 692 | int nd; |
| 693 | 693 | ||
| @@ -751,7 +751,7 @@ static unsigned offset_il_node(struct mempolicy *pol, | |||
| 751 | 751 | ||
| 752 | /* Allocate a page in interleaved policy. | 752 | /* Allocate a page in interleaved policy. |
| 753 | Own path because it needs to do special accounting. */ | 753 | Own path because it needs to do special accounting. */ |
| 754 | static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid) | 754 | static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid) |
| 755 | { | 755 | { |
| 756 | struct zonelist *zl; | 756 | struct zonelist *zl; |
| 757 | struct page *page; | 757 | struct page *page; |
| @@ -789,7 +789,7 @@ static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned or | |||
| 789 | * Should be called with the mm_sem of the vma hold. | 789 | * Should be called with the mm_sem of the vma hold. |
| 790 | */ | 790 | */ |
| 791 | struct page * | 791 | struct page * |
| 792 | alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr) | 792 | alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) |
| 793 | { | 793 | { |
| 794 | struct mempolicy *pol = get_vma_policy(current, vma, addr); | 794 | struct mempolicy *pol = get_vma_policy(current, vma, addr); |
| 795 | 795 | ||
| @@ -832,7 +832,7 @@ alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned l | |||
| 832 | * 1) it's ok to take cpuset_sem (can WAIT), and | 832 | * 1) it's ok to take cpuset_sem (can WAIT), and |
| 833 | * 2) allocating for current task (not interrupt). | 833 | * 2) allocating for current task (not interrupt). |
| 834 | */ | 834 | */ |
| 835 | struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order) | 835 | struct page *alloc_pages_current(gfp_t gfp, unsigned order) |
| 836 | { | 836 | { |
| 837 | struct mempolicy *pol = current->mempolicy; | 837 | struct mempolicy *pol = current->mempolicy; |
| 838 | 838 | ||
diff --git a/mm/mempool.c b/mm/mempool.c index 65f2957b8d51..9e377ea700b2 100644 --- a/mm/mempool.c +++ b/mm/mempool.c | |||
| @@ -112,7 +112,7 @@ EXPORT_SYMBOL(mempool_create_node); | |||
| 112 | * while this function is running. mempool_alloc() & mempool_free() | 112 | * while this function is running. mempool_alloc() & mempool_free() |
| 113 | * might be called (eg. from IRQ contexts) while this function executes. | 113 | * might be called (eg. from IRQ contexts) while this function executes. |
| 114 | */ | 114 | */ |
| 115 | int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask) | 115 | int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask) |
| 116 | { | 116 | { |
| 117 | void *element; | 117 | void *element; |
| 118 | void **new_elements; | 118 | void **new_elements; |
| @@ -200,7 +200,7 @@ EXPORT_SYMBOL(mempool_destroy); | |||
| 200 | * *never* fails when called from process contexts. (it might | 200 | * *never* fails when called from process contexts. (it might |
| 201 | * fail if called from an IRQ context.) | 201 | * fail if called from an IRQ context.) |
| 202 | */ | 202 | */ |
| 203 | void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask) | 203 | void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) |
| 204 | { | 204 | { |
| 205 | void *element; | 205 | void *element; |
| 206 | unsigned long flags; | 206 | unsigned long flags; |
| @@ -276,7 +276,7 @@ EXPORT_SYMBOL(mempool_free); | |||
| 276 | /* | 276 | /* |
| 277 | * A commonly used alloc and free fn. | 277 | * A commonly used alloc and free fn. |
| 278 | */ | 278 | */ |
| 279 | void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data) | 279 | void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) |
| 280 | { | 280 | { |
| 281 | kmem_cache_t *mem = (kmem_cache_t *) pool_data; | 281 | kmem_cache_t *mem = (kmem_cache_t *) pool_data; |
| 282 | return kmem_cache_alloc(mem, gfp_mask); | 282 | return kmem_cache_alloc(mem, gfp_mask); |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
| @@ -1640,7 +1640,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) | |||
| 1640 | /* | 1640 | /* |
| 1641 | * Get rid of page table information in the indicated region. | 1641 | * Get rid of page table information in the indicated region. |
| 1642 | * | 1642 | * |
| 1643 | * Called with the page table lock held. | 1643 | * Called with the mm semaphore held. |
| 1644 | */ | 1644 | */ |
| 1645 | static void unmap_region(struct mm_struct *mm, | 1645 | static void unmap_region(struct mm_struct *mm, |
| 1646 | struct vm_area_struct *vma, struct vm_area_struct *prev, | 1646 | struct vm_area_struct *vma, struct vm_area_struct *prev, |
diff --git a/mm/mprotect.c b/mm/mprotect.c index e9fbd013ad9a..57577f63b305 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
| @@ -248,7 +248,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot) | |||
| 248 | 248 | ||
| 249 | newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); | 249 | newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); |
| 250 | 250 | ||
| 251 | if ((newflags & ~(newflags >> 4)) & 0xf) { | 251 | /* newflags >> 4 shift VM_MAY% in place of VM_% */ |
| 252 | if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { | ||
| 252 | error = -EACCES; | 253 | error = -EACCES; |
| 253 | goto out; | 254 | goto out; |
| 254 | } | 255 | } |
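In the mprotect hunk the magic 0xf test is rewritten with named flags: shifting newflags right by four lines each VM_MAY* bit up under its VM_* counterpart, so the expression is non-zero exactly when a requested permission lacks its VM_MAY* bit. A small standalone check of that bit trick, using the VM_* values as defined in include/linux/mm.h:

```c
#include <assert.h>

#define VM_READ		0x00000001UL
#define VM_WRITE	0x00000002UL
#define VM_EXEC		0x00000004UL
#define VM_MAYREAD	0x00000010UL	/* VM_READ  << 4 */
#define VM_MAYWRITE	0x00000020UL	/* VM_WRITE << 4 */
#define VM_MAYEXEC	0x00000040UL	/* VM_EXEC  << 4 */

/* Non-zero when some requested VM_* permission has no VM_MAY* bit. */
static unsigned long denied(unsigned long newflags)
{
	return (newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC);
}

int main(void)
{
	/* write requested and allowed: check passes */
	assert(denied(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) == 0);
	/* write requested but only VM_MAYREAD present: the -EACCES path */
	assert(denied(VM_READ | VM_WRITE | VM_MAYREAD) == VM_WRITE);
	return 0;
}
```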
diff --git a/mm/mremap.c b/mm/mremap.c index a32fed454bd7..f343fc73a8bd 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
| @@ -141,10 +141,10 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr, | |||
| 141 | if (dst) { | 141 | if (dst) { |
| 142 | pte_t pte; | 142 | pte_t pte; |
| 143 | pte = ptep_clear_flush(vma, old_addr, src); | 143 | pte = ptep_clear_flush(vma, old_addr, src); |
| 144 | |||
| 144 | /* ZERO_PAGE can be dependant on virtual addr */ | 145 | /* ZERO_PAGE can be dependant on virtual addr */ |
| 145 | if (pfn_valid(pte_pfn(pte)) && | 146 | pte = move_pte(pte, new_vma->vm_page_prot, |
| 146 | pte_page(pte) == ZERO_PAGE(old_addr)) | 147 | old_addr, new_addr); |
| 147 | pte = pte_wrprotect(mk_pte(ZERO_PAGE(new_addr), new_vma->vm_page_prot)); | ||
| 148 | set_pte_at(mm, new_addr, dst, pte); | 148 | set_pte_at(mm, new_addr, dst, pte); |
| 149 | } else | 149 | } else |
| 150 | error = -ENOMEM; | 150 | error = -ENOMEM; |
diff --git a/mm/nommu.c b/mm/nommu.c index 064d70442895..0ef241ae3763 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
| @@ -157,8 +157,7 @@ void vfree(void *addr) | |||
| 157 | kfree(addr); | 157 | kfree(addr); |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, | 160 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) |
| 161 | pgprot_t prot) | ||
| 162 | { | 161 | { |
| 163 | /* | 162 | /* |
| 164 | * kmalloc doesn't like __GFP_HIGHMEM for some reason | 163 | * kmalloc doesn't like __GFP_HIGHMEM for some reason |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ac3bf33e5370..d348b9035955 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -263,7 +263,7 @@ static struct mm_struct *oom_kill_process(struct task_struct *p) | |||
| 263 | * OR try to be smart about which process to kill. Note that we | 263 | * OR try to be smart about which process to kill. Note that we |
| 264 | * don't have to be perfect here, we just have to be good. | 264 | * don't have to be perfect here, we just have to be good. |
| 265 | */ | 265 | */ |
| 266 | void out_of_memory(unsigned int __nocast gfp_mask, int order) | 266 | void out_of_memory(gfp_t gfp_mask, int order) |
| 267 | { | 267 | { |
| 268 | struct mm_struct *mm = NULL; | 268 | struct mm_struct *mm = NULL; |
| 269 | task_t * p; | 269 | task_t * p; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ae2903339e71..cc1fe2672a31 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -671,7 +671,7 @@ void fastcall free_cold_page(struct page *page) | |||
| 671 | free_hot_cold_page(page, 1); | 671 | free_hot_cold_page(page, 1); |
| 672 | } | 672 | } |
| 673 | 673 | ||
| 674 | static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags) | 674 | static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) |
| 675 | { | 675 | { |
| 676 | int i; | 676 | int i; |
| 677 | 677 | ||
| @@ -686,7 +686,7 @@ static inline void prep_zero_page(struct page *page, int order, unsigned int __n | |||
| 686 | * or two. | 686 | * or two. |
| 687 | */ | 687 | */ |
| 688 | static struct page * | 688 | static struct page * |
| 689 | buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags) | 689 | buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags) |
| 690 | { | 690 | { |
| 691 | unsigned long flags; | 691 | unsigned long flags; |
| 692 | struct page *page = NULL; | 692 | struct page *page = NULL; |
| @@ -761,7 +761,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |||
| 761 | } | 761 | } |
| 762 | 762 | ||
| 763 | static inline int | 763 | static inline int |
| 764 | should_reclaim_zone(struct zone *z, unsigned int gfp_mask) | 764 | should_reclaim_zone(struct zone *z, gfp_t gfp_mask) |
| 765 | { | 765 | { |
| 766 | if (!z->reclaim_pages) | 766 | if (!z->reclaim_pages) |
| 767 | return 0; | 767 | return 0; |
| @@ -774,7 +774,7 @@ should_reclaim_zone(struct zone *z, unsigned int gfp_mask) | |||
| 774 | * This is the 'heart' of the zoned buddy allocator. | 774 | * This is the 'heart' of the zoned buddy allocator. |
| 775 | */ | 775 | */ |
| 776 | struct page * fastcall | 776 | struct page * fastcall |
| 777 | __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order, | 777 | __alloc_pages(gfp_t gfp_mask, unsigned int order, |
| 778 | struct zonelist *zonelist) | 778 | struct zonelist *zonelist) |
| 779 | { | 779 | { |
| 780 | const int wait = gfp_mask & __GFP_WAIT; | 780 | const int wait = gfp_mask & __GFP_WAIT; |
| @@ -977,7 +977,7 @@ EXPORT_SYMBOL(__alloc_pages); | |||
| 977 | /* | 977 | /* |
| 978 | * Common helper functions. | 978 | * Common helper functions. |
| 979 | */ | 979 | */ |
| 980 | fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order) | 980 | fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) |
| 981 | { | 981 | { |
| 982 | struct page * page; | 982 | struct page * page; |
| 983 | page = alloc_pages(gfp_mask, order); | 983 | page = alloc_pages(gfp_mask, order); |
| @@ -988,7 +988,7 @@ fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned | |||
| 988 | 988 | ||
| 989 | EXPORT_SYMBOL(__get_free_pages); | 989 | EXPORT_SYMBOL(__get_free_pages); |
| 990 | 990 | ||
| 991 | fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask) | 991 | fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) |
| 992 | { | 992 | { |
| 993 | struct page * page; | 993 | struct page * page; |
| 994 | 994 | ||
diff --git a/mm/page_io.c b/mm/page_io.c index 2e605a19ce57..330e00d6db00 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | #include <linux/writeback.h> | 19 | #include <linux/writeback.h> |
| 20 | #include <asm/pgtable.h> | 20 | #include <asm/pgtable.h> |
| 21 | 21 | ||
| 22 | static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index, | 22 | static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index, |
| 23 | struct page *page, bio_end_io_t end_io) | 23 | struct page *page, bio_end_io_t end_io) |
| 24 | { | 24 | { |
| 25 | struct bio *bio; | 25 | struct bio *bio; |
diff --git a/mm/shmem.c b/mm/shmem.c index 1f7aeb210c7b..ea064d89cda9 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -921,8 +921,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) | |||
| 921 | } | 921 | } |
| 922 | 922 | ||
| 923 | static inline struct page * | 923 | static inline struct page * |
| 924 | shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info, | 924 | shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) |
| 925 | unsigned long idx) | ||
| 926 | { | 925 | { |
| 927 | return alloc_page(gfp | __GFP_ZERO); | 926 | return alloc_page(gfp | __GFP_ZERO); |
| 928 | } | 927 | } |
| @@ -308,12 +308,12 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; | |||
| 308 | #define SIZE_L3 (1 + MAX_NUMNODES) | 308 | #define SIZE_L3 (1 + MAX_NUMNODES) |
| 309 | 309 | ||
| 310 | /* | 310 | /* |
| 311 | * This function may be completely optimized away if | 311 | * This function must be completely optimized away if |
| 312 | * a constant is passed to it. Mostly the same as | 312 | * a constant is passed to it. Mostly the same as |
| 313 | * what is in linux/slab.h except it returns an | 313 | * what is in linux/slab.h except it returns an |
| 314 | * index. | 314 | * index. |
| 315 | */ | 315 | */ |
| 316 | static inline int index_of(const size_t size) | 316 | static __always_inline int index_of(const size_t size) |
| 317 | { | 317 | { |
| 318 | if (__builtin_constant_p(size)) { | 318 | if (__builtin_constant_p(size)) { |
| 319 | int i = 0; | 319 | int i = 0; |
| @@ -329,7 +329,8 @@ static inline int index_of(const size_t size) | |||
| 329 | extern void __bad_size(void); | 329 | extern void __bad_size(void); |
| 330 | __bad_size(); | 330 | __bad_size(); |
| 331 | } | 331 | } |
| 332 | } | 332 | } else |
| 333 | BUG(); | ||
| 333 | return 0; | 334 | return 0; |
| 334 | } | 335 | } |
| 335 | 336 | ||
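The index_of() hunk above marks the helper __always_inline and BUGs on a non-constant size: the function is only acceptable because its chain of comparisons folds away when the compiler can see the size at compile time. A standalone sketch of the same pattern with an abbreviated size table (the kernel builds its table from kmalloc_sizes.h); compile with optimization so the constant actually propagates:

```c
#include <stddef.h>
#include <stdio.h>

/* Abbreviated stand-in for the kernel's kmalloc size table. */
static const size_t cache_sizes[] = { 32, 64, 128, 256, 512, 1024 };

/*
 * Like slab's index_of(): when called with a compile-time constant the
 * loop unrolls and the whole call folds to an immediate.  The kernel
 * now BUG()s (or link-errors via __bad_size()) if that promise is
 * broken; this sketch just returns -1.
 */
static __attribute__((always_inline)) inline int index_of(const size_t size)
{
	if (__builtin_constant_p(size)) {
		size_t i;

		for (i = 0; i < sizeof(cache_sizes) / sizeof(cache_sizes[0]); i++)
			if (size <= cache_sizes[i])
				return (int)i;
	}
	return -1;	/* non-constant (or oversized) request */
}

int main(void)
{
	printf("%d\n", index_of(100));	/* folds to 2 at -O2 */
	return 0;
}
```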
| @@ -639,7 +640,7 @@ static enum { | |||
| 639 | 640 | ||
| 640 | static DEFINE_PER_CPU(struct work_struct, reap_work); | 641 | static DEFINE_PER_CPU(struct work_struct, reap_work); |
| 641 | 642 | ||
| 642 | static void free_block(kmem_cache_t* cachep, void** objpp, int len); | 643 | static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node); |
| 643 | static void enable_cpucache (kmem_cache_t *cachep); | 644 | static void enable_cpucache (kmem_cache_t *cachep); |
| 644 | static void cache_reap (void *unused); | 645 | static void cache_reap (void *unused); |
| 645 | static int __node_shrink(kmem_cache_t *cachep, int node); | 646 | static int __node_shrink(kmem_cache_t *cachep, int node); |
| @@ -649,8 +650,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep) | |||
| 649 | return cachep->array[smp_processor_id()]; | 650 | return cachep->array[smp_processor_id()]; |
| 650 | } | 651 | } |
| 651 | 652 | ||
| 652 | static inline kmem_cache_t *__find_general_cachep(size_t size, | 653 | static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags) |
| 653 | unsigned int __nocast gfpflags) | ||
| 654 | { | 654 | { |
| 655 | struct cache_sizes *csizep = malloc_sizes; | 655 | struct cache_sizes *csizep = malloc_sizes; |
| 656 | 656 | ||
| @@ -674,8 +674,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, | |||
| 674 | return csizep->cs_cachep; | 674 | return csizep->cs_cachep; |
| 675 | } | 675 | } |
| 676 | 676 | ||
| 677 | kmem_cache_t *kmem_find_general_cachep(size_t size, | 677 | kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags) |
| 678 | unsigned int __nocast gfpflags) | ||
| 679 | { | 678 | { |
| 680 | return __find_general_cachep(size, gfpflags); | 679 | return __find_general_cachep(size, gfpflags); |
| 681 | } | 680 | } |
| @@ -804,7 +803,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache | |||
| 804 | 803 | ||
| 805 | if (ac->avail) { | 804 | if (ac->avail) { |
| 806 | spin_lock(&rl3->list_lock); | 805 | spin_lock(&rl3->list_lock); |
| 807 | free_block(cachep, ac->entry, ac->avail); | 806 | free_block(cachep, ac->entry, ac->avail, node); |
| 808 | ac->avail = 0; | 807 | ac->avail = 0; |
| 809 | spin_unlock(&rl3->list_lock); | 808 | spin_unlock(&rl3->list_lock); |
| 810 | } | 809 | } |
| @@ -925,7 +924,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb, | |||
| 925 | /* Free limit for this kmem_list3 */ | 924 | /* Free limit for this kmem_list3 */ |
| 926 | l3->free_limit -= cachep->batchcount; | 925 | l3->free_limit -= cachep->batchcount; |
| 927 | if (nc) | 926 | if (nc) |
| 928 | free_block(cachep, nc->entry, nc->avail); | 927 | free_block(cachep, nc->entry, nc->avail, node); |
| 929 | 928 | ||
| 930 | if (!cpus_empty(mask)) { | 929 | if (!cpus_empty(mask)) { |
| 931 | spin_unlock(&l3->list_lock); | 930 | spin_unlock(&l3->list_lock); |
| @@ -934,7 +933,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb, | |||
| 934 | 933 | ||
| 935 | if (l3->shared) { | 934 | if (l3->shared) { |
| 936 | free_block(cachep, l3->shared->entry, | 935 | free_block(cachep, l3->shared->entry, |
| 937 | l3->shared->avail); | 936 | l3->shared->avail, node); |
| 938 | kfree(l3->shared); | 937 | kfree(l3->shared); |
| 939 | l3->shared = NULL; | 938 | l3->shared = NULL; |
| 940 | } | 939 | } |
| @@ -1184,7 +1183,7 @@ __initcall(cpucache_init); | |||
| 1184 | * did not request dmaable memory, we might get it, but that | 1183 | * did not request dmaable memory, we might get it, but that |
| 1185 | * would be relatively rare and ignorable. | 1184 | * would be relatively rare and ignorable. |
| 1186 | */ | 1185 | */ |
| 1187 | static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) | 1186 | static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid) |
| 1188 | { | 1187 | { |
| 1189 | struct page *page; | 1188 | struct page *page; |
| 1190 | void *addr; | 1189 | void *addr; |
| @@ -1882,12 +1881,13 @@ static void do_drain(void *arg) | |||
| 1882 | { | 1881 | { |
| 1883 | kmem_cache_t *cachep = (kmem_cache_t*)arg; | 1882 | kmem_cache_t *cachep = (kmem_cache_t*)arg; |
| 1884 | struct array_cache *ac; | 1883 | struct array_cache *ac; |
| 1884 | int node = numa_node_id(); | ||
| 1885 | 1885 | ||
| 1886 | check_irq_off(); | 1886 | check_irq_off(); |
| 1887 | ac = ac_data(cachep); | 1887 | ac = ac_data(cachep); |
| 1888 | spin_lock(&cachep->nodelists[numa_node_id()]->list_lock); | 1888 | spin_lock(&cachep->nodelists[node]->list_lock); |
| 1889 | free_block(cachep, ac->entry, ac->avail); | 1889 | free_block(cachep, ac->entry, ac->avail, node); |
| 1890 | spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock); | 1890 | spin_unlock(&cachep->nodelists[node]->list_lock); |
| 1891 | ac->avail = 0; | 1891 | ac->avail = 0; |
| 1892 | } | 1892 | } |
| 1893 | 1893 | ||
| @@ -2046,7 +2046,7 @@ EXPORT_SYMBOL(kmem_cache_destroy); | |||
| 2046 | 2046 | ||
| 2047 | /* Get the memory for a slab management obj. */ | 2047 | /* Get the memory for a slab management obj. */ |
| 2048 | static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp, | 2048 | static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp, |
| 2049 | int colour_off, unsigned int __nocast local_flags) | 2049 | int colour_off, gfp_t local_flags) |
| 2050 | { | 2050 | { |
| 2051 | struct slab *slabp; | 2051 | struct slab *slabp; |
| 2052 | 2052 | ||
| @@ -2147,7 +2147,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp) | |||
| 2147 | * Grow (by 1) the number of slabs within a cache. This is called by | 2147 | * Grow (by 1) the number of slabs within a cache. This is called by |
| 2148 | * kmem_cache_alloc() when there are no active objs left in a cache. | 2148 | * kmem_cache_alloc() when there are no active objs left in a cache. |
| 2149 | */ | 2149 | */ |
| 2150 | static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) | 2150 | static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) |
| 2151 | { | 2151 | { |
| 2152 | struct slab *slabp; | 2152 | struct slab *slabp; |
| 2153 | void *objp; | 2153 | void *objp; |
| @@ -2354,7 +2354,7 @@ bad: | |||
| 2354 | #define check_slabp(x,y) do { } while(0) | 2354 | #define check_slabp(x,y) do { } while(0) |
| 2355 | #endif | 2355 | #endif |
| 2356 | 2356 | ||
| 2357 | static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags) | 2357 | static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) |
| 2358 | { | 2358 | { |
| 2359 | int batchcount; | 2359 | int batchcount; |
| 2360 | struct kmem_list3 *l3; | 2360 | struct kmem_list3 *l3; |
| @@ -2454,7 +2454,7 @@ alloc_done: | |||
| 2454 | } | 2454 | } |
| 2455 | 2455 | ||
| 2456 | static inline void | 2456 | static inline void |
| 2457 | cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags) | 2457 | cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) |
| 2458 | { | 2458 | { |
| 2459 | might_sleep_if(flags & __GFP_WAIT); | 2459 | might_sleep_if(flags & __GFP_WAIT); |
| 2460 | #if DEBUG | 2460 | #if DEBUG |
| @@ -2465,7 +2465,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags) | |||
| 2465 | #if DEBUG | 2465 | #if DEBUG |
| 2466 | static void * | 2466 | static void * |
| 2467 | cache_alloc_debugcheck_after(kmem_cache_t *cachep, | 2467 | cache_alloc_debugcheck_after(kmem_cache_t *cachep, |
| 2468 | unsigned int __nocast flags, void *objp, void *caller) | 2468 | gfp_t flags, void *objp, void *caller) |
| 2469 | { | 2469 | { |
| 2470 | if (!objp) | 2470 | if (!objp) |
| 2471 | return objp; | 2471 | return objp; |
| @@ -2508,16 +2508,12 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep, | |||
| 2508 | #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) | 2508 | #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) |
| 2509 | #endif | 2509 | #endif |
| 2510 | 2510 | ||
| 2511 | 2511 | static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) | |
| 2512 | static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) | ||
| 2513 | { | 2512 | { |
| 2514 | unsigned long save_flags; | ||
| 2515 | void* objp; | 2513 | void* objp; |
| 2516 | struct array_cache *ac; | 2514 | struct array_cache *ac; |
| 2517 | 2515 | ||
| 2518 | cache_alloc_debugcheck_before(cachep, flags); | 2516 | check_irq_off(); |
| 2519 | |||
| 2520 | local_irq_save(save_flags); | ||
| 2521 | ac = ac_data(cachep); | 2517 | ac = ac_data(cachep); |
| 2522 | if (likely(ac->avail)) { | 2518 | if (likely(ac->avail)) { |
| 2523 | STATS_INC_ALLOCHIT(cachep); | 2519 | STATS_INC_ALLOCHIT(cachep); |
| @@ -2527,6 +2523,18 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast fl | |||
| 2527 | STATS_INC_ALLOCMISS(cachep); | 2523 | STATS_INC_ALLOCMISS(cachep); |
| 2528 | objp = cache_alloc_refill(cachep, flags); | 2524 | objp = cache_alloc_refill(cachep, flags); |
| 2529 | } | 2525 | } |
| 2526 | return objp; | ||
| 2527 | } | ||
| 2528 | |||
| 2529 | static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) | ||
| 2530 | { | ||
| 2531 | unsigned long save_flags; | ||
| 2532 | void* objp; | ||
| 2533 | |||
| 2534 | cache_alloc_debugcheck_before(cachep, flags); | ||
| 2535 | |||
| 2536 | local_irq_save(save_flags); | ||
| 2537 | objp = ____cache_alloc(cachep, flags); | ||
| 2530 | local_irq_restore(save_flags); | 2538 | local_irq_restore(save_flags); |
| 2531 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, | 2539 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, |
| 2532 | __builtin_return_address(0)); | 2540 | __builtin_return_address(0)); |
| @@ -2608,7 +2616,7 @@ done: | |||
| 2608 | /* | 2616 | /* |
| 2609 | * Caller needs to acquire correct kmem_list's list_lock | 2617 | * Caller needs to acquire correct kmem_list's list_lock |
| 2610 | */ | 2618 | */ |
| 2611 | static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects) | 2619 | static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node) |
| 2612 | { | 2620 | { |
| 2613 | int i; | 2621 | int i; |
| 2614 | struct kmem_list3 *l3; | 2622 | struct kmem_list3 *l3; |
| @@ -2617,14 +2625,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects) | |||
| 2617 | void *objp = objpp[i]; | 2625 | void *objp = objpp[i]; |
| 2618 | struct slab *slabp; | 2626 | struct slab *slabp; |
| 2619 | unsigned int objnr; | 2627 | unsigned int objnr; |
| 2620 | int nodeid = 0; | ||
| 2621 | 2628 | ||
| 2622 | slabp = GET_PAGE_SLAB(virt_to_page(objp)); | 2629 | slabp = GET_PAGE_SLAB(virt_to_page(objp)); |
| 2623 | nodeid = slabp->nodeid; | 2630 | l3 = cachep->nodelists[node]; |
| 2624 | l3 = cachep->nodelists[nodeid]; | ||
| 2625 | list_del(&slabp->list); | 2631 | list_del(&slabp->list); |
| 2626 | objnr = (objp - slabp->s_mem) / cachep->objsize; | 2632 | objnr = (objp - slabp->s_mem) / cachep->objsize; |
| 2627 | check_spinlock_acquired_node(cachep, nodeid); | 2633 | check_spinlock_acquired_node(cachep, node); |
| 2628 | check_slabp(cachep, slabp); | 2634 | check_slabp(cachep, slabp); |
| 2629 | 2635 | ||
| 2630 | 2636 | ||
| @@ -2664,13 +2670,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) | |||
| 2664 | { | 2670 | { |
| 2665 | int batchcount; | 2671 | int batchcount; |
| 2666 | struct kmem_list3 *l3; | 2672 | struct kmem_list3 *l3; |
| 2673 | int node = numa_node_id(); | ||
| 2667 | 2674 | ||
| 2668 | batchcount = ac->batchcount; | 2675 | batchcount = ac->batchcount; |
| 2669 | #if DEBUG | 2676 | #if DEBUG |
| 2670 | BUG_ON(!batchcount || batchcount > ac->avail); | 2677 | BUG_ON(!batchcount || batchcount > ac->avail); |
| 2671 | #endif | 2678 | #endif |
| 2672 | check_irq_off(); | 2679 | check_irq_off(); |
| 2673 | l3 = cachep->nodelists[numa_node_id()]; | 2680 | l3 = cachep->nodelists[node]; |
| 2674 | spin_lock(&l3->list_lock); | 2681 | spin_lock(&l3->list_lock); |
| 2675 | if (l3->shared) { | 2682 | if (l3->shared) { |
| 2676 | struct array_cache *shared_array = l3->shared; | 2683 | struct array_cache *shared_array = l3->shared; |
| @@ -2686,7 +2693,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) | |||
| 2686 | } | 2693 | } |
| 2687 | } | 2694 | } |
| 2688 | 2695 | ||
| 2689 | free_block(cachep, ac->entry, batchcount); | 2696 | free_block(cachep, ac->entry, batchcount, node); |
| 2690 | free_done: | 2697 | free_done: |
| 2691 | #if STATS | 2698 | #if STATS |
| 2692 | { | 2699 | { |
| @@ -2751,7 +2758,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) | |||
| 2751 | } else { | 2758 | } else { |
| 2752 | spin_lock(&(cachep->nodelists[nodeid])-> | 2759 | spin_lock(&(cachep->nodelists[nodeid])-> |
| 2753 | list_lock); | 2760 | list_lock); |
| 2754 | free_block(cachep, &objp, 1); | 2761 | free_block(cachep, &objp, 1, nodeid); |
| 2755 | spin_unlock(&(cachep->nodelists[nodeid])-> | 2762 | spin_unlock(&(cachep->nodelists[nodeid])-> |
| 2756 | list_lock); | 2763 | list_lock); |
| 2757 | } | 2764 | } |
| @@ -2778,7 +2785,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) | |||
| 2778 | * Allocate an object from this cache. The flags are only relevant | 2785 | * Allocate an object from this cache. The flags are only relevant |
| 2779 | * if the cache has no available objects. | 2786 | * if the cache has no available objects. |
| 2780 | */ | 2787 | */ |
| 2781 | void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) | 2788 | void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) |
| 2782 | { | 2789 | { |
| 2783 | return __cache_alloc(cachep, flags); | 2790 | return __cache_alloc(cachep, flags); |
| 2784 | } | 2791 | } |
| @@ -2839,12 +2846,12 @@ out: | |||
| 2839 | * New and improved: it will now make sure that the object gets | 2846 | * New and improved: it will now make sure that the object gets |
| 2840 | * put on the correct node list so that there is no false sharing. | 2847 | * put on the correct node list so that there is no false sharing. |
| 2841 | */ | 2848 | */ |
| 2842 | void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) | 2849 | void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) |
| 2843 | { | 2850 | { |
| 2844 | unsigned long save_flags; | 2851 | unsigned long save_flags; |
| 2845 | void *ptr; | 2852 | void *ptr; |
| 2846 | 2853 | ||
| 2847 | if (nodeid == numa_node_id() || nodeid == -1) | 2854 | if (nodeid == -1) |
| 2848 | return __cache_alloc(cachep, flags); | 2855 | return __cache_alloc(cachep, flags); |
| 2849 | 2856 | ||
| 2850 | if (unlikely(!cachep->nodelists[nodeid])) { | 2857 | if (unlikely(!cachep->nodelists[nodeid])) { |
| @@ -2855,7 +2862,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i | |||
| 2855 | 2862 | ||
| 2856 | cache_alloc_debugcheck_before(cachep, flags); | 2863 | cache_alloc_debugcheck_before(cachep, flags); |
| 2857 | local_irq_save(save_flags); | 2864 | local_irq_save(save_flags); |
| 2858 | ptr = __cache_alloc_node(cachep, flags, nodeid); | 2865 | if (nodeid == numa_node_id()) |
| 2866 | ptr = ____cache_alloc(cachep, flags); | ||
| 2867 | else | ||
| 2868 | ptr = __cache_alloc_node(cachep, flags, nodeid); | ||
| 2859 | local_irq_restore(save_flags); | 2869 | local_irq_restore(save_flags); |
| 2860 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0)); | 2870 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0)); |
| 2861 | 2871 | ||
| @@ -2863,7 +2873,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i | |||
| 2863 | } | 2873 | } |
| 2864 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 2874 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
| 2865 | 2875 | ||
| 2866 | void *kmalloc_node(size_t size, unsigned int __nocast flags, int node) | 2876 | void *kmalloc_node(size_t size, gfp_t flags, int node) |
| 2867 | { | 2877 | { |
| 2868 | kmem_cache_t *cachep; | 2878 | kmem_cache_t *cachep; |
| 2869 | 2879 | ||
| @@ -2896,7 +2906,7 @@ EXPORT_SYMBOL(kmalloc_node); | |||
| 2896 | * platforms. For example, on i386, it means that the memory must come | 2906 | * platforms. For example, on i386, it means that the memory must come |
| 2897 | * from the first 16MB. | 2907 | * from the first 16MB. |
| 2898 | */ | 2908 | */ |
| 2899 | void *__kmalloc(size_t size, unsigned int __nocast flags) | 2909 | void *__kmalloc(size_t size, gfp_t flags) |
| 2900 | { | 2910 | { |
| 2901 | kmem_cache_t *cachep; | 2911 | kmem_cache_t *cachep; |
| 2902 | 2912 | ||
| @@ -2985,7 +2995,7 @@ EXPORT_SYMBOL(kmem_cache_free); | |||
| 2985 | * @size: how many bytes of memory are required. | 2995 | * @size: how many bytes of memory are required. |
| 2986 | * @flags: the type of memory to allocate. | 2996 | * @flags: the type of memory to allocate. |
| 2987 | */ | 2997 | */ |
| 2988 | void *kzalloc(size_t size, unsigned int __nocast flags) | 2998 | void *kzalloc(size_t size, gfp_t flags) |
| 2989 | { | 2999 | { |
| 2990 | void *ret = kmalloc(size, flags); | 3000 | void *ret = kmalloc(size, flags); |
| 2991 | if (ret) | 3001 | if (ret) |
| @@ -3079,7 +3089,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep) | |||
| 3079 | 3089 | ||
| 3080 | if ((nc = cachep->nodelists[node]->shared)) | 3090 | if ((nc = cachep->nodelists[node]->shared)) |
| 3081 | free_block(cachep, nc->entry, | 3091 | free_block(cachep, nc->entry, |
| 3082 | nc->avail); | 3092 | nc->avail, node); |
| 3083 | 3093 | ||
| 3084 | l3->shared = new; | 3094 | l3->shared = new; |
| 3085 | if (!cachep->nodelists[node]->alien) { | 3095 | if (!cachep->nodelists[node]->alien) { |
| @@ -3160,7 +3170,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, | |||
| 3160 | if (!ccold) | 3170 | if (!ccold) |
| 3161 | continue; | 3171 | continue; |
| 3162 | spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); | 3172 | spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); |
| 3163 | free_block(cachep, ccold->entry, ccold->avail); | 3173 | free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); |
| 3164 | spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); | 3174 | spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); |
| 3165 | kfree(ccold); | 3175 | kfree(ccold); |
| 3166 | } | 3176 | } |
| @@ -3240,7 +3250,7 @@ static void drain_array_locked(kmem_cache_t *cachep, | |||
| 3240 | if (tofree > ac->avail) { | 3250 | if (tofree > ac->avail) { |
| 3241 | tofree = (ac->avail+1)/2; | 3251 | tofree = (ac->avail+1)/2; |
| 3242 | } | 3252 | } |
| 3243 | free_block(cachep, ac->entry, tofree); | 3253 | free_block(cachep, ac->entry, tofree, node); |
| 3244 | ac->avail -= tofree; | 3254 | ac->avail -= tofree; |
| 3245 | memmove(ac->entry, &(ac->entry[tofree]), | 3255 | memmove(ac->entry, &(ac->entry[tofree]), |
| 3246 | sizeof(void*)*ac->avail); | 3256 | sizeof(void*)*ac->avail); |
| @@ -3591,7 +3601,7 @@ unsigned int ksize(const void *objp) | |||
| 3591 | * @s: the string to duplicate | 3601 | * @s: the string to duplicate |
| 3592 | * @gfp: the GFP mask used in the kmalloc() call when allocating memory | 3602 | * @gfp: the GFP mask used in the kmalloc() call when allocating memory |
| 3593 | */ | 3603 | */ |
| 3594 | char *kstrdup(const char *s, unsigned int __nocast gfp) | 3604 | char *kstrdup(const char *s, gfp_t gfp) |
| 3595 | { | 3605 | { |
| 3596 | size_t len; | 3606 | size_t len; |
| 3597 | char *buf; | 3607 | char *buf; |
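The later slab.c hunks split the old __cache_alloc() into ____cache_alloc(), a fast path that assumes interrupts are already disabled, plus a thin wrapper that does the irq save/restore; kmem_cache_alloc_node() can then reuse the local fast path when the requested node is the current one. A rough standalone sketch of that shape, with kernel primitives reduced to stubs (the struct and helpers here are illustrative, not slab's real types):

```c
#include <stdio.h>
#include <stdlib.h>

/* Stubs standing in for kernel primitives -- illustrative only. */
#define local_irq_save(flags)    ((flags) = 1UL)
#define local_irq_restore(flags) ((void)(flags))
static int numa_node_id(void) { return 0; }

struct cache { const char *name; };

/* Fast path: the caller must already have interrupts disabled
 * (the kernel asserts this with check_irq_off()). */
static void *____cache_alloc(struct cache *c)
{
	(void)c;
	return malloc(64);	/* kernel: per-cpu array_cache, refill on miss */
}

/* Ordinary allocation: wrap the fast path in irq save/restore. */
static void *__cache_alloc(struct cache *c)
{
	unsigned long flags;
	void *obj;

	local_irq_save(flags);
	obj = ____cache_alloc(c);
	local_irq_restore(flags);
	return obj;
}

/* Node-aware allocation: reuse the local fast path for the current
 * node, otherwise fall back to a remote-node path. */
static void *cache_alloc_node(struct cache *c, int node)
{
	unsigned long flags;
	void *obj;

	local_irq_save(flags);
	if (node == numa_node_id())
		obj = ____cache_alloc(c);
	else
		obj = malloc(64);	/* kernel: __cache_alloc_node() */
	local_irq_restore(flags);
	return obj;
}

int main(void)
{
	struct cache c = { "demo" };

	free(__cache_alloc(&c));
	free(cache_alloc_node(&c, 0));
	return 0;
}
```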
diff --git a/mm/swap_state.c b/mm/swap_state.c index adbc2b426c2f..132164f7d0a7 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
| @@ -68,7 +68,7 @@ void show_swap_cache_info(void) | |||
| 68 | * but sets SwapCache flag and private instead of mapping and index. | 68 | * but sets SwapCache flag and private instead of mapping and index. |
| 69 | */ | 69 | */ |
| 70 | static int __add_to_swap_cache(struct page *page, swp_entry_t entry, | 70 | static int __add_to_swap_cache(struct page *page, swp_entry_t entry, |
| 71 | unsigned int __nocast gfp_mask) | 71 | gfp_t gfp_mask) |
| 72 | { | 72 | { |
| 73 | int error; | 73 | int error; |
| 74 | 74 | ||
diff --git a/mm/swapfile.c b/mm/swapfile.c index 0184f510aace..1dcaeda039f4 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -1381,6 +1381,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) | |||
| 1381 | error = bd_claim(bdev, sys_swapon); | 1381 | error = bd_claim(bdev, sys_swapon); |
| 1382 | if (error < 0) { | 1382 | if (error < 0) { |
| 1383 | bdev = NULL; | 1383 | bdev = NULL; |
| 1384 | error = -EINVAL; | ||
| 1384 | goto bad_swap; | 1385 | goto bad_swap; |
| 1385 | } | 1386 | } |
| 1386 | p->old_block_size = block_size(bdev); | 1387 | p->old_block_size = block_size(bdev); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 13c3d82968ae..1150229b6366 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -395,7 +395,7 @@ void *vmap(struct page **pages, unsigned int count, | |||
| 395 | 395 | ||
| 396 | EXPORT_SYMBOL(vmap); | 396 | EXPORT_SYMBOL(vmap); |
| 397 | 397 | ||
| 398 | void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot) | 398 | void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) |
| 399 | { | 399 | { |
| 400 | struct page **pages; | 400 | struct page **pages; |
| 401 | unsigned int nr_pages, array_size, i; | 401 | unsigned int nr_pages, array_size, i; |
| @@ -446,7 +446,7 @@ fail: | |||
| 446 | * allocator with @gfp_mask flags. Map them into contiguous | 446 | * allocator with @gfp_mask flags. Map them into contiguous |
| 447 | * kernel virtual space, using a pagetable protection of @prot. | 447 | * kernel virtual space, using a pagetable protection of @prot. |
| 448 | */ | 448 | */ |
| 449 | void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot) | 449 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) |
| 450 | { | 450 | { |
| 451 | struct vm_struct *area; | 451 | struct vm_struct *area; |
| 452 | 452 | ||
diff --git a/mm/vmscan.c b/mm/vmscan.c index 0ea71e887bb6..64f9570cff56 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -511,10 +511,11 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) | |||
| 511 | * PageDirty _after_ making sure that the page is freeable and | 511 | * PageDirty _after_ making sure that the page is freeable and |
| 512 | * not in use by anybody. (pagecache + us == 2) | 512 | * not in use by anybody. (pagecache + us == 2) |
| 513 | */ | 513 | */ |
| 514 | if (page_count(page) != 2 || PageDirty(page)) { | 514 | if (unlikely(page_count(page) != 2)) |
| 515 | write_unlock_irq(&mapping->tree_lock); | 515 | goto cannot_free; |
| 516 | goto keep_locked; | 516 | smp_rmb(); |
| 517 | } | 517 | if (unlikely(PageDirty(page))) |
| 518 | goto cannot_free; | ||
| 518 | 519 | ||
| 519 | #ifdef CONFIG_SWAP | 520 | #ifdef CONFIG_SWAP |
| 520 | if (PageSwapCache(page)) { | 521 | if (PageSwapCache(page)) { |
| @@ -538,6 +539,10 @@ free_it: | |||
| 538 | __pagevec_release_nonlru(&freed_pvec); | 539 | __pagevec_release_nonlru(&freed_pvec); |
| 539 | continue; | 540 | continue; |
| 540 | 541 | ||
| 542 | cannot_free: | ||
| 543 | write_unlock_irq(&mapping->tree_lock); | ||
| 544 | goto keep_locked; | ||
| 545 | |||
| 541 | activate_locked: | 546 | activate_locked: |
| 542 | SetPageActive(page); | 547 | SetPageActive(page); |
| 543 | pgactivate++; | 548 | pgactivate++; |
