| author | Ingo Molnar <mingo@elte.hu> | 2008-06-16 05:24:00 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-06-16 05:24:00 -0400 |
| commit | 6d72b7952fa7d7c61d021398970c29afde6a4443 (patch) | |
| tree | 31c00be8e2837e2db2e62c694421a93a9f4c79d7 /mm | |
| parent | 6360b1fbb4a939efd34fc770c2ebd927c55506e0 (diff) | |
| parent | 066519068ad2fbe98c7f45552b1f592903a9c8c8 (diff) | |
Merge branch 'linus' into core/rodata
Diffstat (limited to 'mm')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | mm/hugetlb.c | 2 |
| -rw-r--r-- | mm/memory.c | 2 |
| -rw-r--r-- | mm/mmap.c | 12 |
| -rw-r--r-- | mm/nommu.c | 21 |
| -rw-r--r-- | mm/page_alloc.c | 42 |
| -rw-r--r-- | mm/pagewalk.c | 42 |
| -rw-r--r-- | mm/slob.c | 5 |
| -rw-r--r-- | mm/slub.c | 5 |
| -rw-r--r-- | mm/swap.c | 4 |
| -rw-r--r-- | mm/vmscan.c | 2 |
10 files changed, 64 insertions, 73 deletions
```diff
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bbf953eeb58b..ab171274ef21 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -785,7 +785,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			continue;
 
 		spin_lock(&dst->page_table_lock);
-		spin_lock(&src->page_table_lock);
+		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
 		if (!huge_pte_none(huge_ptep_get(src_pte))) {
 			if (cow)
 				huge_ptep_set_wrprotect(src, addr, src_pte);
```
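The hunk above replaces a plain spin_lock() on the source page-table lock with spin_lock_nested(). Both locks belong to the same lock class, so the second acquisition has to be annotated or lockdep reports a false-positive deadlock. A minimal sketch of that pattern, using a hypothetical structure and function name (not taken from this commit):

```c
#include <linux/spinlock.h>

/* Hypothetical object with a per-instance lock; all instances share one lock class. */
struct demo_obj {
	spinlock_t lock;
	int value;
};

/* Copy state between two objects whose locks share a lock class. */
static void demo_copy(struct demo_obj *dst, struct demo_obj *src)
{
	spin_lock(&dst->lock);
	/* Same lock class as dst->lock: tell lockdep this single-level nesting is intended. */
	spin_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	dst->value = src->value;

	spin_unlock(&src->lock);
	spin_unlock(&dst->lock);
}
```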
```diff
diff --git a/mm/memory.c b/mm/memory.c
index fb5608a120ed..19e0ae9beecb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2295,8 +2295,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	vmf.flags = flags;
 	vmf.page = NULL;
 
-	BUG_ON(vma->vm_flags & VM_PFNMAP);
-
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
 		return ret;
```
```diff
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;	/* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
@@ -177,7 +177,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	 * cast `allowed' as a signed long because vm_committed_space
 	 * sometimes has a negative value
 	 */
-	if (atomic_read(&vm_committed_space) < (long)allowed)
+	if (atomic_long_read(&vm_committed_space) < (long)allowed)
 		return 0;
 error:
 	vm_unacct_memory(pages);
@@ -245,10 +245,16 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 	unsigned long rlim, retval;
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
+	unsigned long min_brk;
 
 	down_write(&mm->mmap_sem);
 
-	if (brk < mm->start_brk)
+#ifdef CONFIG_COMPAT_BRK
+	min_brk = mm->end_code;
+#else
+	min_brk = mm->start_brk;
+#endif
+	if (brk < min_brk)
 		goto out;
 
 	/*
```
```diff
diff --git a/mm/nommu.c b/mm/nommu.c
index ef8c62cec697..4462b6a3fcb9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -39,7 +39,7 @@ struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long num_physpages;
 unsigned long askedalloc, realalloc;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
@@ -109,16 +109,23 @@ unsigned int kobjsize(const void *objp)
 	 * If the object we have should not have ksize performed on it,
 	 * return size of 0
 	 */
-	if (!objp || (unsigned long)objp >= memory_end || !((page = virt_to_page(objp))))
+	if (!objp || !virt_addr_valid(objp))
 		return 0;
 
+	page = virt_to_head_page(objp);
+
+	/*
+	 * If the allocator sets PageSlab, we know the pointer came from
+	 * kmalloc().
+	 */
 	if (PageSlab(page))
 		return ksize(objp);
 
-	BUG_ON(page->index < 0);
-	BUG_ON(page->index >= MAX_ORDER);
-
-	return (PAGE_SIZE << page->index);
+	/*
+	 * The ksize() function is only guaranteed to work for pointers
+	 * returned by kmalloc(). So handle arbitrary pointers here.
+	 */
+	return PAGE_SIZE << compound_order(page);
 }
 
 /*
@@ -1410,7 +1417,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	 * cast `allowed' as a signed long because vm_committed_space
 	 * sometimes has a negative value
 	 */
-	if (atomic_read(&vm_committed_space) < (long)allowed)
+	if (atomic_long_read(&vm_committed_space) < (long)allowed)
 		return 0;
 error:
 	vm_unacct_memory(pages);
```
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63835579323a..2f552955a02f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -237,16 +237,7 @@ static void bad_page(struct page *page)
 	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
 		KERN_EMERG "Backtrace:\n");
 	dump_stack();
-	page->flags &= ~(1 << PG_lru |
-			1 << PG_private |
-			1 << PG_locked |
-			1 << PG_active |
-			1 << PG_dirty |
-			1 << PG_reclaim |
-			1 << PG_slab |
-			1 << PG_swapcache |
-			1 << PG_writeback |
-			1 << PG_buddy );
+	page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD;
 	set_page_count(page, 0);
 	reset_page_mapcount(page);
 	page->mapping = NULL;
@@ -463,16 +454,7 @@ static inline int free_pages_check(struct page *page)
 		(page->mapping != NULL) |
 		(page_get_page_cgroup(page) != NULL) |
 		(page_count(page) != 0) |
-		(page->flags & (
-			1 << PG_lru |
-			1 << PG_private |
-			1 << PG_locked |
-			1 << PG_active |
-			1 << PG_slab |
-			1 << PG_swapcache |
-			1 << PG_writeback |
-			1 << PG_reserved |
-			1 << PG_buddy ))))
+		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
 		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
@@ -616,17 +598,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 		(page->mapping != NULL) |
 		(page_get_page_cgroup(page) != NULL) |
 		(page_count(page) != 0) |
-		(page->flags & (
-			1 << PG_lru |
-			1 << PG_private |
-			1 << PG_locked |
-			1 << PG_active |
-			1 << PG_dirty |
-			1 << PG_slab |
-			1 << PG_swapcache |
-			1 << PG_writeback |
-			1 << PG_reserved |
-			1 << PG_buddy ))))
+		(page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
 		bad_page(page);
 
 	/*
```
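The three hunks above fold the hand-written flag lists into shared masks (PAGE_FLAGS_CLEAR_WHEN_BAD, PAGE_FLAGS_CHECK_AT_FREE, PAGE_FLAGS_CHECK_AT_PREP) whose definitions are not part of this diff. A rough, illustrative sketch of what such a mask looks like, built only from the bits visible in the removed lines; the real definitions live elsewhere in the series:

```c
/*
 * Illustrative only -- not the actual definition from this commit.
 * Combines the individual PG_* bits that the open-coded check above used.
 */
#define PAGE_FLAGS_CHECK_AT_FREE					\
	(1 << PG_lru | 1 << PG_private | 1 << PG_locked |		\
	 1 << PG_active | 1 << PG_slab | 1 << PG_swapcache |		\
	 1 << PG_writeback | 1 << PG_reserved | 1 << PG_buddy)
```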
```diff
@@ -1396,6 +1368,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 
 	(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
 							&preferred_zone);
+	if (!preferred_zone)
+		return NULL;
+
 	classzone_idx = zone_idx(preferred_zone);
 
 zonelist_scan:
@@ -2804,7 +2779,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 	alloc_size = zone->wait_table_hash_nr_entries
 					* sizeof(wait_queue_head_t);
 
-	if (system_state == SYSTEM_BOOTING) {
+	if (!slab_is_available()) {
 		zone->wait_table = (wait_queue_head_t *)
 			alloc_bootmem_node(pgdat, alloc_size);
 	} else {
@@ -3378,7 +3353,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		 * is used by this zone for memmap. This affects the watermark
 		 * and per-cpu initialisations
 		 */
-		memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+		memmap_pages =
+			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
 		if (realsize >= memmap_pages) {
 			realsize -= memmap_pages;
 			printk(KERN_DEBUG
```
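The last hunk rounds the memmap size up to a whole number of pages before converting it to a page count; without PAGE_ALIGN, a zone whose `size * sizeof(struct page)` is not page-aligned under-counts the memmap by one page. A worked example with assumed numbers (4 KiB pages, a 56-byte struct page, a 1000-page zone), written as a standalone user-space program for illustration:

```c
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* Round up to a page boundary, mirroring the kernel's PAGE_ALIGN(). */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long zone_pages = 1000;	/* assumed zone size in pages */
	unsigned long page_struct_sz = 56;	/* assumed sizeof(struct page) */
	unsigned long bytes = zone_pages * page_struct_sz;	/* 56000 bytes */

	/* Truncating shift loses the partially used final page. */
	printf("truncating: %lu pages\n", bytes >> PAGE_SHIFT);		/* 13 */
	printf("aligned:    %lu pages\n", PAGE_ALIGN(bytes) >> PAGE_SHIFT);	/* 14 */
	return 0;
}
```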
```diff
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 0afd2387e507..d5878bed7841 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -3,14 +3,14 @@
 #include <linux/sched.h>
 
 static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-			  const struct mm_walk *walk, void *private)
+			  struct mm_walk *walk)
 {
 	pte_t *pte;
 	int err = 0;
 
 	pte = pte_offset_map(pmd, addr);
 	for (;;) {
-		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private);
+		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
 		if (err)
 			break;
 		addr += PAGE_SIZE;
@@ -24,7 +24,7 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 }
 
 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
-			  const struct mm_walk *walk, void *private)
+			  struct mm_walk *walk)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -35,15 +35,15 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd)) {
 			if (walk->pte_hole)
-				err = walk->pte_hole(addr, next, private);
+				err = walk->pte_hole(addr, next, walk);
 			if (err)
 				break;
 			continue;
 		}
 		if (walk->pmd_entry)
-			err = walk->pmd_entry(pmd, addr, next, private);
+			err = walk->pmd_entry(pmd, addr, next, walk);
 		if (!err && walk->pte_entry)
-			err = walk_pte_range(pmd, addr, next, walk, private);
+			err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
 	} while (pmd++, addr = next, addr != end);
@@ -52,7 +52,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 }
 
 static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
-			  const struct mm_walk *walk, void *private)
+			  struct mm_walk *walk)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -63,15 +63,15 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud)) {
 			if (walk->pte_hole)
-				err = walk->pte_hole(addr, next, private);
+				err = walk->pte_hole(addr, next, walk);
 			if (err)
 				break;
 			continue;
 		}
 		if (walk->pud_entry)
-			err = walk->pud_entry(pud, addr, next, private);
+			err = walk->pud_entry(pud, addr, next, walk);
 		if (!err && (walk->pmd_entry || walk->pte_entry))
-			err = walk_pmd_range(pud, addr, next, walk, private);
+			err = walk_pmd_range(pud, addr, next, walk);
 		if (err)
 			break;
 	} while (pud++, addr = next, addr != end);
@@ -85,15 +85,15 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
  * @addr: starting address
  * @end: ending address
  * @walk: set of callbacks to invoke for each level of the tree
- * @private: private data passed to the callback function
  *
  * Recursively walk the page table for the memory area in a VMA,
  * calling supplied callbacks. Callbacks are called in-order (first
  * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
  * etc.). If lower-level callbacks are omitted, walking depth is reduced.
  *
- * Each callback receives an entry pointer, the start and end of the
- * associated range, and a caller-supplied private data pointer.
+ * Each callback receives an entry pointer and the start and end of the
+ * associated range, and a copy of the original mm_walk for access to
+ * the ->private or ->mm fields.
  *
  * No locks are taken, but the bottom level iterator will map PTE
  * directories from highmem if necessary.
@@ -101,9 +101,8 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is returned.
 */
-int walk_page_range(const struct mm_struct *mm,
-		    unsigned long addr, unsigned long end,
-		    const struct mm_walk *walk, void *private)
+int walk_page_range(unsigned long addr, unsigned long end,
+		    struct mm_walk *walk)
 {
 	pgd_t *pgd;
 	unsigned long next;
@@ -112,21 +111,24 @@ int walk_page_range(const struct mm_struct *mm,
 	if (addr >= end)
 		return err;
 
-	pgd = pgd_offset(mm, addr);
+	if (!walk->mm)
+		return -EINVAL;
+
+	pgd = pgd_offset(walk->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd)) {
 			if (walk->pte_hole)
-				err = walk->pte_hole(addr, next, private);
+				err = walk->pte_hole(addr, next, walk);
 			if (err)
 				break;
 			continue;
 		}
 		if (walk->pgd_entry)
-			err = walk->pgd_entry(pgd, addr, next, private);
+			err = walk->pgd_entry(pgd, addr, next, walk);
 		if (!err &&
 		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
-			err = walk_pud_range(pgd, addr, next, walk, private);
+			err = walk_pud_range(pgd, addr, next, walk);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
```
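With this interface change, walk_page_range() no longer takes the mm_struct and a private pointer as separate arguments; callers fill in walk->mm and walk->private, and every callback receives the mm_walk itself. A minimal caller sketch under that assumption, counting present PTEs in a range; the callback, structure, and function names are made up for illustration:

```c
#include <linux/mm.h>

/* Hypothetical per-walk state, reachable through walk->private. */
struct pte_counter {
	unsigned long present;
};

static int count_pte(pte_t *pte, unsigned long addr, unsigned long end,
		     struct mm_walk *walk)
{
	struct pte_counter *ctr = walk->private;

	if (pte_present(*pte))
		ctr->present++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct pte_counter ctr = { 0 };
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,	/* required: walk_page_range() checks this */
		.private	= &ctr,
	};

	walk_page_range(start, end, &walk);
	return ctr.present;
}
```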
```diff
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -469,8 +469,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
-		if (m)
-			*m = size;
+		if (!m)
+			return NULL;
+		*m = size;
 		return (void *)m + align;
 	} else {
 		void *ret;
```
```diff
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2726,9 +2726,10 @@ size_t ksize(const void *object)
 
 	page = virt_to_head_page(object);
 
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!PageSlab(page))) {
+		WARN_ON(!PageCompound(page));
 		return PAGE_SIZE << compound_order(page);
-
+	}
 	s = page->slab;
 
 #ifdef CONFIG_SLUB_DEBUG
```
```diff
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -503,7 +503,7 @@ void vm_acct_memory(long pages)
 	local = &__get_cpu_var(committed_space);
 	*local += pages;
 	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
-		atomic_add(*local, &vm_committed_space);
+		atomic_long_add(*local, &vm_committed_space);
 		*local = 0;
 	}
 	preempt_enable();
@@ -520,7 +520,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 
 	committed = &per_cpu(committed_space, (long)hcpu);
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		atomic_add(*committed, &vm_committed_space);
+		atomic_long_add(*committed, &vm_committed_space);
 		*committed = 0;
 		drain_cpu_pagevecs((long)hcpu);
 	}
```
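vm_acct_memory() above batches updates in a per-CPU counter and only folds them into the global vm_committed_space (now an atomic_long_t) once the local delta crosses a threshold, which keeps the shared cache line from bouncing on every accounting call. A stripped-down sketch of the same pattern with hypothetical names and an assumed threshold; includes follow the conventions of this kernel era:

```c
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/atomic.h>

#define DEMO_THRESHOLD	16	/* assumed batching threshold */

static DEFINE_PER_CPU(long, demo_local);	/* per-CPU pending delta */
static atomic_long_t demo_global = ATOMIC_LONG_INIT(0);

static void demo_account(long pages)
{
	long *local;

	preempt_disable();
	local = &__get_cpu_var(demo_local);
	*local += pages;
	/* Flush to the shared counter only when the local delta is large. */
	if (*local > DEMO_THRESHOLD || *local < -DEMO_THRESHOLD) {
		atomic_long_add(*local, &demo_global);
		*local = 0;
	}
	preempt_enable();
}
```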
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a29901ad3b3..967d30ccd92b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1307,7 +1307,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	int priority;
-	int ret = 0;
+	unsigned long ret = 0;
 	unsigned long total_scanned = 0;
 	unsigned long nr_reclaimed = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
```
