Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig           |   2
-rw-r--r--   mm/backing-dev.c     |   9
-rw-r--r--   mm/bootmem.c         |  13
-rw-r--r--   mm/bounce.c          |   2
-rw-r--r--   mm/compaction.c      |   7
-rw-r--r--   mm/fremap.c          |   7
-rw-r--r--   mm/hugetlb.c         |  24
-rw-r--r--   mm/ksm.c             |   9
-rw-r--r--   mm/memblock.c        | 837
-rw-r--r--   mm/memcontrol.c      |  10
-rw-r--r--   mm/memory-failure.c  |  12
-rw-r--r--   mm/memory.c          |  43
-rw-r--r--   mm/memory_hotplug.c  |  18
-rw-r--r--   mm/mlock.c           |   6
-rw-r--r--   mm/mmap.c            |   1
-rw-r--r--   mm/mmzone.c          |  21
-rw-r--r--   mm/oom_kill.c        |  49
-rw-r--r--   mm/page_alloc.c      | 123
-rw-r--r--   mm/percpu.c          |   2
-rw-r--r--   mm/rmap.c            |  23
-rw-r--r--   mm/sparse-vmemmap.c  |  11
-rw-r--r--   mm/swapfile.c        | 129
-rw-r--r--   mm/vmalloc.c         |   9
-rw-r--r--   mm/vmscan.c          |  43
-rw-r--r--   mm/vmstat.c          |  16
25 files changed, 919 insertions, 507 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 01a57447a410..c2c8a4a11898 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -189,7 +189,7 @@ config COMPACTION | |||
189 | config MIGRATION | 189 | config MIGRATION |
190 | bool "Page migration" | 190 | bool "Page migration" |
191 | def_bool y | 191 | def_bool y |
192 | depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE | 192 | depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION |
193 | help | 193 | help |
194 | Allows the migration of the physical location of pages of processes | 194 | Allows the migration of the physical location of pages of processes |
195 | while the virtual addresses are not changed. This is useful in | 195 | while the virtual addresses are not changed. This is useful in |
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index eaa4a5bbe063..65d420499a61 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info); | |||
30 | 30 | ||
31 | struct backing_dev_info noop_backing_dev_info = { | 31 | struct backing_dev_info noop_backing_dev_info = { |
32 | .name = "noop", | 32 | .name = "noop", |
33 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, | ||
33 | }; | 34 | }; |
34 | EXPORT_SYMBOL_GPL(noop_backing_dev_info); | 35 | EXPORT_SYMBOL_GPL(noop_backing_dev_info); |
35 | 36 | ||
@@ -243,6 +244,7 @@ static int __init default_bdi_init(void) | |||
243 | err = bdi_init(&default_backing_dev_info); | 244 | err = bdi_init(&default_backing_dev_info); |
244 | if (!err) | 245 | if (!err) |
245 | bdi_register(&default_backing_dev_info, NULL, "default"); | 246 | bdi_register(&default_backing_dev_info, NULL, "default"); |
247 | err = bdi_init(&noop_backing_dev_info); | ||
246 | 248 | ||
247 | return err; | 249 | return err; |
248 | } | 250 | } |
@@ -445,8 +447,8 @@ static int bdi_forker_thread(void *ptr) | |||
445 | switch (action) { | 447 | switch (action) { |
446 | case FORK_THREAD: | 448 | case FORK_THREAD: |
447 | __set_current_state(TASK_RUNNING); | 449 | __set_current_state(TASK_RUNNING); |
448 | task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s", | 450 | task = kthread_create(bdi_writeback_thread, &bdi->wb, |
449 | dev_name(bdi->dev)); | 451 | "flush-%s", dev_name(bdi->dev)); |
450 | if (IS_ERR(task)) { | 452 | if (IS_ERR(task)) { |
451 | /* | 453 | /* |
452 | * If thread creation fails, force writeout of | 454 | * If thread creation fails, force writeout of |
@@ -457,10 +459,13 @@ static int bdi_forker_thread(void *ptr) | |||
457 | /* | 459 | /* |
458 | * The spinlock makes sure we do not lose | 460 | * The spinlock makes sure we do not lose |
459 | * wake-ups when racing with 'bdi_queue_work()'. | 461 | * wake-ups when racing with 'bdi_queue_work()'. |
462 | * And as soon as the bdi thread is visible, we | ||
463 | * can start it. | ||
460 | */ | 464 | */ |
461 | spin_lock_bh(&bdi->wb_lock); | 465 | spin_lock_bh(&bdi->wb_lock); |
462 | bdi->wb.task = task; | 466 | bdi->wb.task = task; |
463 | spin_unlock_bh(&bdi->wb_lock); | 467 | spin_unlock_bh(&bdi->wb_lock); |
468 | wake_up_process(task); | ||
464 | } | 469 | } |
465 | break; | 470 | break; |
466 | 471 | ||
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 142c84a54993..13b0caa9793c 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/kmemleak.h> | 16 | #include <linux/kmemleak.h> |
17 | #include <linux/range.h> | 17 | #include <linux/range.h> |
18 | #include <linux/memblock.h> | ||
18 | 19 | ||
19 | #include <asm/bug.h> | 20 | #include <asm/bug.h> |
20 | #include <asm/io.h> | 21 | #include <asm/io.h> |
@@ -434,7 +435,8 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | |||
434 | unsigned long size) | 435 | unsigned long size) |
435 | { | 436 | { |
436 | #ifdef CONFIG_NO_BOOTMEM | 437 | #ifdef CONFIG_NO_BOOTMEM |
437 | free_early(physaddr, physaddr + size); | 438 | kmemleak_free_part(__va(physaddr), size); |
439 | memblock_x86_free_range(physaddr, physaddr + size); | ||
438 | #else | 440 | #else |
439 | unsigned long start, end; | 441 | unsigned long start, end; |
440 | 442 | ||
@@ -459,7 +461,8 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | |||
459 | void __init free_bootmem(unsigned long addr, unsigned long size) | 461 | void __init free_bootmem(unsigned long addr, unsigned long size) |
460 | { | 462 | { |
461 | #ifdef CONFIG_NO_BOOTMEM | 463 | #ifdef CONFIG_NO_BOOTMEM |
462 | free_early(addr, addr + size); | 464 | kmemleak_free_part(__va(addr), size); |
465 | memblock_x86_free_range(addr, addr + size); | ||
463 | #else | 466 | #else |
464 | unsigned long start, end; | 467 | unsigned long start, end; |
465 | 468 | ||
@@ -526,6 +529,12 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size, | |||
526 | } | 529 | } |
527 | 530 | ||
528 | #ifndef CONFIG_NO_BOOTMEM | 531 | #ifndef CONFIG_NO_BOOTMEM |
532 | int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len, | ||
533 | int flags) | ||
534 | { | ||
535 | return reserve_bootmem(phys, len, flags); | ||
536 | } | ||
537 | |||
529 | static unsigned long __init align_idx(struct bootmem_data *bdata, | 538 | static unsigned long __init align_idx(struct bootmem_data *bdata, |
530 | unsigned long idx, unsigned long step) | 539 | unsigned long idx, unsigned long step) |
531 | { | 540 | { |
diff --git a/mm/bounce.c b/mm/bounce.c
index 13b6dad1eed2..1481de68184b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from) | |||
116 | */ | 116 | */ |
117 | vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; | 117 | vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; |
118 | 118 | ||
119 | flush_dcache_page(tovec->bv_page); | ||
120 | bounce_copy_vec(tovec, vfrom); | 119 | bounce_copy_vec(tovec, vfrom); |
120 | flush_dcache_page(tovec->bv_page); | ||
121 | } | 121 | } |
122 | } | 122 | } |
123 | 123 | ||
diff --git a/mm/compaction.c b/mm/compaction.c
index 94cce51b0b35..4d709ee59013 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc) | |||
214 | /* Similar to reclaim, but different enough that they don't share logic */ | 214 | /* Similar to reclaim, but different enough that they don't share logic */ |
215 | static bool too_many_isolated(struct zone *zone) | 215 | static bool too_many_isolated(struct zone *zone) |
216 | { | 216 | { |
217 | 217 | unsigned long active, inactive, isolated; | |
218 | unsigned long inactive, isolated; | ||
219 | 218 | ||
220 | inactive = zone_page_state(zone, NR_INACTIVE_FILE) + | 219 | inactive = zone_page_state(zone, NR_INACTIVE_FILE) + |
221 | zone_page_state(zone, NR_INACTIVE_ANON); | 220 | zone_page_state(zone, NR_INACTIVE_ANON); |
221 | active = zone_page_state(zone, NR_ACTIVE_FILE) + | ||
222 | zone_page_state(zone, NR_ACTIVE_ANON); | ||
222 | isolated = zone_page_state(zone, NR_ISOLATED_FILE) + | 223 | isolated = zone_page_state(zone, NR_ISOLATED_FILE) + |
223 | zone_page_state(zone, NR_ISOLATED_ANON); | 224 | zone_page_state(zone, NR_ISOLATED_ANON); |
224 | 225 | ||
225 | return isolated > inactive; | 226 | return isolated > (inactive + active) / 2; |
226 | } | 227 | } |
227 | 228 | ||
228 | /* | 229 | /* |
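For illustration, a stand-alone C model of the reworked throttling check: compaction now backs off only when isolated pages exceed half of all LRU pages (active + inactive) rather than comparing against the inactive count alone. The counters are passed in directly here instead of being read via zone_page_state(), purely to keep the sketch self-contained.

#include <stdbool.h>

/* Simplified model of too_many_isolated(): true means compaction should
 * wait because too many pages are already isolated from the LRU lists. */
static bool too_many_isolated_model(unsigned long active, unsigned long inactive,
                                    unsigned long isolated)
{
        return isolated > (inactive + active) / 2;
}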
diff --git a/mm/fremap.c b/mm/fremap.c
index 46f5dacf90a2..ec520c7b28df 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
125 | { | 125 | { |
126 | struct mm_struct *mm = current->mm; | 126 | struct mm_struct *mm = current->mm; |
127 | struct address_space *mapping; | 127 | struct address_space *mapping; |
128 | unsigned long end = start + size; | ||
129 | struct vm_area_struct *vma; | 128 | struct vm_area_struct *vma; |
130 | int err = -EINVAL; | 129 | int err = -EINVAL; |
131 | int has_write_lock = 0; | 130 | int has_write_lock = 0; |
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
142 | if (start + size <= start) | 141 | if (start + size <= start) |
143 | return err; | 142 | return err; |
144 | 143 | ||
144 | /* Does pgoff wrap? */ | ||
145 | if (pgoff + (size >> PAGE_SHIFT) < pgoff) | ||
146 | return err; | ||
147 | |||
145 | /* Can we represent this offset inside this architecture's pte's? */ | 148 | /* Can we represent this offset inside this architecture's pte's? */ |
146 | #if PTE_FILE_MAX_BITS < BITS_PER_LONG | 149 | #if PTE_FILE_MAX_BITS < BITS_PER_LONG |
147 | if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) | 150 | if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) |
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
168 | if (!(vma->vm_flags & VM_CAN_NONLINEAR)) | 171 | if (!(vma->vm_flags & VM_CAN_NONLINEAR)) |
169 | goto out; | 172 | goto out; |
170 | 173 | ||
171 | if (end <= start || start < vma->vm_start || end > vma->vm_end) | 174 | if (start < vma->vm_start || start + size > vma->vm_end) |
172 | goto out; | 175 | goto out; |
173 | 176 | ||
174 | /* Must set VM_NONLINEAR before any pages are populated. */ | 177 | /* Must set VM_NONLINEAR before any pages are populated. */ |
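The added check guards against the file-offset computation wrapping around, mirroring the existing start + size wrap test just above it. A small stand-alone sketch of both tests; PAGE_SHIFT is hard-coded to 12 here only so the example compiles on its own, whereas the kernel uses the architecture's real value.

#include <stdbool.h>

#define MODEL_PAGE_SHIFT 12    /* placeholder for the arch's PAGE_SHIFT */

/* Returns true when either the address range or the page offset wraps. */
static bool remap_args_wrap(unsigned long start, unsigned long size,
                            unsigned long pgoff)
{
        if (start + size <= start)                      /* address range wraps */
                return true;
        if (pgoff + (size >> MODEL_PAGE_SHIFT) < pgoff) /* file offset wraps */
                return true;
        return false;
}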
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cc5be788a39f..c03273807182 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@ retry_avoidcopy: | |||
2324 | * and just make the page writable */ | 2324 | * and just make the page writable */ |
2325 | avoidcopy = (page_mapcount(old_page) == 1); | 2325 | avoidcopy = (page_mapcount(old_page) == 1); |
2326 | if (avoidcopy) { | 2326 | if (avoidcopy) { |
2327 | if (!trylock_page(old_page)) { | 2327 | if (PageAnon(old_page)) |
2328 | if (PageAnon(old_page)) | 2328 | page_move_anon_rmap(old_page, vma, address); |
2329 | page_move_anon_rmap(old_page, vma, address); | ||
2330 | } else | ||
2331 | unlock_page(old_page); | ||
2332 | set_huge_ptep_writable(vma, address, ptep); | 2329 | set_huge_ptep_writable(vma, address, ptep); |
2333 | return 0; | 2330 | return 0; |
2334 | } | 2331 | } |
@@ -2404,7 +2401,7 @@ retry_avoidcopy: | |||
2404 | set_huge_pte_at(mm, address, ptep, | 2401 | set_huge_pte_at(mm, address, ptep, |
2405 | make_huge_pte(vma, new_page, 1)); | 2402 | make_huge_pte(vma, new_page, 1)); |
2406 | page_remove_rmap(old_page); | 2403 | page_remove_rmap(old_page); |
2407 | hugepage_add_anon_rmap(new_page, vma, address); | 2404 | hugepage_add_new_anon_rmap(new_page, vma, address); |
2408 | /* Make the old page be freed below */ | 2405 | /* Make the old page be freed below */ |
2409 | new_page = old_page; | 2406 | new_page = old_page; |
2410 | mmu_notifier_invalidate_range_end(mm, | 2407 | mmu_notifier_invalidate_range_end(mm, |
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2631 | vma, address); | 2628 | vma, address); |
2632 | } | 2629 | } |
2633 | 2630 | ||
2634 | if (!pagecache_page) { | 2631 | /* |
2635 | page = pte_page(entry); | 2632 | * hugetlb_cow() requires page locks of pte_page(entry) and |
2633 | * pagecache_page, so here we need take the former one | ||
2634 | * when page != pagecache_page or !pagecache_page. | ||
2635 | * Note that locking order is always pagecache_page -> page, | ||
2636 | * so no worry about deadlock. | ||
2637 | */ | ||
2638 | page = pte_page(entry); | ||
2639 | if (page != pagecache_page) | ||
2636 | lock_page(page); | 2640 | lock_page(page); |
2637 | } | ||
2638 | 2641 | ||
2639 | spin_lock(&mm->page_table_lock); | 2642 | spin_lock(&mm->page_table_lock); |
2640 | /* Check for a racing update before calling hugetlb_cow */ | 2643 | /* Check for a racing update before calling hugetlb_cow */ |
@@ -2661,9 +2664,8 @@ out_page_table_lock: | |||
2661 | if (pagecache_page) { | 2664 | if (pagecache_page) { |
2662 | unlock_page(pagecache_page); | 2665 | unlock_page(pagecache_page); |
2663 | put_page(pagecache_page); | 2666 | put_page(pagecache_page); |
2664 | } else { | ||
2665 | unlock_page(page); | ||
2666 | } | 2667 | } |
2668 | unlock_page(page); | ||
2667 | 2669 | ||
2668 | out_mutex: | 2670 | out_mutex: |
2669 | mutex_unlock(&hugetlb_instantiation_mutex); | 2671 | mutex_unlock(&hugetlb_instantiation_mutex); |
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -712,7 +712,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
712 | if (!ptep) | 712 | if (!ptep) |
713 | goto out; | 713 | goto out; |
714 | 714 | ||
715 | if (pte_write(*ptep)) { | 715 | if (pte_write(*ptep) || pte_dirty(*ptep)) { |
716 | pte_t entry; | 716 | pte_t entry; |
717 | 717 | ||
718 | swapped = PageSwapCache(page); | 718 | swapped = PageSwapCache(page); |
@@ -735,7 +735,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
735 | set_pte_at(mm, addr, ptep, entry); | 735 | set_pte_at(mm, addr, ptep, entry); |
736 | goto out_unlock; | 736 | goto out_unlock; |
737 | } | 737 | } |
738 | entry = pte_wrprotect(entry); | 738 | if (pte_dirty(entry)) |
739 | set_page_dirty(page); | ||
740 | entry = pte_mkclean(pte_wrprotect(entry)); | ||
739 | set_pte_at_notify(mm, addr, ptep, entry); | 741 | set_pte_at_notify(mm, addr, ptep, entry); |
740 | } | 742 | } |
741 | *orig_pte = *ptep; | 743 | *orig_pte = *ptep; |
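write_protect_page() now also covers a pte that is dirty even when not writable: the dirty bit is transferred to the struct page via set_page_dirty() before the pte is write-protected and cleaned, so dirty data cannot be lost when the mapping is later replaced by the shared KSM page. A toy model with the pte represented as a plain flag word; the MODEL_* flag names are illustrative, not the kernel's pte bits.

#include <stdbool.h>

#define MODEL_PTE_WRITE  0x1
#define MODEL_PTE_DIRTY  0x2

/* Returns the new pte value; *page_dirty models set_page_dirty(page). */
static unsigned int wrprotect_and_clean(unsigned int pte, bool *page_dirty)
{
        if (pte & MODEL_PTE_DIRTY)
                *page_dirty = true;             /* set_page_dirty(page) */
        /* pte_mkclean(pte_wrprotect(entry)) */
        return pte & ~(MODEL_PTE_WRITE | MODEL_PTE_DIRTY);
}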
@@ -1504,8 +1506,6 @@ struct page *ksm_does_need_to_copy(struct page *page, | |||
1504 | { | 1506 | { |
1505 | struct page *new_page; | 1507 | struct page *new_page; |
1506 | 1508 | ||
1507 | unlock_page(page); /* any racers will COW it, not modify it */ | ||
1508 | |||
1509 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); | 1509 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
1510 | if (new_page) { | 1510 | if (new_page) { |
1511 | copy_user_highpage(new_page, page, address, vma); | 1511 | copy_user_highpage(new_page, page, address, vma); |
@@ -1521,7 +1521,6 @@ struct page *ksm_does_need_to_copy(struct page *page, | |||
1521 | add_page_to_unevictable_list(new_page); | 1521 | add_page_to_unevictable_list(new_page); |
1522 | } | 1522 | } |
1523 | 1523 | ||
1524 | page_cache_release(page); | ||
1525 | return new_page; | 1524 | return new_page; |
1526 | } | 1525 | } |
1527 | 1526 | ||
diff --git a/mm/memblock.c b/mm/memblock.c
index 43840b305ecb..400dc62697d7 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -11,237 +11,423 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/slab.h> | ||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/poison.h> | ||
18 | #include <linux/pfn.h> | ||
19 | #include <linux/debugfs.h> | ||
20 | #include <linux/seq_file.h> | ||
16 | #include <linux/memblock.h> | 21 | #include <linux/memblock.h> |
17 | 22 | ||
18 | #define MEMBLOCK_ALLOC_ANYWHERE 0 | 23 | struct memblock memblock __initdata_memblock; |
19 | 24 | ||
20 | struct memblock memblock; | 25 | int memblock_debug __initdata_memblock; |
26 | int memblock_can_resize __initdata_memblock; | ||
27 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; | ||
28 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; | ||
21 | 29 | ||
22 | static int memblock_debug; | 30 | /* inline so we don't get a warning when pr_debug is compiled out */ |
31 | static inline const char *memblock_type_name(struct memblock_type *type) | ||
32 | { | ||
33 | if (type == &memblock.memory) | ||
34 | return "memory"; | ||
35 | else if (type == &memblock.reserved) | ||
36 | return "reserved"; | ||
37 | else | ||
38 | return "unknown"; | ||
39 | } | ||
23 | 40 | ||
24 | static int __init early_memblock(char *p) | 41 | /* |
42 | * Address comparison utilities | ||
43 | */ | ||
44 | |||
45 | static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size) | ||
25 | { | 46 | { |
26 | if (p && strstr(p, "debug")) | 47 | return addr & ~(size - 1); |
27 | memblock_debug = 1; | 48 | } |
49 | |||
50 | static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size) | ||
51 | { | ||
52 | return (addr + (size - 1)) & ~(size - 1); | ||
53 | } | ||
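Both helpers assume the alignment is a power of two. Stand-alone equivalents on unsigned 64-bit addresses, for reference:

#include <stdint.h>

static uint64_t align_down(uint64_t addr, uint64_t align)
{
        return addr & ~(align - 1);               /* round down to a multiple of align */
}

static uint64_t align_up(uint64_t addr, uint64_t align)
{
        return (addr + align - 1) & ~(align - 1); /* round up to a multiple of align */
}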
54 | |||
55 | static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, | ||
56 | phys_addr_t base2, phys_addr_t size2) | ||
57 | { | ||
58 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); | ||
59 | } | ||
60 | |||
61 | static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1, | ||
62 | phys_addr_t base2, phys_addr_t size2) | ||
63 | { | ||
64 | if (base2 == base1 + size1) | ||
65 | return 1; | ||
66 | else if (base1 == base2 + size2) | ||
67 | return -1; | ||
68 | |||
28 | return 0; | 69 | return 0; |
29 | } | 70 | } |
30 | early_param("memblock", early_memblock); | ||
31 | 71 | ||
32 | static void memblock_dump(struct memblock_region *region, char *name) | 72 | static long __init_memblock memblock_regions_adjacent(struct memblock_type *type, |
73 | unsigned long r1, unsigned long r2) | ||
33 | { | 74 | { |
34 | unsigned long long base, size; | 75 | phys_addr_t base1 = type->regions[r1].base; |
35 | int i; | 76 | phys_addr_t size1 = type->regions[r1].size; |
77 | phys_addr_t base2 = type->regions[r2].base; | ||
78 | phys_addr_t size2 = type->regions[r2].size; | ||
36 | 79 | ||
37 | pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); | 80 | return memblock_addrs_adjacent(base1, size1, base2, size2); |
81 | } | ||
38 | 82 | ||
39 | for (i = 0; i < region->cnt; i++) { | 83 | long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) |
40 | base = region->region[i].base; | 84 | { |
41 | size = region->region[i].size; | 85 | unsigned long i; |
42 | 86 | ||
43 | pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", | 87 | for (i = 0; i < type->cnt; i++) { |
44 | name, i, base, base + size - 1, size); | 88 | phys_addr_t rgnbase = type->regions[i].base; |
89 | phys_addr_t rgnsize = type->regions[i].size; | ||
90 | if (memblock_addrs_overlap(base, size, rgnbase, rgnsize)) | ||
91 | break; | ||
45 | } | 92 | } |
93 | |||
94 | return (i < type->cnt) ? i : -1; | ||
46 | } | 95 | } |
47 | 96 | ||
48 | void memblock_dump_all(void) | 97 | /* |
98 | * Find, allocate, deallocate or reserve unreserved regions. All allocations | ||
99 | * are top-down. | ||
100 | */ | ||
101 | |||
102 | static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end, | ||
103 | phys_addr_t size, phys_addr_t align) | ||
49 | { | 104 | { |
50 | if (!memblock_debug) | 105 | phys_addr_t base, res_base; |
51 | return; | 106 | long j; |
52 | 107 | ||
53 | pr_info("MEMBLOCK configuration:\n"); | 108 | /* In case, huge size is requested */ |
54 | pr_info(" rmo_size = 0x%llx\n", (unsigned long long)memblock.rmo_size); | 109 | if (end < size) |
55 | pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size); | 110 | return MEMBLOCK_ERROR; |
56 | 111 | ||
57 | memblock_dump(&memblock.memory, "memory"); | 112 | base = memblock_align_down((end - size), align); |
58 | memblock_dump(&memblock.reserved, "reserved"); | 113 | |
114 | /* Prevent allocations returning 0 as it's also used to | ||
115 | * indicate an allocation failure | ||
116 | */ | ||
117 | if (start == 0) | ||
118 | start = PAGE_SIZE; | ||
119 | |||
120 | while (start <= base) { | ||
121 | j = memblock_overlaps_region(&memblock.reserved, base, size); | ||
122 | if (j < 0) | ||
123 | return base; | ||
124 | res_base = memblock.reserved.regions[j].base; | ||
125 | if (res_base < size) | ||
126 | break; | ||
127 | base = memblock_align_down(res_base - size, align); | ||
128 | } | ||
129 | |||
130 | return MEMBLOCK_ERROR; | ||
59 | } | 131 | } |
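memblock_find_region() starts from the highest aligned candidate below 'end' and walks downward, stepping past any reserved range it collides with. A stand-alone model of that search, with UINT64_MAX standing in for MEMBLOCK_ERROR; the guard that keeps address 0 from being returned is left out for brevity.

#include <stdint.h>

struct range { uint64_t base, size; };

/* Index of the first reserved range overlapping [base, base+size), or -1. */
static int overlaps(const struct range *res, int cnt, uint64_t base, uint64_t size)
{
        for (int i = 0; i < cnt; i++)
                if (base < res[i].base + res[i].size && res[i].base < base + size)
                        return i;
        return -1;
}

static uint64_t find_region_topdown(const struct range *res, int cnt,
                                    uint64_t start, uint64_t end,
                                    uint64_t size, uint64_t align)
{
        uint64_t base;
        int j;

        if (end < size)                         /* request larger than the window */
                return UINT64_MAX;
        base = (end - size) & ~(align - 1);
        while (start <= base) {
                j = overlaps(res, cnt, base, size);
                if (j < 0)
                        return base;            /* no reserved range in the way */
                if (res[j].base < size)
                        break;                  /* next candidate would underflow */
                base = (res[j].base - size) & ~(align - 1);
        }
        return UINT64_MAX;                      /* nothing found */
}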
60 | 132 | ||
61 | static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2, | 133 | static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size, |
62 | u64 size2) | 134 | phys_addr_t align, phys_addr_t start, phys_addr_t end) |
63 | { | 135 | { |
64 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); | 136 | long i; |
137 | |||
138 | BUG_ON(0 == size); | ||
139 | |||
140 | size = memblock_align_up(size, align); | ||
141 | |||
142 | /* Pump up max_addr */ | ||
143 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) | ||
144 | end = memblock.current_limit; | ||
145 | |||
146 | /* We do a top-down search, this tends to limit memory | ||
147 | * fragmentation by keeping early boot allocs near the | ||
148 | * top of memory | ||
149 | */ | ||
150 | for (i = memblock.memory.cnt - 1; i >= 0; i--) { | ||
151 | phys_addr_t memblockbase = memblock.memory.regions[i].base; | ||
152 | phys_addr_t memblocksize = memblock.memory.regions[i].size; | ||
153 | phys_addr_t bottom, top, found; | ||
154 | |||
155 | if (memblocksize < size) | ||
156 | continue; | ||
157 | if ((memblockbase + memblocksize) <= start) | ||
158 | break; | ||
159 | bottom = max(memblockbase, start); | ||
160 | top = min(memblockbase + memblocksize, end); | ||
161 | if (bottom >= top) | ||
162 | continue; | ||
163 | found = memblock_find_region(bottom, top, size, align); | ||
164 | if (found != MEMBLOCK_ERROR) | ||
165 | return found; | ||
166 | } | ||
167 | return MEMBLOCK_ERROR; | ||
65 | } | 168 | } |
66 | 169 | ||
67 | static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2) | 170 | /* |
171 | * Find a free area with specified alignment in a specific range. | ||
172 | */ | ||
173 | u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align) | ||
68 | { | 174 | { |
69 | if (base2 == base1 + size1) | 175 | return memblock_find_base(size, align, start, end); |
70 | return 1; | 176 | } |
71 | else if (base1 == base2 + size2) | ||
72 | return -1; | ||
73 | 177 | ||
74 | return 0; | 178 | /* |
179 | * Free memblock.reserved.regions | ||
180 | */ | ||
181 | int __init_memblock memblock_free_reserved_regions(void) | ||
182 | { | ||
183 | if (memblock.reserved.regions == memblock_reserved_init_regions) | ||
184 | return 0; | ||
185 | |||
186 | return memblock_free(__pa(memblock.reserved.regions), | ||
187 | sizeof(struct memblock_region) * memblock.reserved.max); | ||
75 | } | 188 | } |
76 | 189 | ||
77 | static long memblock_regions_adjacent(struct memblock_region *rgn, | 190 | /* |
78 | unsigned long r1, unsigned long r2) | 191 | * Reserve memblock.reserved.regions |
192 | */ | ||
193 | int __init_memblock memblock_reserve_reserved_regions(void) | ||
79 | { | 194 | { |
80 | u64 base1 = rgn->region[r1].base; | 195 | if (memblock.reserved.regions == memblock_reserved_init_regions) |
81 | u64 size1 = rgn->region[r1].size; | 196 | return 0; |
82 | u64 base2 = rgn->region[r2].base; | ||
83 | u64 size2 = rgn->region[r2].size; | ||
84 | 197 | ||
85 | return memblock_addrs_adjacent(base1, size1, base2, size2); | 198 | return memblock_reserve(__pa(memblock.reserved.regions), |
199 | sizeof(struct memblock_region) * memblock.reserved.max); | ||
86 | } | 200 | } |
87 | 201 | ||
88 | static void memblock_remove_region(struct memblock_region *rgn, unsigned long r) | 202 | static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) |
89 | { | 203 | { |
90 | unsigned long i; | 204 | unsigned long i; |
91 | 205 | ||
92 | for (i = r; i < rgn->cnt - 1; i++) { | 206 | for (i = r; i < type->cnt - 1; i++) { |
93 | rgn->region[i].base = rgn->region[i + 1].base; | 207 | type->regions[i].base = type->regions[i + 1].base; |
94 | rgn->region[i].size = rgn->region[i + 1].size; | 208 | type->regions[i].size = type->regions[i + 1].size; |
95 | } | 209 | } |
96 | rgn->cnt--; | 210 | type->cnt--; |
97 | } | 211 | } |
98 | 212 | ||
99 | /* Assumption: base addr of region 1 < base addr of region 2 */ | 213 | /* Assumption: base addr of region 1 < base addr of region 2 */ |
100 | static void memblock_coalesce_regions(struct memblock_region *rgn, | 214 | static void __init_memblock memblock_coalesce_regions(struct memblock_type *type, |
101 | unsigned long r1, unsigned long r2) | 215 | unsigned long r1, unsigned long r2) |
102 | { | 216 | { |
103 | rgn->region[r1].size += rgn->region[r2].size; | 217 | type->regions[r1].size += type->regions[r2].size; |
104 | memblock_remove_region(rgn, r2); | 218 | memblock_remove_region(type, r2); |
105 | } | 219 | } |
106 | 220 | ||
107 | void __init memblock_init(void) | 221 | /* Defined below but needed now */ |
222 | static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); | ||
223 | |||
224 | static int __init_memblock memblock_double_array(struct memblock_type *type) | ||
108 | { | 225 | { |
109 | /* Create a dummy zero size MEMBLOCK which will get coalesced away later. | 226 | struct memblock_region *new_array, *old_array; |
110 | * This simplifies the memblock_add() code below... | 227 | phys_addr_t old_size, new_size, addr; |
228 | int use_slab = slab_is_available(); | ||
229 | |||
230 | /* We don't allow resizing until we know about the reserved regions | ||
231 | * of memory that aren't suitable for allocation | ||
111 | */ | 232 | */ |
112 | memblock.memory.region[0].base = 0; | 233 | if (!memblock_can_resize) |
113 | memblock.memory.region[0].size = 0; | 234 | return -1; |
114 | memblock.memory.cnt = 1; | ||
115 | 235 | ||
116 | /* Ditto. */ | 236 | /* Calculate new doubled size */ |
117 | memblock.reserved.region[0].base = 0; | 237 | old_size = type->max * sizeof(struct memblock_region); |
118 | memblock.reserved.region[0].size = 0; | 238 | new_size = old_size << 1; |
119 | memblock.reserved.cnt = 1; | 239 | |
120 | } | 240 | /* Try to find some space for it. |
241 | * | ||
242 | * WARNING: We assume that either slab_is_available() and we use it or | ||
243 | * we use MEMBLOCK for allocations. That means that this is unsafe to use | ||
244 | * when bootmem is currently active (unless bootmem itself is implemented | ||
245 | * on top of MEMBLOCK which isn't the case yet) | ||
246 | * | ||
247 | * This should however not be an issue for now, as we currently only | ||
248 | * call into MEMBLOCK while it's still active, or much later when slab is | ||
249 | * active for memory hotplug operations | ||
250 | */ | ||
251 | if (use_slab) { | ||
252 | new_array = kmalloc(new_size, GFP_KERNEL); | ||
253 | addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array); | ||
254 | } else | ||
255 | addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE); | ||
256 | if (addr == MEMBLOCK_ERROR) { | ||
257 | pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", | ||
258 | memblock_type_name(type), type->max, type->max * 2); | ||
259 | return -1; | ||
260 | } | ||
261 | new_array = __va(addr); | ||
121 | 262 | ||
122 | void __init memblock_analyze(void) | 263 | memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]", |
123 | { | 264 | memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1); |
124 | int i; | ||
125 | 265 | ||
126 | memblock.memory.size = 0; | 266 | /* Found space, we now need to move the array over before |
267 | * we add the reserved region since it may be our reserved | ||
268 | * array itself that is full. | ||
269 | */ | ||
270 | memcpy(new_array, type->regions, old_size); | ||
271 | memset(new_array + type->max, 0, old_size); | ||
272 | old_array = type->regions; | ||
273 | type->regions = new_array; | ||
274 | type->max <<= 1; | ||
275 | |||
276 | /* If we use SLAB that's it, we are done */ | ||
277 | if (use_slab) | ||
278 | return 0; | ||
127 | 279 | ||
128 | for (i = 0; i < memblock.memory.cnt; i++) | 280 | /* Add the new reserved region now. Should not fail ! */ |
129 | memblock.memory.size += memblock.memory.region[i].size; | 281 | BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0); |
282 | |||
283 | /* If the array wasn't our static init one, then free it. We only do | ||
284 | * that before SLAB is available as later on, we don't know whether | ||
285 | * to use kfree or free_bootmem_pages(). Shouldn't be a big deal | ||
286 | * anyways | ||
287 | */ | ||
288 | if (old_array != memblock_memory_init_regions && | ||
289 | old_array != memblock_reserved_init_regions) | ||
290 | memblock_free(__pa(old_array), old_size); | ||
291 | |||
292 | return 0; | ||
130 | } | 293 | } |
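memblock_double_array() grows a full region array by allocating a buffer twice the current size (from slab once it is available, otherwise from memblock itself), copying the old entries across, zeroing the new half, and only then publishing the new array. A much-simplified userspace sketch of the grow-and-swap step, with plain malloc standing in for that slab-versus-memblock choice:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct region { uint64_t base, size; };
struct region_array { struct region *regions; unsigned long cnt, max; };

/* Double the capacity of 'type'; returns 0 on success, -1 on failure. */
static int double_array(struct region_array *type)
{
        size_t old_size = type->max * sizeof(struct region);
        struct region *new_regions = malloc(old_size * 2);

        if (!new_regions)
                return -1;
        memcpy(new_regions, type->regions, old_size);   /* copy existing entries */
        memset(new_regions + type->max, 0, old_size);   /* clear the new half */
        free(type->regions);    /* kernel: freed only if it is not the static init array */
        type->regions = new_regions;
        type->max *= 2;
        return 0;
}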
131 | 294 | ||
132 | static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size) | 295 | extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, |
296 | phys_addr_t addr2, phys_addr_t size2) | ||
297 | { | ||
298 | return 1; | ||
299 | } | ||
300 | |||
301 | static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) | ||
133 | { | 302 | { |
134 | unsigned long coalesced = 0; | 303 | unsigned long coalesced = 0; |
135 | long adjacent, i; | 304 | long adjacent, i; |
136 | 305 | ||
137 | if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { | 306 | if ((type->cnt == 1) && (type->regions[0].size == 0)) { |
138 | rgn->region[0].base = base; | 307 | type->regions[0].base = base; |
139 | rgn->region[0].size = size; | 308 | type->regions[0].size = size; |
140 | return 0; | 309 | return 0; |
141 | } | 310 | } |
142 | 311 | ||
143 | /* First try and coalesce this MEMBLOCK with another. */ | 312 | /* First try and coalesce this MEMBLOCK with another. */ |
144 | for (i = 0; i < rgn->cnt; i++) { | 313 | for (i = 0; i < type->cnt; i++) { |
145 | u64 rgnbase = rgn->region[i].base; | 314 | phys_addr_t rgnbase = type->regions[i].base; |
146 | u64 rgnsize = rgn->region[i].size; | 315 | phys_addr_t rgnsize = type->regions[i].size; |
147 | 316 | ||
148 | if ((rgnbase == base) && (rgnsize == size)) | 317 | if ((rgnbase == base) && (rgnsize == size)) |
149 | /* Already have this region, so we're done */ | 318 | /* Already have this region, so we're done */ |
150 | return 0; | 319 | return 0; |
151 | 320 | ||
152 | adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize); | 321 | adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize); |
322 | /* Check if arch allows coalescing */ | ||
323 | if (adjacent != 0 && type == &memblock.memory && | ||
324 | !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize)) | ||
325 | break; | ||
153 | if (adjacent > 0) { | 326 | if (adjacent > 0) { |
154 | rgn->region[i].base -= size; | 327 | type->regions[i].base -= size; |
155 | rgn->region[i].size += size; | 328 | type->regions[i].size += size; |
156 | coalesced++; | 329 | coalesced++; |
157 | break; | 330 | break; |
158 | } else if (adjacent < 0) { | 331 | } else if (adjacent < 0) { |
159 | rgn->region[i].size += size; | 332 | type->regions[i].size += size; |
160 | coalesced++; | 333 | coalesced++; |
161 | break; | 334 | break; |
162 | } | 335 | } |
163 | } | 336 | } |
164 | 337 | ||
165 | if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) { | 338 | /* If we plugged a hole, we may want to also coalesce with the |
166 | memblock_coalesce_regions(rgn, i, i+1); | 339 | * next region |
340 | */ | ||
341 | if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) && | ||
342 | ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base, | ||
343 | type->regions[i].size, | ||
344 | type->regions[i+1].base, | ||
345 | type->regions[i+1].size)))) { | ||
346 | memblock_coalesce_regions(type, i, i+1); | ||
167 | coalesced++; | 347 | coalesced++; |
168 | } | 348 | } |
169 | 349 | ||
170 | if (coalesced) | 350 | if (coalesced) |
171 | return coalesced; | 351 | return coalesced; |
172 | if (rgn->cnt >= MAX_MEMBLOCK_REGIONS) | 352 | |
353 | /* If we are out of space, we fail. It's too late to resize the array | ||
354 | * but then this shouldn't have happened in the first place. | ||
355 | */ | ||
356 | if (WARN_ON(type->cnt >= type->max)) | ||
173 | return -1; | 357 | return -1; |
174 | 358 | ||
175 | /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ | 359 | /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ |
176 | for (i = rgn->cnt - 1; i >= 0; i--) { | 360 | for (i = type->cnt - 1; i >= 0; i--) { |
177 | if (base < rgn->region[i].base) { | 361 | if (base < type->regions[i].base) { |
178 | rgn->region[i+1].base = rgn->region[i].base; | 362 | type->regions[i+1].base = type->regions[i].base; |
179 | rgn->region[i+1].size = rgn->region[i].size; | 363 | type->regions[i+1].size = type->regions[i].size; |
180 | } else { | 364 | } else { |
181 | rgn->region[i+1].base = base; | 365 | type->regions[i+1].base = base; |
182 | rgn->region[i+1].size = size; | 366 | type->regions[i+1].size = size; |
183 | break; | 367 | break; |
184 | } | 368 | } |
185 | } | 369 | } |
186 | 370 | ||
187 | if (base < rgn->region[0].base) { | 371 | if (base < type->regions[0].base) { |
188 | rgn->region[0].base = base; | 372 | type->regions[0].base = base; |
189 | rgn->region[0].size = size; | 373 | type->regions[0].size = size; |
374 | } | ||
375 | type->cnt++; | ||
376 | |||
377 | /* The array is full ? Try to resize it. If that fails, we undo | ||
378 | * our allocation and return an error | ||
379 | */ | ||
380 | if (type->cnt == type->max && memblock_double_array(type)) { | ||
381 | type->cnt--; | ||
382 | return -1; | ||
190 | } | 383 | } |
191 | rgn->cnt++; | ||
192 | 384 | ||
193 | return 0; | 385 | return 0; |
194 | } | 386 | } |
195 | 387 | ||
196 | long memblock_add(u64 base, u64 size) | 388 | long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) |
197 | { | 389 | { |
198 | struct memblock_region *_rgn = &memblock.memory; | 390 | return memblock_add_region(&memblock.memory, base, size); |
199 | |||
200 | /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */ | ||
201 | if (base == 0) | ||
202 | memblock.rmo_size = size; | ||
203 | |||
204 | return memblock_add_region(_rgn, base, size); | ||
205 | 391 | ||
206 | } | 392 | } |
207 | 393 | ||
208 | static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size) | 394 | static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size) |
209 | { | 395 | { |
210 | u64 rgnbegin, rgnend; | 396 | phys_addr_t rgnbegin, rgnend; |
211 | u64 end = base + size; | 397 | phys_addr_t end = base + size; |
212 | int i; | 398 | int i; |
213 | 399 | ||
214 | rgnbegin = rgnend = 0; /* supress gcc warnings */ | 400 | rgnbegin = rgnend = 0; /* supress gcc warnings */ |
215 | 401 | ||
216 | /* Find the region where (base, size) belongs to */ | 402 | /* Find the region where (base, size) belongs to */ |
217 | for (i=0; i < rgn->cnt; i++) { | 403 | for (i=0; i < type->cnt; i++) { |
218 | rgnbegin = rgn->region[i].base; | 404 | rgnbegin = type->regions[i].base; |
219 | rgnend = rgnbegin + rgn->region[i].size; | 405 | rgnend = rgnbegin + type->regions[i].size; |
220 | 406 | ||
221 | if ((rgnbegin <= base) && (end <= rgnend)) | 407 | if ((rgnbegin <= base) && (end <= rgnend)) |
222 | break; | 408 | break; |
223 | } | 409 | } |
224 | 410 | ||
225 | /* Didn't find the region */ | 411 | /* Didn't find the region */ |
226 | if (i == rgn->cnt) | 412 | if (i == type->cnt) |
227 | return -1; | 413 | return -1; |
228 | 414 | ||
229 | /* Check to see if we are removing entire region */ | 415 | /* Check to see if we are removing entire region */ |
230 | if ((rgnbegin == base) && (rgnend == end)) { | 416 | if ((rgnbegin == base) && (rgnend == end)) { |
231 | memblock_remove_region(rgn, i); | 417 | memblock_remove_region(type, i); |
232 | return 0; | 418 | return 0; |
233 | } | 419 | } |
234 | 420 | ||
235 | /* Check to see if region is matching at the front */ | 421 | /* Check to see if region is matching at the front */ |
236 | if (rgnbegin == base) { | 422 | if (rgnbegin == base) { |
237 | rgn->region[i].base = end; | 423 | type->regions[i].base = end; |
238 | rgn->region[i].size -= size; | 424 | type->regions[i].size -= size; |
239 | return 0; | 425 | return 0; |
240 | } | 426 | } |
241 | 427 | ||
242 | /* Check to see if the region is matching at the end */ | 428 | /* Check to see if the region is matching at the end */ |
243 | if (rgnend == end) { | 429 | if (rgnend == end) { |
244 | rgn->region[i].size -= size; | 430 | type->regions[i].size -= size; |
245 | return 0; | 431 | return 0; |
246 | } | 432 | } |
247 | 433 | ||
@@ -249,208 +435,189 @@ static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size) | |||
249 | * We need to split the entry - adjust the current one to the | 435 | * We need to split the entry - adjust the current one to the |
250 | * beginging of the hole and add the region after hole. | 436 | * beginging of the hole and add the region after hole. |
251 | */ | 437 | */ |
252 | rgn->region[i].size = base - rgn->region[i].base; | 438 | type->regions[i].size = base - type->regions[i].base; |
253 | return memblock_add_region(rgn, end, rgnend - end); | 439 | return memblock_add_region(type, end, rgnend - end); |
254 | } | 440 | } |
255 | 441 | ||
256 | long memblock_remove(u64 base, u64 size) | 442 | long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) |
257 | { | 443 | { |
258 | return __memblock_remove(&memblock.memory, base, size); | 444 | return __memblock_remove(&memblock.memory, base, size); |
259 | } | 445 | } |
260 | 446 | ||
261 | long __init memblock_free(u64 base, u64 size) | 447 | long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) |
262 | { | 448 | { |
263 | return __memblock_remove(&memblock.reserved, base, size); | 449 | return __memblock_remove(&memblock.reserved, base, size); |
264 | } | 450 | } |
265 | 451 | ||
266 | long __init memblock_reserve(u64 base, u64 size) | 452 | long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) |
267 | { | 453 | { |
268 | struct memblock_region *_rgn = &memblock.reserved; | 454 | struct memblock_type *_rgn = &memblock.reserved; |
269 | 455 | ||
270 | BUG_ON(0 == size); | 456 | BUG_ON(0 == size); |
271 | 457 | ||
272 | return memblock_add_region(_rgn, base, size); | 458 | return memblock_add_region(_rgn, base, size); |
273 | } | 459 | } |
274 | 460 | ||
275 | long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size) | 461 | phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
276 | { | 462 | { |
277 | unsigned long i; | 463 | phys_addr_t found; |
278 | 464 | ||
279 | for (i = 0; i < rgn->cnt; i++) { | 465 | /* We align the size to limit fragmentation. Without this, a lot of |
280 | u64 rgnbase = rgn->region[i].base; | 466 | * small allocs quickly eat up the whole reserve array on sparc |
281 | u64 rgnsize = rgn->region[i].size; | 467 | */ |
282 | if (memblock_addrs_overlap(base, size, rgnbase, rgnsize)) | 468 | size = memblock_align_up(size, align); |
283 | break; | ||
284 | } | ||
285 | 469 | ||
286 | return (i < rgn->cnt) ? i : -1; | 470 | found = memblock_find_base(size, align, 0, max_addr); |
471 | if (found != MEMBLOCK_ERROR && | ||
472 | memblock_add_region(&memblock.reserved, found, size) >= 0) | ||
473 | return found; | ||
474 | |||
475 | return 0; | ||
287 | } | 476 | } |
288 | 477 | ||
289 | static u64 memblock_align_down(u64 addr, u64 size) | 478 | phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
290 | { | 479 | { |
291 | return addr & ~(size - 1); | 480 | phys_addr_t alloc; |
481 | |||
482 | alloc = __memblock_alloc_base(size, align, max_addr); | ||
483 | |||
484 | if (alloc == 0) | ||
485 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", | ||
486 | (unsigned long long) size, (unsigned long long) max_addr); | ||
487 | |||
488 | return alloc; | ||
292 | } | 489 | } |
293 | 490 | ||
294 | static u64 memblock_align_up(u64 addr, u64 size) | 491 | phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) |
295 | { | 492 | { |
296 | return (addr + (size - 1)) & ~(size - 1); | 493 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
297 | } | 494 | } |
298 | 495 | ||
299 | static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end, | 496 | |
300 | u64 size, u64 align) | 497 | /* |
498 | * Additional node-local allocators. Search for node memory is bottom up | ||
499 | * and walks memblock regions within that node bottom-up as well, but allocation | ||
500 | * within an memblock region is top-down. XXX I plan to fix that at some stage | ||
501 | * | ||
502 | * WARNING: Only available after early_node_map[] has been populated, | ||
503 | * on some architectures, that is after all the calls to add_active_range() | ||
504 | * have been done to populate it. | ||
505 | */ | ||
506 | |||
507 | phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid) | ||
301 | { | 508 | { |
302 | u64 base, res_base; | 509 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP |
303 | long j; | 510 | /* |
511 | * This code originates from sparc which really wants use to walk by addresses | ||
512 | * and returns the nid. This is not very convenient for early_pfn_map[] users | ||
513 | * as the map isn't sorted yet, and it really wants to be walked by nid. | ||
514 | * | ||
515 | * For now, I implement the inefficient method below which walks the early | ||
516 | * map multiple times. Eventually we may want to use an ARCH config option | ||
517 | * to implement a completely different method for both case. | ||
518 | */ | ||
519 | unsigned long start_pfn, end_pfn; | ||
520 | int i; | ||
304 | 521 | ||
305 | base = memblock_align_down((end - size), align); | 522 | for (i = 0; i < MAX_NUMNODES; i++) { |
306 | while (start <= base) { | 523 | get_pfn_range_for_nid(i, &start_pfn, &end_pfn); |
307 | j = memblock_overlaps_region(&memblock.reserved, base, size); | 524 | if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn)) |
308 | if (j < 0) { | 525 | continue; |
309 | /* this area isn't reserved, take it */ | 526 | *nid = i; |
310 | if (memblock_add_region(&memblock.reserved, base, size) < 0) | 527 | return min(end, PFN_PHYS(end_pfn)); |
311 | base = ~(u64)0; | ||
312 | return base; | ||
313 | } | ||
314 | res_base = memblock.reserved.region[j].base; | ||
315 | if (res_base < size) | ||
316 | break; | ||
317 | base = memblock_align_down(res_base - size, align); | ||
318 | } | 528 | } |
529 | #endif | ||
530 | *nid = 0; | ||
319 | 531 | ||
320 | return ~(u64)0; | 532 | return end; |
321 | } | 533 | } |
322 | 534 | ||
323 | static u64 __init memblock_alloc_nid_region(struct memblock_property *mp, | 535 | static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp, |
324 | u64 (*nid_range)(u64, u64, int *), | 536 | phys_addr_t size, |
325 | u64 size, u64 align, int nid) | 537 | phys_addr_t align, int nid) |
326 | { | 538 | { |
327 | u64 start, end; | 539 | phys_addr_t start, end; |
328 | 540 | ||
329 | start = mp->base; | 541 | start = mp->base; |
330 | end = start + mp->size; | 542 | end = start + mp->size; |
331 | 543 | ||
332 | start = memblock_align_up(start, align); | 544 | start = memblock_align_up(start, align); |
333 | while (start < end) { | 545 | while (start < end) { |
334 | u64 this_end; | 546 | phys_addr_t this_end; |
335 | int this_nid; | 547 | int this_nid; |
336 | 548 | ||
337 | this_end = nid_range(start, end, &this_nid); | 549 | this_end = memblock_nid_range(start, end, &this_nid); |
338 | if (this_nid == nid) { | 550 | if (this_nid == nid) { |
339 | u64 ret = memblock_alloc_nid_unreserved(start, this_end, | 551 | phys_addr_t ret = memblock_find_region(start, this_end, size, align); |
340 | size, align); | 552 | if (ret != MEMBLOCK_ERROR && |
341 | if (ret != ~(u64)0) | 553 | memblock_add_region(&memblock.reserved, ret, size) >= 0) |
342 | return ret; | 554 | return ret; |
343 | } | 555 | } |
344 | start = this_end; | 556 | start = this_end; |
345 | } | 557 | } |
346 | 558 | ||
347 | return ~(u64)0; | 559 | return MEMBLOCK_ERROR; |
348 | } | 560 | } |
349 | 561 | ||
350 | u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, | 562 | phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) |
351 | u64 (*nid_range)(u64 start, u64 end, int *nid)) | ||
352 | { | 563 | { |
353 | struct memblock_region *mem = &memblock.memory; | 564 | struct memblock_type *mem = &memblock.memory; |
354 | int i; | 565 | int i; |
355 | 566 | ||
356 | BUG_ON(0 == size); | 567 | BUG_ON(0 == size); |
357 | 568 | ||
569 | /* We align the size to limit fragmentation. Without this, a lot of | ||
570 | * small allocs quickly eat up the whole reserve array on sparc | ||
571 | */ | ||
358 | size = memblock_align_up(size, align); | 572 | size = memblock_align_up(size, align); |
359 | 573 | ||
574 | /* We do a bottom-up search for a region with the right | ||
575 | * nid since that's easier considering how memblock_nid_range() | ||
576 | * works | ||
577 | */ | ||
360 | for (i = 0; i < mem->cnt; i++) { | 578 | for (i = 0; i < mem->cnt; i++) { |
361 | u64 ret = memblock_alloc_nid_region(&mem->region[i], | 579 | phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i], |
362 | nid_range, | ||
363 | size, align, nid); | 580 | size, align, nid); |
364 | if (ret != ~(u64)0) | 581 | if (ret != MEMBLOCK_ERROR) |
365 | return ret; | 582 | return ret; |
366 | } | 583 | } |
367 | 584 | ||
368 | return memblock_alloc(size, align); | 585 | return 0; |
369 | } | ||
370 | |||
371 | u64 __init memblock_alloc(u64 size, u64 align) | ||
372 | { | ||
373 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); | ||
374 | } | 586 | } |
375 | 587 | ||
376 | u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr) | 588 | phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) |
377 | { | 589 | { |
378 | u64 alloc; | 590 | phys_addr_t res = memblock_alloc_nid(size, align, nid); |
379 | |||
380 | alloc = __memblock_alloc_base(size, align, max_addr); | ||
381 | 591 | ||
382 | if (alloc == 0) | 592 | if (res) |
383 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", | 593 | return res; |
384 | (unsigned long long) size, (unsigned long long) max_addr); | 594 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); |
385 | |||
386 | return alloc; | ||
387 | } | 595 | } |
388 | 596 | ||
389 | u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr) | ||
390 | { | ||
391 | long i, j; | ||
392 | u64 base = 0; | ||
393 | u64 res_base; | ||
394 | |||
395 | BUG_ON(0 == size); | ||
396 | 597 | ||
397 | size = memblock_align_up(size, align); | 598 | /* |
398 | 599 | * Remaining API functions | |
399 | /* On some platforms, make sure we allocate lowmem */ | 600 | */ |
400 | /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */ | ||
401 | if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) | ||
402 | max_addr = MEMBLOCK_REAL_LIMIT; | ||
403 | |||
404 | for (i = memblock.memory.cnt - 1; i >= 0; i--) { | ||
405 | u64 memblockbase = memblock.memory.region[i].base; | ||
406 | u64 memblocksize = memblock.memory.region[i].size; | ||
407 | |||
408 | if (memblocksize < size) | ||
409 | continue; | ||
410 | if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) | ||
411 | base = memblock_align_down(memblockbase + memblocksize - size, align); | ||
412 | else if (memblockbase < max_addr) { | ||
413 | base = min(memblockbase + memblocksize, max_addr); | ||
414 | base = memblock_align_down(base - size, align); | ||
415 | } else | ||
416 | continue; | ||
417 | |||
418 | while (base && memblockbase <= base) { | ||
419 | j = memblock_overlaps_region(&memblock.reserved, base, size); | ||
420 | if (j < 0) { | ||
421 | /* this area isn't reserved, take it */ | ||
422 | if (memblock_add_region(&memblock.reserved, base, size) < 0) | ||
423 | return 0; | ||
424 | return base; | ||
425 | } | ||
426 | res_base = memblock.reserved.region[j].base; | ||
427 | if (res_base < size) | ||
428 | break; | ||
429 | base = memblock_align_down(res_base - size, align); | ||
430 | } | ||
431 | } | ||
432 | return 0; | ||
433 | } | ||
434 | 601 | ||
435 | /* You must call memblock_analyze() before this. */ | 602 | /* You must call memblock_analyze() before this. */ |
436 | u64 __init memblock_phys_mem_size(void) | 603 | phys_addr_t __init memblock_phys_mem_size(void) |
437 | { | 604 | { |
438 | return memblock.memory.size; | 605 | return memblock.memory_size; |
439 | } | 606 | } |
440 | 607 | ||
441 | u64 memblock_end_of_DRAM(void) | 608 | phys_addr_t __init_memblock memblock_end_of_DRAM(void) |
442 | { | 609 | { |
443 | int idx = memblock.memory.cnt - 1; | 610 | int idx = memblock.memory.cnt - 1; |
444 | 611 | ||
445 | return (memblock.memory.region[idx].base + memblock.memory.region[idx].size); | 612 | return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); |
446 | } | 613 | } |
447 | 614 | ||
448 | /* You must call memblock_analyze() after this. */ | 615 | /* You must call memblock_analyze() after this. */ |
449 | void __init memblock_enforce_memory_limit(u64 memory_limit) | 616 | void __init memblock_enforce_memory_limit(phys_addr_t memory_limit) |
450 | { | 617 | { |
451 | unsigned long i; | 618 | unsigned long i; |
452 | u64 limit; | 619 | phys_addr_t limit; |
453 | struct memblock_property *p; | 620 | struct memblock_region *p; |
454 | 621 | ||
455 | if (!memory_limit) | 622 | if (!memory_limit) |
456 | return; | 623 | return; |
@@ -458,24 +625,21 @@ void __init memblock_enforce_memory_limit(u64 memory_limit) | |||
458 | /* Truncate the memblock regions to satisfy the memory limit. */ | 625 | /* Truncate the memblock regions to satisfy the memory limit. */ |
459 | limit = memory_limit; | 626 | limit = memory_limit; |
460 | for (i = 0; i < memblock.memory.cnt; i++) { | 627 | for (i = 0; i < memblock.memory.cnt; i++) { |
461 | if (limit > memblock.memory.region[i].size) { | 628 | if (limit > memblock.memory.regions[i].size) { |
462 | limit -= memblock.memory.region[i].size; | 629 | limit -= memblock.memory.regions[i].size; |
463 | continue; | 630 | continue; |
464 | } | 631 | } |
465 | 632 | ||
466 | memblock.memory.region[i].size = limit; | 633 | memblock.memory.regions[i].size = limit; |
467 | memblock.memory.cnt = i + 1; | 634 | memblock.memory.cnt = i + 1; |
468 | break; | 635 | break; |
469 | } | 636 | } |
470 | 637 | ||
471 | if (memblock.memory.region[0].size < memblock.rmo_size) | ||
472 | memblock.rmo_size = memblock.memory.region[0].size; | ||
473 | |||
474 | memory_limit = memblock_end_of_DRAM(); | 638 | memory_limit = memblock_end_of_DRAM(); |
475 | 639 | ||
476 | /* And truncate any reserves above the limit also. */ | 640 | /* And truncate any reserves above the limit also. */ |
477 | for (i = 0; i < memblock.reserved.cnt; i++) { | 641 | for (i = 0; i < memblock.reserved.cnt; i++) { |
478 | p = &memblock.reserved.region[i]; | 642 | p = &memblock.reserved.regions[i]; |
479 | 643 | ||
480 | if (p->base > memory_limit) | 644 | if (p->base > memory_limit) |
481 | p->size = 0; | 645 | p->size = 0; |
@@ -489,53 +653,190 @@ void __init memblock_enforce_memory_limit(u64 memory_limit) | |||
489 | } | 653 | } |
490 | } | 654 | } |
491 | 655 | ||
492 | int __init memblock_is_reserved(u64 addr) | 656 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) |
657 | { | ||
658 | unsigned int left = 0, right = type->cnt; | ||
659 | |||
660 | do { | ||
661 | unsigned int mid = (right + left) / 2; | ||
662 | |||
663 | if (addr < type->regions[mid].base) | ||
664 | right = mid; | ||
665 | else if (addr >= (type->regions[mid].base + | ||
666 | type->regions[mid].size)) | ||
667 | left = mid + 1; | ||
668 | else | ||
669 | return mid; | ||
670 | } while (left < right); | ||
671 | return -1; | ||
672 | } | ||
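memblock_search() is a plain binary search over regions that are kept sorted by base address and never overlap. A stand-alone version of the same lookup, written with a while loop so an empty array is also handled:

#include <stdint.h>

struct region { uint64_t base, size; };

/* Index of the region containing addr, or -1 if no region covers it. */
static int region_search(const struct region *regions, unsigned int cnt, uint64_t addr)
{
        unsigned int left = 0, right = cnt;

        while (left < right) {
                unsigned int mid = (left + right) / 2;

                if (addr < regions[mid].base)
                        right = mid;
                else if (addr >= regions[mid].base + regions[mid].size)
                        left = mid + 1;
                else
                        return mid;             /* addr falls inside regions[mid] */
        }
        return -1;
}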
673 | |||
674 | int __init memblock_is_reserved(phys_addr_t addr) | ||
675 | { | ||
676 | return memblock_search(&memblock.reserved, addr) != -1; | ||
677 | } | ||
678 | |||
679 | int __init_memblock memblock_is_memory(phys_addr_t addr) | ||
680 | { | ||
681 | return memblock_search(&memblock.memory, addr) != -1; | ||
682 | } | ||
683 | |||
684 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) | ||
685 | { | ||
686 | int idx = memblock_search(&memblock.reserved, base); | ||
687 | |||
688 | if (idx == -1) | ||
689 | return 0; | ||
690 | return memblock.reserved.regions[idx].base <= base && | ||
691 | (memblock.reserved.regions[idx].base + | ||
692 | memblock.reserved.regions[idx].size) >= (base + size); | ||
693 | } | ||
694 | |||
695 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) | ||
696 | { | ||
697 | return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; | ||
698 | } | ||
699 | |||
700 | |||
701 | void __init_memblock memblock_set_current_limit(phys_addr_t limit) | ||
493 | { | 702 | { |
703 | memblock.current_limit = limit; | ||
704 | } | ||
705 | |||
706 | static void __init_memblock memblock_dump(struct memblock_type *region, char *name) | ||
707 | { | ||
708 | unsigned long long base, size; | ||
494 | int i; | 709 | int i; |
495 | 710 | ||
496 | for (i = 0; i < memblock.reserved.cnt; i++) { | 711 | pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); |
497 | u64 upper = memblock.reserved.region[i].base + | 712 | |
498 | memblock.reserved.region[i].size - 1; | 713 | for (i = 0; i < region->cnt; i++) { |
499 | if ((addr >= memblock.reserved.region[i].base) && (addr <= upper)) | 714 | base = region->regions[i].base; |
500 | return 1; | 715 | size = region->regions[i].size; |
716 | |||
717 | pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n", | ||
718 | name, i, base, base + size - 1, size); | ||
501 | } | 719 | } |
502 | return 0; | ||
503 | } | 720 | } |
504 | 721 | ||
505 | int memblock_is_region_reserved(u64 base, u64 size) | 722 | void __init_memblock memblock_dump_all(void) |
506 | { | 723 | { |
507 | return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; | 724 | if (!memblock_debug) |
725 | return; | ||
726 | |||
727 | pr_info("MEMBLOCK configuration:\n"); | ||
728 | pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size); | ||
729 | |||
730 | memblock_dump(&memblock.memory, "memory"); | ||
731 | memblock_dump(&memblock.reserved, "reserved"); | ||
508 | } | 732 | } |
509 | 733 | ||
510 | /* | 734 | void __init memblock_analyze(void) |
511 | * Given a <base, len>, find which memory regions belong to this range. | ||
512 | * Adjust the request and return a contiguous chunk. | ||
513 | */ | ||
514 | int memblock_find(struct memblock_property *res) | ||
515 | { | 735 | { |
516 | int i; | 736 | int i; |
517 | u64 rstart, rend; | ||
518 | 737 | ||
519 | rstart = res->base; | 738 | /* Check marker in the unused last array entry */ |
520 | rend = rstart + res->size - 1; | 739 | WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base |
740 | != (phys_addr_t)RED_INACTIVE); | ||
741 | WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base | ||
742 | != (phys_addr_t)RED_INACTIVE); | ||
743 | |||
744 | memblock.memory_size = 0; | ||
745 | |||
746 | for (i = 0; i < memblock.memory.cnt; i++) | ||
747 | memblock.memory_size += memblock.memory.regions[i].size; | ||
748 | |||
749 | /* We allow resizing from there */ | ||
750 | memblock_can_resize = 1; | ||
751 | } | ||
752 | |||
753 | void __init memblock_init(void) | ||
754 | { | ||
755 | static int init_done __initdata = 0; | ||
756 | |||
757 | if (init_done) | ||
758 | return; | ||
759 | init_done = 1; | ||
760 | |||
761 | /* Hookup the initial arrays */ | ||
762 | memblock.memory.regions = memblock_memory_init_regions; | ||
763 | memblock.memory.max = INIT_MEMBLOCK_REGIONS; | ||
764 | memblock.reserved.regions = memblock_reserved_init_regions; | ||
765 | memblock.reserved.max = INIT_MEMBLOCK_REGIONS; | ||
766 | |||
767 | /* Write a marker in the unused last array entry */ | ||
768 | memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE; | ||
769 | memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE; | ||
770 | |||
771 | /* Create a dummy zero size MEMBLOCK which will get coalesced away later. | ||
772 | * This simplifies the memblock_add() code below... | ||
773 | */ | ||
774 | memblock.memory.regions[0].base = 0; | ||
775 | memblock.memory.regions[0].size = 0; | ||
776 | memblock.memory.cnt = 1; | ||
777 | |||
778 | /* Ditto. */ | ||
779 | memblock.reserved.regions[0].base = 0; | ||
780 | memblock.reserved.regions[0].size = 0; | ||
781 | memblock.reserved.cnt = 1; | ||
782 | |||
783 | memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE; | ||
784 | } | ||
785 | |||
786 | static int __init early_memblock(char *p) | ||
787 | { | ||
788 | if (p && strstr(p, "debug")) | ||
789 | memblock_debug = 1; | ||
790 | return 0; | ||
791 | } | ||
792 | early_param("memblock", early_memblock); | ||
793 | |||
794 | #if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK) | ||
795 | |||
796 | static int memblock_debug_show(struct seq_file *m, void *private) | ||
797 | { | ||
798 | struct memblock_type *type = m->private; | ||
799 | struct memblock_region *reg; | ||
800 | int i; | ||
801 | |||
802 | for (i = 0; i < type->cnt; i++) { | ||
803 | reg = &type->regions[i]; | ||
804 | seq_printf(m, "%4d: ", i); | ||
805 | if (sizeof(phys_addr_t) == 4) | ||
806 | seq_printf(m, "0x%08lx..0x%08lx\n", | ||
807 | (unsigned long)reg->base, | ||
808 | (unsigned long)(reg->base + reg->size - 1)); | ||
809 | else | ||
810 | seq_printf(m, "0x%016llx..0x%016llx\n", | ||
811 | (unsigned long long)reg->base, | ||
812 | (unsigned long long)(reg->base + reg->size - 1)); | ||
521 | 813 | ||
522 | for (i = 0; i < memblock.memory.cnt; i++) { | ||
523 | u64 start = memblock.memory.region[i].base; | ||
524 | u64 end = start + memblock.memory.region[i].size - 1; | ||
525 | |||
526 | if (start > rend) | ||
527 | return -1; | ||
528 | |||
529 | if ((end >= rstart) && (start < rend)) { | ||
530 | /* adjust the request */ | ||
531 | if (rstart < start) | ||
532 | rstart = start; | ||
533 | if (rend > end) | ||
534 | rend = end; | ||
535 | res->base = rstart; | ||
536 | res->size = rend - rstart + 1; | ||
537 | return 0; | ||
538 | } | ||
539 | } | 814 | } |
540 | return -1; | 815 | return 0; |
816 | } | ||
817 | |||
818 | static int memblock_debug_open(struct inode *inode, struct file *file) | ||
819 | { | ||
820 | return single_open(file, memblock_debug_show, inode->i_private); | ||
541 | } | 821 | } |
822 | |||
823 | static const struct file_operations memblock_debug_fops = { | ||
824 | .open = memblock_debug_open, | ||
825 | .read = seq_read, | ||
826 | .llseek = seq_lseek, | ||
827 | .release = single_release, | ||
828 | }; | ||
829 | |||
830 | static int __init memblock_init_debugfs(void) | ||
831 | { | ||
832 | struct dentry *root = debugfs_create_dir("memblock", NULL); | ||
833 | if (!root) | ||
834 | return -ENXIO; | ||
835 | debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); | ||
836 | debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); | ||
837 | |||
838 | return 0; | ||
839 | } | ||
840 | __initcall(memblock_init_debugfs); | ||
841 | |||
842 | #endif /* CONFIG_DEBUG_FS */ | ||
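The memblock_init()/memblock_analyze() pair above defines the boot-time contract an architecture is expected to follow. A minimal sketch of that sequence, assuming the memblock_add()/memblock_reserve() helpers already declared in <linux/memblock.h>; all addresses and sizes are invented for illustration:

#include <linux/memblock.h>

void __init example_arch_memblock_setup(void)
{
	memblock_init();				/* hook up the static INIT_MEMBLOCK_REGIONS arrays */
	memblock_add(0x00000000, 0x40000000);		/* 1GB of RAM reported by firmware (made up) */
	memblock_reserve(0x01000000, 0x00800000);	/* e.g. protect an 8MB kernel image at 16MB */
	memblock_analyze();				/* fills memblock.memory_size, allows array resizing */
	memblock_set_current_limit(0x20000000);		/* keep early allocations below the mapped 512MB */
}

Once memblock_analyze() has run, memblock_can_resize is set, so later memblock_add()/memblock_reserve() calls may grow the region arrays past INIT_MEMBLOCK_REGIONS.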
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3eed583895a6..9be3cf8a5da4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -3587,9 +3587,13 @@ unlock: | |||
3587 | 3587 | ||
3588 | static void mem_cgroup_threshold(struct mem_cgroup *memcg) | 3588 | static void mem_cgroup_threshold(struct mem_cgroup *memcg) |
3589 | { | 3589 | { |
3590 | __mem_cgroup_threshold(memcg, false); | 3590 | while (memcg) { |
3591 | if (do_swap_account) | 3591 | __mem_cgroup_threshold(memcg, false); |
3592 | __mem_cgroup_threshold(memcg, true); | 3592 | if (do_swap_account) |
3593 | __mem_cgroup_threshold(memcg, true); | ||
3594 | |||
3595 | memcg = parent_mem_cgroup(memcg); | ||
3596 | } | ||
3593 | } | 3597 | } |
3594 | 3598 | ||
3595 | static int compare_thresholds(const void *a, const void *b) | 3599 | static int compare_thresholds(const void *a, const void *b) |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9c26eeca1342..757f6b0accfe 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(hwpoison_filter); | |||
183 | * signal. | 183 | * signal. |
184 | */ | 184 | */ |
185 | static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, | 185 | static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, |
186 | unsigned long pfn) | 186 | unsigned long pfn, struct page *page) |
187 | { | 187 | { |
188 | struct siginfo si; | 188 | struct siginfo si; |
189 | int ret; | 189 | int ret; |
@@ -198,7 +198,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, | |||
198 | #ifdef __ARCH_SI_TRAPNO | 198 | #ifdef __ARCH_SI_TRAPNO |
199 | si.si_trapno = trapno; | 199 | si.si_trapno = trapno; |
200 | #endif | 200 | #endif |
201 | si.si_addr_lsb = PAGE_SHIFT; | 201 | si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT; |
202 | /* | 202 | /* |
203 | * Don't use force here, it's convenient if the signal | 203 | * Don't use force here, it's convenient if the signal |
204 | * can be temporarily blocked. | 204 | * can be temporarily blocked. |
@@ -235,7 +235,7 @@ void shake_page(struct page *p, int access) | |||
235 | int nr; | 235 | int nr; |
236 | do { | 236 | do { |
237 | nr = shrink_slab(1000, GFP_KERNEL, 1000); | 237 | nr = shrink_slab(1000, GFP_KERNEL, 1000); |
238 | if (page_count(p) == 0) | 238 | if (page_count(p) == 1) |
239 | break; | 239 | break; |
240 | } while (nr > 10); | 240 | } while (nr > 10); |
241 | } | 241 | } |
@@ -327,7 +327,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p, | |||
327 | * wrong earlier. | 327 | * wrong earlier. |
328 | */ | 328 | */ |
329 | static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, | 329 | static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, |
330 | int fail, unsigned long pfn) | 330 | int fail, struct page *page, unsigned long pfn) |
331 | { | 331 | { |
332 | struct to_kill *tk, *next; | 332 | struct to_kill *tk, *next; |
333 | 333 | ||
@@ -352,7 +352,7 @@ static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, | |||
352 | * process anyways. | 352 | * process anyways. |
353 | */ | 353 | */ |
354 | else if (kill_proc_ao(tk->tsk, tk->addr, trapno, | 354 | else if (kill_proc_ao(tk->tsk, tk->addr, trapno, |
355 | pfn) < 0) | 355 | pfn, page) < 0) |
356 | printk(KERN_ERR | 356 | printk(KERN_ERR |
357 | "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", | 357 | "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", |
358 | pfn, tk->tsk->comm, tk->tsk->pid); | 358 | pfn, tk->tsk->comm, tk->tsk->pid); |
@@ -928,7 +928,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
928 | * any accesses to the poisoned memory. | 928 | * any accesses to the poisoned memory. |
929 | */ | 929 | */ |
930 | kill_procs_ao(&tokill, !!PageDirty(hpage), trapno, | 930 | kill_procs_ao(&tokill, !!PageDirty(hpage), trapno, |
931 | ret != SWAP_SUCCESS, pfn); | 931 | ret != SWAP_SUCCESS, p, pfn); |
932 | 932 | ||
933 | return ret; | 933 | return ret; |
934 | } | 934 | } |
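Passing the faulting page into kill_proc_ao() lets si_addr_lsb report the real poison granularity instead of always claiming a base page. A small standalone illustration of the arithmetic, with typical x86-64 values assumed (PAGE_SHIFT = 12, a 2MB huge page has compound_order 9):

#include <stdio.h>

int main(void)
{
	int page_shift = 12;		/* 4KB base pages (assumed) */
	int huge_order = 9;		/* 2MB compound page = 512 base pages (assumed) */

	/* base page: only the low 12 bits of si_addr are meaningless */
	printf("base page: si_addr_lsb = %d\n", page_shift);
	/* huge page: the whole 2MB region around si_addr is poisoned */
	printf("huge page: si_addr_lsb = %d\n", huge_order + page_shift);
	return 0;
}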
diff --git a/mm/memory.c b/mm/memory.c index 6b2ab1051851..98b58fecedef 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2623 | unsigned int flags, pte_t orig_pte) | 2623 | unsigned int flags, pte_t orig_pte) |
2624 | { | 2624 | { |
2625 | spinlock_t *ptl; | 2625 | spinlock_t *ptl; |
2626 | struct page *page; | 2626 | struct page *page, *swapcache = NULL; |
2627 | swp_entry_t entry; | 2627 | swp_entry_t entry; |
2628 | pte_t pte; | 2628 | pte_t pte; |
2629 | struct mem_cgroup *ptr = NULL; | 2629 | struct mem_cgroup *ptr = NULL; |
@@ -2679,10 +2679,25 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2679 | lock_page(page); | 2679 | lock_page(page); |
2680 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); | 2680 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); |
2681 | 2681 | ||
2682 | page = ksm_might_need_to_copy(page, vma, address); | 2682 | /* |
2683 | if (!page) { | 2683 | * Make sure try_to_free_swap or reuse_swap_page or swapoff did not |
2684 | ret = VM_FAULT_OOM; | 2684 | * release the swapcache from under us. The page pin, and pte_same |
2685 | goto out; | 2685 | * test below, are not enough to exclude that. Even if it is still |
2686 | * swapcache, we need to check that the page's swap has not changed. | ||
2687 | */ | ||
2688 | if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val)) | ||
2689 | goto out_page; | ||
2690 | |||
2691 | if (ksm_might_need_to_copy(page, vma, address)) { | ||
2692 | swapcache = page; | ||
2693 | page = ksm_does_need_to_copy(page, vma, address); | ||
2694 | |||
2695 | if (unlikely(!page)) { | ||
2696 | ret = VM_FAULT_OOM; | ||
2697 | page = swapcache; | ||
2698 | swapcache = NULL; | ||
2699 | goto out_page; | ||
2700 | } | ||
2686 | } | 2701 | } |
2687 | 2702 | ||
2688 | if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { | 2703 | if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { |
@@ -2735,6 +2750,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2735 | if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) | 2750 | if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) |
2736 | try_to_free_swap(page); | 2751 | try_to_free_swap(page); |
2737 | unlock_page(page); | 2752 | unlock_page(page); |
2753 | if (swapcache) { | ||
2754 | /* | ||
2755 | * Hold the lock to avoid the swap entry being reused | ||
2756 | * until we take the PT lock for the pte_same() check | ||
2757 | * (to avoid false positives from pte_same). For | ||
2758 | * further safety release the lock after the swap_free | ||
2759 | * so that the swap count won't change under a | ||
2760 | * parallel locked swapcache. | ||
2761 | */ | ||
2762 | unlock_page(swapcache); | ||
2763 | page_cache_release(swapcache); | ||
2764 | } | ||
2738 | 2765 | ||
2739 | if (flags & FAULT_FLAG_WRITE) { | 2766 | if (flags & FAULT_FLAG_WRITE) { |
2740 | ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); | 2767 | ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); |
@@ -2756,6 +2783,10 @@ out_page: | |||
2756 | unlock_page(page); | 2783 | unlock_page(page); |
2757 | out_release: | 2784 | out_release: |
2758 | page_cache_release(page); | 2785 | page_cache_release(page); |
2786 | if (swapcache) { | ||
2787 | unlock_page(swapcache); | ||
2788 | page_cache_release(swapcache); | ||
2789 | } | ||
2759 | return ret; | 2790 | return ret; |
2760 | } | 2791 | } |
2761 | 2792 | ||
@@ -3154,7 +3185,7 @@ static inline int handle_pte_fault(struct mm_struct *mm, | |||
3154 | * with threads. | 3185 | * with threads. |
3155 | */ | 3186 | */ |
3156 | if (flags & FAULT_FLAG_WRITE) | 3187 | if (flags & FAULT_FLAG_WRITE) |
3157 | flush_tlb_page(vma, address); | 3188 | flush_tlb_fix_spurious_fault(vma, address); |
3158 | } | 3189 | } |
3159 | unlock: | 3190 | unlock: |
3160 | pte_unmap_unlock(pte, ptl); | 3191 | pte_unmap_unlock(pte, ptl); |
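The last hunk replaces flush_tlb_page() with flush_tlb_fix_spurious_fault() for spurious write faults. As I read the companion asm-generic change, the default keeps the old behaviour, while an architecture whose hardware simply retries after a stale protection fault can turn it into a no-op; a hedged sketch of the two alternative definitions:

/* asm-generic fallback (assumed): behaviour unchanged */
#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

/* an architecture that tolerates a briefly stale TLB entry can override it */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)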
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a4cfcdc00455..d4e940a26945 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page) | |||
584 | /* Return the start of the next active pageblock after a given page */ | 584 | /* Return the start of the next active pageblock after a given page */ |
585 | static struct page *next_active_pageblock(struct page *page) | 585 | static struct page *next_active_pageblock(struct page *page) |
586 | { | 586 | { |
587 | int pageblocks_stride; | ||
588 | |||
589 | /* Ensure the starting page is pageblock-aligned */ | 587 | /* Ensure the starting page is pageblock-aligned */ |
590 | BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); | 588 | BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); |
591 | 589 | ||
592 | /* Move forward by at least 1 * pageblock_nr_pages */ | ||
593 | pageblocks_stride = 1; | ||
594 | |||
595 | /* If the entire pageblock is free, move to the end of free page */ | 590 | /* If the entire pageblock is free, move to the end of free page */ |
596 | if (pageblock_free(page)) | 591 | if (pageblock_free(page)) { |
597 | pageblocks_stride += page_order(page) - pageblock_order; | 592 | int order; |
593 | /* be careful. we don't have locks, page_order can be changed.*/ | ||
594 | order = page_order(page); | ||
595 | if ((order < MAX_ORDER) && (order >= pageblock_order)) | ||
596 | return page + (1 << order); | ||
597 | } | ||
598 | 598 | ||
599 | return page + (pageblocks_stride * pageblock_nr_pages); | 599 | return page + pageblock_nr_pages; |
600 | } | 600 | } |
601 | 601 | ||
602 | /* Checks if this range of memory is likely to be hot-removable. */ | 602 | /* Checks if this range of memory is likely to be hot-removable. */ |
@@ -840,7 +840,6 @@ repeat: | |||
840 | ret = 0; | 840 | ret = 0; |
841 | if (drain) { | 841 | if (drain) { |
842 | lru_add_drain_all(); | 842 | lru_add_drain_all(); |
843 | flush_scheduled_work(); | ||
844 | cond_resched(); | 843 | cond_resched(); |
845 | drain_all_pages(); | 844 | drain_all_pages(); |
846 | } | 845 | } |
@@ -862,7 +861,6 @@ repeat: | |||
862 | } | 861 | } |
863 | /* drain all zone's lru pagevec, this is asynchronous... */ | 862 | ||
864 | lru_add_drain_all(); | 863 | lru_add_drain_all(); |
865 | flush_scheduled_work(); | ||
866 | yield(); | 864 | yield(); |
867 | /* drain pcp pages, this is synchronous. */ | 865 | ||
868 | drain_all_pages(); | 866 | drain_all_pages(); |
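The reworked next_active_pageblock() only trusts page_order() when the value still looks sane, because it is read without zone->lock. A worked example of the stride it produces, using typical x86-64 constants as assumptions (pageblock_order = 9, MAX_ORDER = 11):

#include <stdio.h>

int main(void)
{
	int pageblock_order = 9;			/* 512-page pageblocks (assumed) */
	int max_order = 11;
	long pageblock_nr_pages = 1L << pageblock_order;
	int order = 10;					/* a free buddy spanning two pageblocks */

	if (order >= pageblock_order && order < max_order)
		printf("skip %ld pages: jump over the whole free buddy\n", 1L << order);
	else
		printf("skip %ld pages: fall back to a single pageblock\n", pageblock_nr_pages);
	return 0;
}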
diff --git a/mm/mlock.c b/mm/mlock.c index cbae7c5b9568..b70919ce4f72 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page) | |||
135 | } | 135 | } |
136 | } | 136 | } |
137 | 137 | ||
138 | /* Is the vma a continuation of the stack vma above it? */ | ||
139 | static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) | ||
140 | { | ||
141 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | ||
142 | } | ||
143 | |||
144 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) | 138 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) |
145 | { | 139 | { |
146 | return (vma->vm_flags & VM_GROWSDOWN) && | 140 | return (vma->vm_flags & VM_GROWSDOWN) && |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -2009,6 +2009,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, | |||
2009 | removed_exe_file_vma(mm); | 2009 | removed_exe_file_vma(mm); |
2010 | fput(new->vm_file); | 2010 | fput(new->vm_file); |
2011 | } | 2011 | } |
2012 | unlink_anon_vmas(new); | ||
2012 | out_free_mpol: | 2013 | out_free_mpol: |
2013 | mpol_put(pol); | 2014 | mpol_put(pol); |
2014 | out_free_vma: | 2015 | out_free_vma: |
diff --git a/mm/mmzone.c b/mm/mmzone.c index f5b7d1760213..e35bfb82c855 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c | |||
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn, | |||
87 | return 1; | 87 | return 1; |
88 | } | 88 | } |
89 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ | 89 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ |
90 | |||
91 | #ifdef CONFIG_SMP | ||
92 | /* Called when a more accurate view of NR_FREE_PAGES is needed */ | ||
93 | unsigned long zone_nr_free_pages(struct zone *zone) | ||
94 | { | ||
95 | unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES); | ||
96 | |||
97 | /* | ||
98 | * While kswapd is awake, it is considered the zone is under some | ||
99 | * memory pressure. Under pressure, there is a risk that | ||
100 | * per-cpu-counter-drift will allow the min watermark to be breached | ||
101 | * potentially causing a live-lock. While kswapd is awake and | ||
102 | * free pages are low, get a better estimate for free pages | ||
103 | */ | ||
104 | if (nr_free_pages < zone->percpu_drift_mark && | ||
105 | !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) | ||
106 | return zone_page_state_snapshot(zone, NR_FREE_PAGES); | ||
107 | |||
108 | return nr_free_pages; | ||
109 | } | ||
110 | #endif /* CONFIG_SMP */ | ||
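zone_nr_free_pages() exists because NR_FREE_PAGES is folded in from per-cpu deltas lazily, so the worst-case error grows with the CPU count. Rough numbers, all assumed, showing why the exact snapshot matters near the watermarks; the vmstat hunk at the end of this patch computes percpu_drift_mark from the same quantities:

#include <stdio.h>

int main(void)
{
	long online_cpus = 64;
	long stat_threshold = 125;	/* max per-cpu delta per counter (assumed) */
	long min_wmark = 2048, low_wmark = 2560, high_wmark = 3072;	/* pages, assumed */

	long max_drift = online_cpus * stat_threshold;	/* 8000 pages, ~31MB of 4KB pages */
	long tolerate_drift = low_wmark - min_wmark;	/* 512 pages */

	printf("max drift %ld pages vs watermark window %ld pages\n",
	       max_drift, tolerate_drift);
	if (max_drift > tolerate_drift)
		printf("percpu_drift_mark = %ld pages\n", high_wmark + max_drift);
	return 0;
}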
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index fc81cb22869e..4029583a1024 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -121,8 +121,8 @@ struct task_struct *find_lock_task_mm(struct task_struct *p) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /* return true if the task is not adequate as candidate victim task. */ | 123 | /* return true if the task is not adequate as candidate victim task. */ |
124 | static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem, | 124 | static bool oom_unkillable_task(struct task_struct *p, |
125 | const nodemask_t *nodemask) | 125 | const struct mem_cgroup *mem, const nodemask_t *nodemask) |
126 | { | 126 | { |
127 | if (is_global_init(p)) | 127 | if (is_global_init(p)) |
128 | return true; | 128 | return true; |
@@ -208,8 +208,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, | |||
208 | */ | 208 | */ |
209 | points += p->signal->oom_score_adj; | 209 | points += p->signal->oom_score_adj; |
210 | 210 | ||
211 | if (points < 0) | 211 | /* |
212 | return 0; | 212 | * Never return 0 for an eligible task that may be killed since it's |
213 | * possible that no single user task uses more than 0.1% of memory and | ||
214 | * no single admin task uses more than 3.0%. | ||
215 | */ | ||
216 | if (points <= 0) | ||
217 | return 1; | ||
213 | return (points < 1000) ? points : 1000; | 218 | return (points < 1000) ? points : 1000; |
214 | } | 219 | } |
215 | 220 | ||
@@ -339,26 +344,24 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, | |||
339 | /** | 344 | /** |
340 | * dump_tasks - dump current memory state of all system tasks | 345 | * dump_tasks - dump current memory state of all system tasks |
341 | * @mem: current's memory controller, if constrained | 346 | * @mem: current's memory controller, if constrained |
347 | * @nodemask: nodemask passed to page allocator for mempolicy ooms | ||
342 | * | 348 | * |
343 | * Dumps the current memory state of all system tasks, excluding kernel threads. | 349 | * Dumps the current memory state of all eligible tasks. Tasks not in the same |
350 | * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes | ||
351 | * are not shown. | ||
344 | * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj | 352 | * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj |
345 | * value, oom_score_adj value, and name. | 353 | * value, oom_score_adj value, and name. |
346 | * | 354 | * |
347 | * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are | ||
348 | * shown. | ||
349 | * | ||
350 | * Call with tasklist_lock read-locked. | 355 | * Call with tasklist_lock read-locked. |
351 | */ | 356 | */ |
352 | static void dump_tasks(const struct mem_cgroup *mem) | 357 | static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask) |
353 | { | 358 | { |
354 | struct task_struct *p; | 359 | struct task_struct *p; |
355 | struct task_struct *task; | 360 | struct task_struct *task; |
356 | 361 | ||
357 | pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n"); | 362 | pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n"); |
358 | for_each_process(p) { | 363 | for_each_process(p) { |
359 | if (p->flags & PF_KTHREAD) | 364 | if (oom_unkillable_task(p, mem, nodemask)) |
360 | continue; | ||
361 | if (mem && !task_in_mem_cgroup(p, mem)) | ||
362 | continue; | 365 | continue; |
363 | 366 | ||
364 | task = find_lock_task_mm(p); | 367 | task = find_lock_task_mm(p); |
@@ -381,7 +384,7 @@ static void dump_tasks(const struct mem_cgroup *mem) | |||
381 | } | 384 | } |
382 | 385 | ||
383 | static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, | 386 | static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, |
384 | struct mem_cgroup *mem) | 387 | struct mem_cgroup *mem, const nodemask_t *nodemask) |
385 | { | 388 | { |
386 | task_lock(current); | 389 | task_lock(current); |
387 | pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " | 390 | pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " |
@@ -394,7 +397,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, | |||
394 | mem_cgroup_print_oom_info(mem, p); | 397 | mem_cgroup_print_oom_info(mem, p); |
395 | show_mem(); | 398 | show_mem(); |
396 | if (sysctl_oom_dump_tasks) | 399 | if (sysctl_oom_dump_tasks) |
397 | dump_tasks(mem); | 400 | dump_tasks(mem, nodemask); |
398 | } | 401 | } |
399 | 402 | ||
400 | #define K(x) ((x) << (PAGE_SHIFT-10)) | 403 | #define K(x) ((x) << (PAGE_SHIFT-10)) |
@@ -436,7 +439,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
436 | unsigned int victim_points = 0; | 439 | unsigned int victim_points = 0; |
437 | 440 | ||
438 | if (printk_ratelimit()) | 441 | if (printk_ratelimit()) |
439 | dump_header(p, gfp_mask, order, mem); | 442 | dump_header(p, gfp_mask, order, mem, nodemask); |
440 | 443 | ||
441 | /* | 444 | /* |
442 | * If the task is already exiting, don't alarm the sysadmin or kill | 445 | * If the task is already exiting, don't alarm the sysadmin or kill |
@@ -482,7 +485,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
482 | * Determines whether the kernel must panic because of the panic_on_oom sysctl. | 485 | * Determines whether the kernel must panic because of the panic_on_oom sysctl. |
483 | */ | 486 | */ |
484 | static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, | 487 | static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, |
485 | int order) | 488 | int order, const nodemask_t *nodemask) |
486 | { | 489 | { |
487 | if (likely(!sysctl_panic_on_oom)) | 490 | if (likely(!sysctl_panic_on_oom)) |
488 | return; | 491 | return; |
@@ -496,7 +499,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, | |||
496 | return; | 499 | return; |
497 | } | 500 | } |
498 | read_lock(&tasklist_lock); | 501 | read_lock(&tasklist_lock); |
499 | dump_header(NULL, gfp_mask, order, NULL); | 502 | dump_header(NULL, gfp_mask, order, NULL, nodemask); |
500 | read_unlock(&tasklist_lock); | 503 | read_unlock(&tasklist_lock); |
501 | panic("Out of memory: %s panic_on_oom is enabled\n", | 504 | panic("Out of memory: %s panic_on_oom is enabled\n", |
502 | sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); | 505 | sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); |
@@ -509,7 +512,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) | |||
509 | unsigned int points = 0; | 512 | unsigned int points = 0; |
510 | struct task_struct *p; | 513 | struct task_struct *p; |
511 | 514 | ||
512 | check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0); | 515 | check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL); |
513 | limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT; | 516 | limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT; |
514 | read_lock(&tasklist_lock); | 517 | read_lock(&tasklist_lock); |
515 | retry: | 518 | retry: |
@@ -641,6 +644,7 @@ static void clear_system_oom(void) | |||
641 | void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | 644 | void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, |
642 | int order, nodemask_t *nodemask) | 645 | int order, nodemask_t *nodemask) |
643 | { | 646 | { |
647 | const nodemask_t *mpol_mask; | ||
644 | struct task_struct *p; | 648 | struct task_struct *p; |
645 | unsigned long totalpages; | 649 | unsigned long totalpages; |
646 | unsigned long freed = 0; | 650 | unsigned long freed = 0; |
@@ -670,7 +674,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
670 | */ | 674 | */ |
671 | constraint = constrained_alloc(zonelist, gfp_mask, nodemask, | 675 | constraint = constrained_alloc(zonelist, gfp_mask, nodemask, |
672 | &totalpages); | 676 | &totalpages); |
673 | check_panic_on_oom(constraint, gfp_mask, order); | 677 | mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL; |
678 | check_panic_on_oom(constraint, gfp_mask, order, mpol_mask); | ||
674 | 679 | ||
675 | read_lock(&tasklist_lock); | 680 | read_lock(&tasklist_lock); |
676 | if (sysctl_oom_kill_allocating_task && | 681 | if (sysctl_oom_kill_allocating_task && |
@@ -688,15 +693,13 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
688 | } | 693 | } |
689 | 694 | ||
690 | retry: | 695 | retry: |
691 | p = select_bad_process(&points, totalpages, NULL, | 696 | p = select_bad_process(&points, totalpages, NULL, mpol_mask); |
692 | constraint == CONSTRAINT_MEMORY_POLICY ? nodemask : | ||
693 | NULL); | ||
694 | if (PTR_ERR(p) == -1UL) | 697 | if (PTR_ERR(p) == -1UL) |
695 | goto out; | 698 | goto out; |
696 | 699 | ||
697 | /* Found nothing?!?! Either we hang forever, or we panic. */ | 700 | /* Found nothing?!?! Either we hang forever, or we panic. */ |
698 | if (!p) { | 701 | if (!p) { |
699 | dump_header(NULL, gfp_mask, order, NULL); | 702 | dump_header(NULL, gfp_mask, order, NULL, mpol_mask); |
700 | read_unlock(&tasklist_lock); | 703 | read_unlock(&tasklist_lock); |
701 | panic("Out of memory and no killable processes...\n"); | 704 | panic("Out of memory and no killable processes...\n"); |
702 | } | 705 | } |
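The oom_badness() tweak only makes sense on the 0..1000 scale, where one point is roughly 0.1% of the allowed memory. A toy recomputation, with the proportionality paraphrased rather than copied from the heuristic and all numbers invented:

#include <stdio.h>

int main(void)
{
	long totalpages = 4L << 20;	/* 16GB of 4KB pages (assumed) */
	long rss_plus_swap = 2048;	/* ~8MB task, well under 0.1% of RAM */
	long oom_score_adj = 0;

	long points = rss_plus_swap * 1000 / totalpages + oom_score_adj;
	if (points <= 0)
		points = 1;		/* the patched clamp: eligible tasks never score 0 */
	if (points > 1000)
		points = 1000;
	printf("badness = %ld\n", points);	/* prints 1, not 0 */
	return 0;
}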
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a9649f4b261e..2a362c52fdf4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
22 | #include <linux/jiffies.h> | 22 | #include <linux/jiffies.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/memblock.h> | ||
24 | #include <linux/compiler.h> | 25 | #include <linux/compiler.h> |
25 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
26 | #include <linux/kmemcheck.h> | 27 | #include <linux/kmemcheck.h> |
@@ -588,13 +589,13 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
588 | { | 589 | { |
589 | int migratetype = 0; | 590 | int migratetype = 0; |
590 | int batch_free = 0; | 591 | int batch_free = 0; |
592 | int to_free = count; | ||
591 | 593 | ||
592 | spin_lock(&zone->lock); | 594 | spin_lock(&zone->lock); |
593 | zone->all_unreclaimable = 0; | 595 | zone->all_unreclaimable = 0; |
594 | zone->pages_scanned = 0; | 596 | zone->pages_scanned = 0; |
595 | 597 | ||
596 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); | 598 | while (to_free) { |
597 | while (count) { | ||
598 | struct page *page; | 599 | struct page *page; |
599 | struct list_head *list; | 600 | struct list_head *list; |
600 | 601 | ||
@@ -619,8 +620,9 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
619 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ | 620 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ |
620 | __free_one_page(page, zone, 0, page_private(page)); | 621 | __free_one_page(page, zone, 0, page_private(page)); |
621 | trace_mm_page_pcpu_drain(page, 0, page_private(page)); | 622 | trace_mm_page_pcpu_drain(page, 0, page_private(page)); |
622 | } while (--count && --batch_free && !list_empty(list)); | 623 | } while (--to_free && --batch_free && !list_empty(list)); |
623 | } | 624 | } |
625 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); | ||
624 | spin_unlock(&zone->lock); | 626 | spin_unlock(&zone->lock); |
625 | } | 627 | } |
626 | 628 | ||
@@ -631,8 +633,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order, | |||
631 | zone->all_unreclaimable = 0; | 633 | zone->all_unreclaimable = 0; |
632 | zone->pages_scanned = 0; | 634 | zone->pages_scanned = 0; |
633 | 635 | ||
634 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | ||
635 | __free_one_page(page, zone, order, migratetype); | 636 | __free_one_page(page, zone, order, migratetype); |
637 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | ||
636 | spin_unlock(&zone->lock); | 638 | spin_unlock(&zone->lock); |
637 | } | 639 | } |
638 | 640 | ||
@@ -1461,7 +1463,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |||
1461 | { | 1463 | { |
1462 | /* free_pages may go negative - that's OK */ | 1464 | /* free_pages may go negative - that's OK */ |
1463 | long min = mark; | 1465 | long min = mark; |
1464 | long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; | 1466 | long free_pages = zone_nr_free_pages(z) - (1 << order) + 1; |
1465 | int o; | 1467 | int o; |
1466 | 1468 | ||
1467 | if (alloc_flags & ALLOC_HIGH) | 1469 | if (alloc_flags & ALLOC_HIGH) |
@@ -1846,6 +1848,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, | |||
1846 | struct page *page = NULL; | 1848 | struct page *page = NULL; |
1847 | struct reclaim_state reclaim_state; | 1849 | struct reclaim_state reclaim_state; |
1848 | struct task_struct *p = current; | 1850 | struct task_struct *p = current; |
1851 | bool drained = false; | ||
1849 | 1852 | ||
1850 | cond_resched(); | 1853 | cond_resched(); |
1851 | 1854 | ||
@@ -1864,14 +1867,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, | |||
1864 | 1867 | ||
1865 | cond_resched(); | 1868 | cond_resched(); |
1866 | 1869 | ||
1867 | if (order != 0) | 1870 | if (unlikely(!(*did_some_progress))) |
1868 | drain_all_pages(); | 1871 | return NULL; |
1869 | 1872 | ||
1870 | if (likely(*did_some_progress)) | 1873 | retry: |
1871 | page = get_page_from_freelist(gfp_mask, nodemask, order, | 1874 | page = get_page_from_freelist(gfp_mask, nodemask, order, |
1872 | zonelist, high_zoneidx, | 1875 | zonelist, high_zoneidx, |
1873 | alloc_flags, preferred_zone, | 1876 | alloc_flags, preferred_zone, |
1874 | migratetype); | 1877 | migratetype); |
1878 | |||
1879 | /* | ||
1880 | * If an allocation failed after direct reclaim, it could be because | ||
1881 | * pages are pinned on the per-cpu lists. Drain them and try again | ||
1882 | */ | ||
1883 | if (!page && !drained) { | ||
1884 | drain_all_pages(); | ||
1885 | drained = true; | ||
1886 | goto retry; | ||
1887 | } | ||
1888 | |||
1875 | return page; | 1889 | return page; |
1876 | } | 1890 | } |
1877 | 1891 | ||
@@ -2423,7 +2437,7 @@ void show_free_areas(void) | |||
2423 | " all_unreclaimable? %s" | 2437 | " all_unreclaimable? %s" |
2424 | "\n", | 2438 | "\n", |
2425 | zone->name, | 2439 | zone->name, |
2426 | K(zone_page_state(zone, NR_FREE_PAGES)), | 2440 | K(zone_nr_free_pages(zone)), |
2427 | K(min_wmark_pages(zone)), | 2441 | K(min_wmark_pages(zone)), |
2428 | K(low_wmark_pages(zone)), | 2442 | K(low_wmark_pages(zone)), |
2429 | K(high_wmark_pages(zone)), | 2443 | K(high_wmark_pages(zone)), |
@@ -3623,6 +3637,41 @@ void __init free_bootmem_with_active_regions(int nid, | |||
3623 | } | 3637 | } |
3624 | } | 3638 | } |
3625 | 3639 | ||
3640 | #ifdef CONFIG_HAVE_MEMBLOCK | ||
3641 | u64 __init find_memory_core_early(int nid, u64 size, u64 align, | ||
3642 | u64 goal, u64 limit) | ||
3643 | { | ||
3644 | int i; | ||
3645 | |||
3646 | /* Need to go over early_node_map to find out good range for node */ | ||
3647 | for_each_active_range_index_in_nid(i, nid) { | ||
3648 | u64 addr; | ||
3649 | u64 ei_start, ei_last; | ||
3650 | u64 final_start, final_end; | ||
3651 | |||
3652 | ei_last = early_node_map[i].end_pfn; | ||
3653 | ei_last <<= PAGE_SHIFT; | ||
3654 | ei_start = early_node_map[i].start_pfn; | ||
3655 | ei_start <<= PAGE_SHIFT; | ||
3656 | |||
3657 | final_start = max(ei_start, goal); | ||
3658 | final_end = min(ei_last, limit); | ||
3659 | |||
3660 | if (final_start >= final_end) | ||
3661 | continue; | ||
3662 | |||
3663 | addr = memblock_find_in_range(final_start, final_end, size, align); | ||
3664 | |||
3665 | if (addr == MEMBLOCK_ERROR) | ||
3666 | continue; | ||
3667 | |||
3668 | return addr; | ||
3669 | } | ||
3670 | |||
3671 | return MEMBLOCK_ERROR; | ||
3672 | } | ||
3673 | #endif | ||
3674 | |||
3626 | int __init add_from_early_node_map(struct range *range, int az, | 3675 | int __init add_from_early_node_map(struct range *range, int az, |
3627 | int nr_range, int nid) | 3676 | int nr_range, int nid) |
3628 | { | 3677 | { |
@@ -3642,46 +3691,26 @@ int __init add_from_early_node_map(struct range *range, int az, | |||
3642 | void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, | 3691 | void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, |
3643 | u64 goal, u64 limit) | 3692 | u64 goal, u64 limit) |
3644 | { | 3693 | { |
3645 | int i; | ||
3646 | void *ptr; | 3694 | void *ptr; |
3695 | u64 addr; | ||
3647 | 3696 | ||
3648 | if (limit > get_max_mapped()) | 3697 | if (limit > memblock.current_limit) |
3649 | limit = get_max_mapped(); | 3698 | limit = memblock.current_limit; |
3650 | 3699 | ||
3651 | /* need to go over early_node_map to find out good range for node */ | 3700 | addr = find_memory_core_early(nid, size, align, goal, limit); |
3652 | for_each_active_range_index_in_nid(i, nid) { | ||
3653 | u64 addr; | ||
3654 | u64 ei_start, ei_last; | ||
3655 | 3701 | ||
3656 | ei_last = early_node_map[i].end_pfn; | 3702 | if (addr == MEMBLOCK_ERROR) |
3657 | ei_last <<= PAGE_SHIFT; | 3703 | return NULL; |
3658 | ei_start = early_node_map[i].start_pfn; | ||
3659 | ei_start <<= PAGE_SHIFT; | ||
3660 | addr = find_early_area(ei_start, ei_last, | ||
3661 | goal, limit, size, align); | ||
3662 | |||
3663 | if (addr == -1ULL) | ||
3664 | continue; | ||
3665 | |||
3666 | #if 0 | ||
3667 | printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n", | ||
3668 | nid, | ||
3669 | ei_start, ei_last, goal, limit, size, | ||
3670 | align, addr); | ||
3671 | #endif | ||
3672 | |||
3673 | ptr = phys_to_virt(addr); | ||
3674 | memset(ptr, 0, size); | ||
3675 | reserve_early_without_check(addr, addr + size, "BOOTMEM"); | ||
3676 | /* | ||
3677 | * The min_count is set to 0 so that bootmem allocated blocks | ||
3678 | * are never reported as leaks. | ||
3679 | */ | ||
3680 | kmemleak_alloc(ptr, size, 0, 0); | ||
3681 | return ptr; | ||
3682 | } | ||
3683 | 3704 | ||
3684 | return NULL; | 3705 | ptr = phys_to_virt(addr); |
3706 | memset(ptr, 0, size); | ||
3707 | memblock_x86_reserve_range(addr, addr + size, "BOOTMEM"); | ||
3708 | /* | ||
3709 | * The min_count is set to 0 so that bootmem allocated blocks | ||
3710 | * are never reported as leaks. | ||
3711 | */ | ||
3712 | kmemleak_alloc(ptr, size, 0, 0); | ||
3713 | return ptr; | ||
3685 | } | 3714 | } |
3686 | #endif | 3715 | #endif |
3687 | 3716 | ||
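__alloc_memory_core_early() now leans entirely on memblock for the range search and the reservation. A hedged sketch of that contract as the hunk uses it; the range and size are illustrative, and MEMBLOCK_ERROR plus the x86-only memblock_x86_reserve_range() helper are taken on faith from the calls above:

#include <linux/memblock.h>

static u64 __init example_reserve_2mb_below_512mb(void)
{
	u64 base;

	/* look for a 2MB, page-aligned hole between 16MB and 512MB */
	base = memblock_find_in_range(16ULL << 20, 512ULL << 20,
				      2ULL << 20, PAGE_SIZE);
	if (base == MEMBLOCK_ERROR)
		return MEMBLOCK_ERROR;

	memblock_x86_reserve_range(base, base + (2ULL << 20), "EXAMPLE");
	return base;
}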
@@ -5169,9 +5198,9 @@ void *__init alloc_large_system_hash(const char *tablename, | |||
5169 | if (!table) | 5198 | if (!table) |
5170 | panic("Failed to allocate %s hash table\n", tablename); | 5199 | panic("Failed to allocate %s hash table\n", tablename); |
5171 | 5200 | ||
5172 | printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n", | 5201 | printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", |
5173 | tablename, | 5202 | tablename, |
5174 | (1U << log2qty), | 5203 | (1UL << log2qty), |
5175 | ilog2(size) - PAGE_SHIFT, | 5204 | ilog2(size) - PAGE_SHIFT, |
5176 | size); | 5205 | size); |
5177 | 5206 | ||
diff --git a/mm/percpu.c b/mm/percpu.c index 12dea33572bd..6fc9015534f8 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -1258,9 +1258,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
1258 | 1258 | ||
1259 | if (pcpu_first_unit_cpu == NR_CPUS) | 1259 | if (pcpu_first_unit_cpu == NR_CPUS) |
1260 | pcpu_first_unit_cpu = cpu; | 1260 | pcpu_first_unit_cpu = cpu; |
1261 | pcpu_last_unit_cpu = cpu; | ||
1261 | } | 1262 | } |
1262 | } | 1263 | } |
1263 | pcpu_last_unit_cpu = cpu; | ||
1264 | pcpu_nr_units = unit; | 1264 | pcpu_nr_units = unit; |
1265 | 1265 | ||
1266 | for_each_possible_cpu(cpu) | 1266 | for_each_possible_cpu(cpu) |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -381,7 +381,13 @@ vma_address(struct page *page, struct vm_area_struct *vma) | |||
381 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) | 381 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) |
382 | { | 382 | { |
383 | if (PageAnon(page)) { | 383 | if (PageAnon(page)) { |
384 | if (vma->anon_vma->root != page_anon_vma(page)->root) | 384 | struct anon_vma *page__anon_vma = page_anon_vma(page); |
385 | /* | ||
386 | * Note: swapoff's unuse_vma() is more efficient with this | ||
387 | * check, and needs it to match anon_vma when KSM is active. | ||
388 | */ | ||
389 | if (!vma->anon_vma || !page__anon_vma || | ||
390 | vma->anon_vma->root != page__anon_vma->root) | ||
385 | return -EFAULT; | 391 | return -EFAULT; |
386 | } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { | 392 | } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { |
387 | if (!vma->vm_file || | 393 | if (!vma->vm_file || |
@@ -1564,13 +1570,14 @@ static void __hugepage_set_anon_rmap(struct page *page, | |||
1564 | struct vm_area_struct *vma, unsigned long address, int exclusive) | 1570 | struct vm_area_struct *vma, unsigned long address, int exclusive) |
1565 | { | 1571 | { |
1566 | struct anon_vma *anon_vma = vma->anon_vma; | 1572 | struct anon_vma *anon_vma = vma->anon_vma; |
1573 | |||
1567 | BUG_ON(!anon_vma); | 1574 | BUG_ON(!anon_vma); |
1568 | if (!exclusive) { | 1575 | |
1569 | struct anon_vma_chain *avc; | 1576 | if (PageAnon(page)) |
1570 | avc = list_entry(vma->anon_vma_chain.prev, | 1577 | return; |
1571 | struct anon_vma_chain, same_vma); | 1578 | if (!exclusive) |
1572 | anon_vma = avc->anon_vma; | 1579 | anon_vma = anon_vma->root; |
1573 | } | 1580 | |
1574 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | 1581 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
1575 | page->mapping = (struct address_space *) anon_vma; | 1582 | page->mapping = (struct address_space *) anon_vma; |
1576 | page->index = linear_page_index(vma, address); | 1583 | page->index = linear_page_index(vma, address); |
@@ -1581,6 +1588,8 @@ void hugepage_add_anon_rmap(struct page *page, | |||
1581 | { | 1588 | { |
1582 | struct anon_vma *anon_vma = vma->anon_vma; | 1589 | struct anon_vma *anon_vma = vma->anon_vma; |
1583 | int first; | 1590 | int first; |
1591 | |||
1592 | BUG_ON(!PageLocked(page)); | ||
1584 | BUG_ON(!anon_vma); | 1593 | BUG_ON(!anon_vma); |
1585 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 1594 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
1586 | first = atomic_inc_and_test(&page->_mapcount); | 1595 | first = atomic_inc_and_test(&page->_mapcount); |
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index aa33fd67fa41..29d6cbffb283 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c | |||
@@ -220,18 +220,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, | |||
220 | 220 | ||
221 | if (vmemmap_buf_start) { | 221 | if (vmemmap_buf_start) { |
222 | /* need to free left buf */ | 222 | /* need to free left buf */ |
223 | #ifdef CONFIG_NO_BOOTMEM | ||
224 | free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end)); | ||
225 | if (vmemmap_buf_start < vmemmap_buf) { | ||
226 | char name[15]; | ||
227 | |||
228 | snprintf(name, sizeof(name), "MEMMAP %d", nodeid); | ||
229 | reserve_early_without_check(__pa(vmemmap_buf_start), | ||
230 | __pa(vmemmap_buf), name); | ||
231 | } | ||
232 | #else | ||
233 | free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf); | 223 | free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf); |
234 | #endif | ||
235 | vmemmap_buf = NULL; | 224 | vmemmap_buf = NULL; |
236 | vmemmap_buf_end = NULL; | 225 | vmemmap_buf_end = NULL; |
237 | } | 226 | } |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 1f3f9c59a73a..9fc7bac7db0c 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -47,8 +47,6 @@ long nr_swap_pages; | |||
47 | long total_swap_pages; | 47 | long total_swap_pages; |
48 | static int least_priority; | 48 | static int least_priority; |
49 | 49 | ||
50 | static bool swap_for_hibernation; | ||
51 | |||
52 | static const char Bad_file[] = "Bad swap file entry "; | 50 | static const char Bad_file[] = "Bad swap file entry "; |
53 | static const char Unused_file[] = "Unused swap file entry "; | 51 | static const char Unused_file[] = "Unused swap file entry "; |
54 | static const char Bad_offset[] = "Bad swap offset entry "; | 52 | static const char Bad_offset[] = "Bad swap offset entry "; |
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si) | |||
141 | nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); | 139 | nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); |
142 | if (nr_blocks) { | 140 | if (nr_blocks) { |
143 | err = blkdev_issue_discard(si->bdev, start_block, | 141 | err = blkdev_issue_discard(si->bdev, start_block, |
144 | nr_blocks, GFP_KERNEL, | 142 | nr_blocks, GFP_KERNEL, 0); |
145 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | ||
146 | if (err) | 143 | if (err) |
147 | return err; | 144 | return err; |
148 | cond_resched(); | 145 | cond_resched(); |
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si) | |||
153 | nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); | 150 | nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); |
154 | 151 | ||
155 | err = blkdev_issue_discard(si->bdev, start_block, | 152 | err = blkdev_issue_discard(si->bdev, start_block, |
156 | nr_blocks, GFP_KERNEL, | 153 | nr_blocks, GFP_KERNEL, 0); |
157 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | ||
158 | if (err) | 154 | if (err) |
159 | break; | 155 | break; |
160 | 156 | ||
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si, | |||
193 | start_block <<= PAGE_SHIFT - 9; | 189 | start_block <<= PAGE_SHIFT - 9; |
194 | nr_blocks <<= PAGE_SHIFT - 9; | 190 | nr_blocks <<= PAGE_SHIFT - 9; |
195 | if (blkdev_issue_discard(si->bdev, start_block, | 191 | if (blkdev_issue_discard(si->bdev, start_block, |
196 | nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT | | 192 | nr_blocks, GFP_NOIO, 0)) |
197 | BLKDEV_IFL_BARRIER)) | ||
198 | break; | 193 | break; |
199 | } | 194 | } |
200 | 195 | ||
@@ -320,10 +315,8 @@ checks: | |||
320 | if (offset > si->highest_bit) | 315 | if (offset > si->highest_bit) |
321 | scan_base = offset = si->lowest_bit; | 316 | scan_base = offset = si->lowest_bit; |
322 | 317 | ||
323 | /* reuse swap entry of cache-only swap if not hibernation. */ | 318 | /* reuse swap entry of cache-only swap if not busy. */ |
324 | if (vm_swap_full() | 319 | if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { |
325 | && usage == SWAP_HAS_CACHE | ||
326 | && si->swap_map[offset] == SWAP_HAS_CACHE) { | ||
327 | int swap_was_freed; | 320 | int swap_was_freed; |
328 | spin_unlock(&swap_lock); | 321 | spin_unlock(&swap_lock); |
329 | swap_was_freed = __try_to_reclaim_swap(si, offset); | 322 | swap_was_freed = __try_to_reclaim_swap(si, offset); |
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void) | |||
453 | spin_lock(&swap_lock); | 446 | spin_lock(&swap_lock); |
454 | if (nr_swap_pages <= 0) | 447 | if (nr_swap_pages <= 0) |
455 | goto noswap; | 448 | goto noswap; |
456 | if (swap_for_hibernation) | ||
457 | goto noswap; | ||
458 | nr_swap_pages--; | 449 | nr_swap_pages--; |
459 | 450 | ||
460 | for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { | 451 | for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { |
@@ -487,6 +478,28 @@ noswap: | |||
487 | return (swp_entry_t) {0}; | 478 | return (swp_entry_t) {0}; |
488 | } | 479 | } |
489 | 480 | ||
481 | /* The only caller of this function is now the suspend routine */ | ||
482 | swp_entry_t get_swap_page_of_type(int type) | ||
483 | { | ||
484 | struct swap_info_struct *si; | ||
485 | pgoff_t offset; | ||
486 | |||
487 | spin_lock(&swap_lock); | ||
488 | si = swap_info[type]; | ||
489 | if (si && (si->flags & SWP_WRITEOK)) { | ||
490 | nr_swap_pages--; | ||
491 | /* This is called for allocating swap entry, not cache */ | ||
492 | offset = scan_swap_map(si, 1); | ||
493 | if (offset) { | ||
494 | spin_unlock(&swap_lock); | ||
495 | return swp_entry(type, offset); | ||
496 | } | ||
497 | nr_swap_pages++; | ||
498 | } | ||
499 | spin_unlock(&swap_lock); | ||
500 | return (swp_entry_t) {0}; | ||
501 | } | ||
502 | |||
490 | static struct swap_info_struct *swap_info_get(swp_entry_t entry) | 503 | static struct swap_info_struct *swap_info_get(swp_entry_t entry) |
491 | { | 504 | { |
492 | struct swap_info_struct *p; | 505 | struct swap_info_struct *p; |
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page) | |||
670 | if (page_swapcount(page)) | 683 | if (page_swapcount(page)) |
671 | return 0; | 684 | return 0; |
672 | 685 | ||
686 | /* | ||
687 | * Once hibernation has begun to create its image of memory, | ||
688 | * there's a danger that one of the calls to try_to_free_swap() | ||
689 | * - most probably a call from __try_to_reclaim_swap() while | ||
690 | * hibernation is allocating its own swap pages for the image, | ||
691 | * but conceivably even a call from memory reclaim - will free | ||
692 | * the swap from a page which has already been recorded in the | ||
693 | * image as a clean swapcache page, and then reuse its swap for | ||
694 | * another page of the image. On waking from hibernation, the | ||
695 | * original page might be freed under memory pressure, then | ||
696 | * later read back in from swap, now with the wrong data. | ||
697 | * | ||
698 | * Hibernation clears bits from gfp_allowed_mask to prevent | ||
699 | * memory reclaim from writing to disk, so check that here. | ||
700 | */ | ||
701 | if (!(gfp_allowed_mask & __GFP_IO)) | ||
702 | return 0; | ||
703 | |||
673 | delete_from_swap_cache(page); | 704 | delete_from_swap_cache(page); |
674 | SetPageDirty(page); | 705 | SetPageDirty(page); |
675 | return 1; | 706 | return 1; |
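The __GFP_IO test added to try_to_free_swap() relies on the PM core narrowing gfp_allowed_mask around hibernation so reclaim cannot touch swap at all. A simplified sketch of that idea, not the exact PM code; the helper names are invented:

#include <linux/gfp.h>

static gfp_t example_saved_gfp_mask;

/* called just before the hibernation image is created */
static void example_restrict_gfp_for_hibernation(void)
{
	example_saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);	/* try_to_free_swap() now bails out */
}

/* called after the image is written, or on resume */
static void example_restore_gfp_after_hibernation(void)
{
	gfp_allowed_mask = example_saved_gfp_mask;
}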
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep) | |||
746 | #endif | 777 | #endif |
747 | 778 | ||
748 | #ifdef CONFIG_HIBERNATION | 779 | #ifdef CONFIG_HIBERNATION |
749 | |||
750 | static pgoff_t hibernation_offset[MAX_SWAPFILES]; | ||
751 | /* | ||
752 | * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise, | ||
753 | * saved swap_map[] image to the disk will be an incomplete because it's | ||
754 | * changing without synchronization with hibernation snap shot. | ||
755 | * At resume, we just make swap_for_hibernation=false. We can forget | ||
756 | * used maps easily. | ||
757 | */ | ||
758 | void hibernation_freeze_swap(void) | ||
759 | { | ||
760 | int i; | ||
761 | |||
762 | spin_lock(&swap_lock); | ||
763 | |||
764 | printk(KERN_INFO "PM: Freeze Swap\n"); | ||
765 | swap_for_hibernation = true; | ||
766 | for (i = 0; i < MAX_SWAPFILES; i++) | ||
767 | hibernation_offset[i] = 1; | ||
768 | spin_unlock(&swap_lock); | ||
769 | } | ||
770 | |||
771 | void hibernation_thaw_swap(void) | ||
772 | { | ||
773 | spin_lock(&swap_lock); | ||
774 | if (swap_for_hibernation) { | ||
775 | printk(KERN_INFO "PM: Thaw Swap\n"); | ||
776 | swap_for_hibernation = false; | ||
777 | } | ||
778 | spin_unlock(&swap_lock); | ||
779 | } | ||
780 | |||
781 | /* | ||
782 | * Because updateing swap_map[] can make not-saved-status-change, | ||
783 | * we use our own easy allocator. | ||
784 | * Please see kernel/power/swap.c, Used swaps are recorded into | ||
785 | * RB-tree. | ||
786 | */ | ||
787 | swp_entry_t get_swap_for_hibernation(int type) | ||
788 | { | ||
789 | pgoff_t off; | ||
790 | swp_entry_t val = {0}; | ||
791 | struct swap_info_struct *si; | ||
792 | |||
793 | spin_lock(&swap_lock); | ||
794 | |||
795 | si = swap_info[type]; | ||
796 | if (!si || !(si->flags & SWP_WRITEOK)) | ||
797 | goto done; | ||
798 | |||
799 | for (off = hibernation_offset[type]; off < si->max; ++off) { | ||
800 | if (!si->swap_map[off]) | ||
801 | break; | ||
802 | } | ||
803 | if (off < si->max) { | ||
804 | val = swp_entry(type, off); | ||
805 | hibernation_offset[type] = off + 1; | ||
806 | } | ||
807 | done: | ||
808 | spin_unlock(&swap_lock); | ||
809 | return val; | ||
810 | } | ||
811 | |||
812 | void swap_free_for_hibernation(swp_entry_t ent) | ||
813 | { | ||
814 | /* Nothing to do */ | ||
815 | } | ||
816 | |||
817 | /* | 780 | /* |
818 | * Find the swap type that corresponds to given device (if any). | 781 | * Find the swap type that corresponds to given device (if any). |
819 | * | 782 | * |
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
2084 | p->flags |= SWP_SOLIDSTATE; | 2047 | p->flags |= SWP_SOLIDSTATE; |
2085 | p->cluster_next = 1 + (random32() % p->highest_bit); | 2048 | p->cluster_next = 1 + (random32() % p->highest_bit); |
2086 | } | 2049 | } |
2087 | if (discard_swap(p) == 0) | 2050 | if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD)) |
2088 | p->flags |= SWP_DISCARDABLE; | 2051 | p->flags |= SWP_DISCARDABLE; |
2089 | } | 2052 | } |
2090 | 2053 | ||
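With the last hunk, a swap area is only marked SWP_DISCARDABLE when userspace explicitly asks for it. A hedged userspace sketch of that opt-in; the SWAP_FLAG_DISCARD value is assumed from the kernel header and defined locally in case the libc headers predate it, and the device name is made up:

#include <stdio.h>
#include <sys/swap.h>

#ifndef SWAP_FLAG_DISCARD
#define SWAP_FLAG_DISCARD 0x10000	/* value assumed from the kernel's swap.h */
#endif

int main(void)
{
	/* roughly what "swapon -d /dev/sdb2" would do */
	if (swapon("/dev/sdb2", SWAP_FLAG_DISCARD) != 0)
		perror("swapon");
	return 0;
}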
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index c623e0ce3f00..9f909622a25e 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -517,6 +517,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); | |||
517 | static void purge_fragmented_blocks_allcpus(void); | 517 | static void purge_fragmented_blocks_allcpus(void); |
518 | 518 | ||
519 | /* | 519 | /* |
520 | * called before a call to iounmap() if the caller wants vm_area_struct's | ||
521 | * immediately freed. | ||
522 | */ | ||
523 | void set_iounmap_nonlazy(void) | ||
524 | { | ||
525 | atomic_set(&vmap_lazy_nr, lazy_max_pages()+1); | ||
526 | } | ||
527 | |||
528 | /* | ||
520 | * Purges all lazily-freed vmap areas. | 529 | * Purges all lazily-freed vmap areas. |
521 | * | 530 | * |
522 | * If sync is 0 then don't purge if there is already a purge in progress. | 531 | * If sync is 0 then don't purge if there is already a purge in progress. |
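set_iounmap_nonlazy() gives a caller that cannot tolerate deferred unmaps (reading the previous kernel's memory for a crash dump is the sort of user I would expect) a way to force the purge on the next iounmap(). A hedged usage sketch; the physical address is arbitrary:

#include <linux/io.h>
#include <linux/vmalloc.h>

static void example_read_and_unmap(void)
{
	void __iomem *map = ioremap(0x80000000, PAGE_SIZE);	/* illustrative address */

	if (!map)
		return;
	/* ... copy data out of the mapping ... */
	set_iounmap_nonlazy();	/* make the following iounmap() purge lazy vmap areas now */
	iounmap(map);
}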
diff --git a/mm/vmscan.c b/mm/vmscan.c index c391c320dbaf..c5dfabf25f11 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1804,12 +1804,11 @@ static void shrink_zone(int priority, struct zone *zone, | |||
1804 | * If a zone is deemed to be full of pinned pages then just give it a light | 1804 | * If a zone is deemed to be full of pinned pages then just give it a light |
1805 | * scan then give up on it. | 1805 | * scan then give up on it. |
1806 | */ | 1806 | */ |
1807 | static bool shrink_zones(int priority, struct zonelist *zonelist, | 1807 | static void shrink_zones(int priority, struct zonelist *zonelist, |
1808 | struct scan_control *sc) | 1808 | struct scan_control *sc) |
1809 | { | 1809 | { |
1810 | struct zoneref *z; | 1810 | struct zoneref *z; |
1811 | struct zone *zone; | 1811 | struct zone *zone; |
1812 | bool all_unreclaimable = true; | ||
1813 | 1812 | ||
1814 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 1813 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
1815 | gfp_zone(sc->gfp_mask), sc->nodemask) { | 1814 | gfp_zone(sc->gfp_mask), sc->nodemask) { |
@@ -1827,8 +1826,38 @@ static bool shrink_zones(int priority, struct zonelist *zonelist, | |||
1827 | } | 1826 | } |
1828 | 1827 | ||
1829 | shrink_zone(priority, zone, sc); | 1828 | shrink_zone(priority, zone, sc); |
1830 | all_unreclaimable = false; | ||
1831 | } | 1829 | } |
1830 | } | ||
1831 | |||
1832 | static bool zone_reclaimable(struct zone *zone) | ||
1833 | { | ||
1834 | return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; | ||
1835 | } | ||
1836 | |||
1837 | /* | ||
1838 | * As hibernation is going on, kswapd is frozen so that it can't mark | ||
1839 | * the zone into all_unreclaimable. It can't handle OOM during hibernation. | ||
1840 | * So let's check zone's unreclaimable in direct reclaim as well as kswapd. | ||
1841 | */ | ||
1842 | static bool all_unreclaimable(struct zonelist *zonelist, | ||
1843 | struct scan_control *sc) | ||
1844 | { | ||
1845 | struct zoneref *z; | ||
1846 | struct zone *zone; | ||
1847 | bool all_unreclaimable = true; | ||
1848 | |||
1849 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | ||
1850 | gfp_zone(sc->gfp_mask), sc->nodemask) { | ||
1851 | if (!populated_zone(zone)) | ||
1852 | continue; | ||
1853 | if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | ||
1854 | continue; | ||
1855 | if (zone_reclaimable(zone)) { | ||
1856 | all_unreclaimable = false; | ||
1857 | break; | ||
1858 | } | ||
1859 | } | ||
1860 | |||
1832 | return all_unreclaimable; | 1861 | return all_unreclaimable; |
1833 | } | 1862 | } |
1834 | 1863 | ||
@@ -1852,7 +1881,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
1852 | struct scan_control *sc) | 1881 | struct scan_control *sc) |
1853 | { | 1882 | { |
1854 | int priority; | 1883 | int priority; |
1855 | bool all_unreclaimable; | ||
1856 | unsigned long total_scanned = 0; | 1884 | unsigned long total_scanned = 0; |
1857 | struct reclaim_state *reclaim_state = current->reclaim_state; | 1885 | struct reclaim_state *reclaim_state = current->reclaim_state; |
1858 | struct zoneref *z; | 1886 | struct zoneref *z; |
@@ -1869,7 +1897,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
1869 | sc->nr_scanned = 0; | 1897 | sc->nr_scanned = 0; |
1870 | if (!priority) | 1898 | if (!priority) |
1871 | disable_swap_token(); | 1899 | disable_swap_token(); |
1872 | all_unreclaimable = shrink_zones(priority, zonelist, sc); | 1900 | shrink_zones(priority, zonelist, sc); |
1873 | /* | 1901 | /* |
1874 | * Don't shrink slabs when reclaiming memory from | 1902 | * Don't shrink slabs when reclaiming memory from |
1875 | * over limit cgroups | 1903 | * over limit cgroups |
@@ -1931,7 +1959,7 @@ out: | |||
1931 | return sc->nr_reclaimed; | 1959 | return sc->nr_reclaimed; |
1932 | 1960 | ||
1933 | /* top priority shrink_zones still had more to do? don't OOM, then */ | 1961 | /* top priority shrink_zones still had more to do? don't OOM, then */ |
1934 | if (scanning_global_lru(sc) && !all_unreclaimable) | 1962 | if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) |
1935 | return 1; | 1963 | return 1; |
1936 | 1964 | ||
1937 | return 0; | 1965 | return 0; |
@@ -2197,8 +2225,7 @@ loop_again: | |||
2197 | total_scanned += sc.nr_scanned; | 2225 | total_scanned += sc.nr_scanned; |
2198 | if (zone->all_unreclaimable) | 2226 | if (zone->all_unreclaimable) |
2199 | continue; | 2227 | continue; |
2200 | if (nr_slab == 0 && | 2228 | if (nr_slab == 0 && !zone_reclaimable(zone)) |
2201 | zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6)) | ||
2202 | zone->all_unreclaimable = 1; | 2229 | zone->all_unreclaimable = 1; |
2203 | /* | 2230 | /* |
2204 | * If we've done a decent amount of scanning and | 2231 | * If we've done a decent amount of scanning and |
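zone_reclaimable(), now shared by direct reclaim and kswapd, encodes the old heuristic: give up on a zone once it has been scanned six times over without progress. A small worked example with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned long reclaimable = 100000;	/* pages on the zone's LRU lists (assumed) */
	unsigned long scanned = 650000;		/* zone->pages_scanned so far (assumed) */

	int still_reclaimable = scanned < reclaimable * 6;
	printf("zone_reclaimable() -> %d (gives up once %lu pages scanned)\n",
	       still_reclaimable, reclaimable * 6);
	return 0;
}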
diff --git a/mm/vmstat.c b/mm/vmstat.c index f389168f9a83..355a9e669aaa 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void) | |||
138 | int threshold; | 138 | int threshold; |
139 | 139 | ||
140 | for_each_populated_zone(zone) { | 140 | for_each_populated_zone(zone) { |
141 | unsigned long max_drift, tolerate_drift; | ||
142 | |||
141 | threshold = calculate_threshold(zone); | 143 | threshold = calculate_threshold(zone); |
142 | 144 | ||
143 | for_each_online_cpu(cpu) | 145 | for_each_online_cpu(cpu) |
144 | per_cpu_ptr(zone->pageset, cpu)->stat_threshold | 146 | per_cpu_ptr(zone->pageset, cpu)->stat_threshold |
145 | = threshold; | 147 | = threshold; |
148 | |||
149 | /* | ||
150 | * Only set percpu_drift_mark if there is a danger that | ||
151 | * NR_FREE_PAGES reports the low watermark is ok when in fact | ||
152 | * the min watermark could be breached by an allocation | ||
153 | */ | ||
154 | tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); | ||
155 | max_drift = num_online_cpus() * threshold; | ||
156 | if (max_drift > tolerate_drift) | ||
157 | zone->percpu_drift_mark = high_wmark_pages(zone) + | ||
158 | max_drift; | ||
146 | } | 159 | } |
147 | } | 160 | } |
148 | 161 | ||
@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, | |||
813 | "\n scanned %lu" | 826 | "\n scanned %lu" |
814 | "\n spanned %lu" | 827 | "\n spanned %lu" |
815 | "\n present %lu", | 828 | "\n present %lu", |
816 | zone_page_state(zone, NR_FREE_PAGES), | 829 | zone_nr_free_pages(zone), |
817 | min_wmark_pages(zone), | 830 | min_wmark_pages(zone), |
818 | low_wmark_pages(zone), | 831 | low_wmark_pages(zone), |
819 | high_wmark_pages(zone), | 832 | high_wmark_pages(zone), |
@@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, | |||
998 | switch (action) { | 1011 | switch (action) { |
999 | case CPU_ONLINE: | 1012 | case CPU_ONLINE: |
1000 | case CPU_ONLINE_FROZEN: | 1013 | case CPU_ONLINE_FROZEN: |
1014 | refresh_zone_stat_thresholds(); | ||
1001 | start_cpu_timer(cpu); | 1015 | start_cpu_timer(cpu); |
1002 | node_set_state(cpu_to_node(cpu), N_CPU); | 1016 | node_set_state(cpu_to_node(cpu), N_CPU); |
1003 | break; | 1017 | break; |