author    Tejun Heo <tj@kernel.org>    2014-09-24 13:00:21 -0400
committer Tejun Heo <tj@kernel.org>    2014-09-24 13:00:21 -0400
commit    d06efebf0c37d438fcf07057be00dd40fcfce08d
tree      31a0786d132aadf4cbb9725f3f444ef6e1052128 /mm
parent    bb2e226b3bef596dd56be97df655d857b4603923
parent    0a30288da1aec914e158c2d7a3482a85f632750f
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block into for-3.18
This is to receive 0a30288da1ae ("blk-mq, percpu_ref: implement a
kludge for SCSI blk-mq stall during probe"), which implements
__percpu_ref_kill_expedited() to work around the SCSI blk-mq stall.
The commit will be reverted and patches implementing the proper fix
will be added.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@lst.de>
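
For context, the kludge amounts to switching a percpu_ref out of percpu
mode without waiting out a regular sched-RCU grace period, so that
repeated queue freezes during SCSI probe do not each stall for a full
grace period. A minimal sketch of the idea follows, assuming a
hypothetical percpu_ref_mark_dead() helper for the internal flag
manipulation; the merged __percpu_ref_kill_expedited() differs in detail:

	/* Sketch only -- not the exact code merged in 0a30288da1ae. */
	static void percpu_ref_kill_expedited_sketch(struct percpu_ref *ref)
	{
		percpu_ref_mark_dead(ref);	/* hypothetical: set the DEAD flag */
		synchronize_sched_expedited();	/* expedited, not synchronize_sched() */
		/* from here on, get/put operate on the atomic counter */
	}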
Diffstat (limited to 'mm')
 mm/dmapool.c         |   2
 mm/hugetlb_cgroup.c  |   2
 mm/memblock.c        |   7
 mm/memcontrol.c      | 103
 mm/memory.c          |   9
 mm/mmap.c            |  16
 mm/nobootmem.c       |   2
 mm/pgtable-generic.c |   2
 mm/zbud.c            |   1
 mm/zpool.c           |   2
 mm/zsmalloc.c        |   1
 11 files changed, 104 insertions(+), 43 deletions(-)
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 306baa594f95..ba8019b063e1 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -176,7 +176,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	if (list_empty(&dev->dma_pools) &&
 	    device_create_file(dev, &dev_attr_pools)) {
 		kfree(retval);
-		return NULL;
+		retval = NULL;
 	} else
 		list_add(&retval->pools, &dev->dma_pools);
 	mutex_unlock(&pools_lock);
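
The dmapool.c change fixes a lock leak: the error branch returned while
pools_lock was still held. A reduced sketch of the corrected pattern
(illustrative, not the full dma_pool_create()):

	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools) &&
	    device_create_file(dev, &dev_attr_pools)) {
		kfree(retval);
		retval = NULL;	/* was "return NULL;", which skipped the unlock */
	} else
		list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);	/* now reached on the error path too */
	return retval;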
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 9eebfadeeee1..a67c26e0f360 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -217,7 +217,7 @@ void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 
 	if (hugetlb_cgroup_disabled())
 		return;
-	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
+	lockdep_assert_held(&hugetlb_lock);
 	h_cg = hugetlb_cgroup_from_page(page);
 	if (unlikely(!h_cg))
 		return;
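
The hugetlb_cgroup.c change is more than cosmetic: spin_is_locked() only
reports that somebody holds the lock, and on uniprocessor builds without
spinlock debugging it is always false, so the old VM_BUG_ON could
trigger even with the lock correctly held. lockdep_assert_held() instead
warns unless the current context holds the lock and compiles away when
CONFIG_LOCKDEP is off. Typical usage, with an invented helper purely for
illustration:

	static void frob_hstate_locked(struct hstate *h)
	{
		lockdep_assert_held(&hugetlb_lock);	/* caller must hold it */
		/* ... touch state protected by hugetlb_lock ... */
	}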
diff --git a/mm/memblock.c b/mm/memblock.c
index 6d2f219a48b0..6ecb0d937fb5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -192,8 +192,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
 					phys_addr_t end, int nid)
 {
-	int ret;
-	phys_addr_t kernel_end;
+	phys_addr_t kernel_end, ret;
 
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
@@ -817,6 +816,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
 		if (nid != NUMA_NO_NODE && nid != m_nid)
 			continue;
 
+		/* skip hotpluggable memory regions if needed */
+		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+			continue;
+
 		if (!type_b) {
 			if (out_start)
 				*out_start = m_start;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ec4dcf1b9562..085dc6d2f876 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2534,6 +2534,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	unsigned long long size;
 	int ret = 0;
 
+	if (mem_cgroup_is_root(memcg))
+		goto done;
 retry:
 	if (consume_stock(memcg, nr_pages))
 		goto done;
@@ -2611,9 +2613,7 @@ nomem:
 	if (!(gfp_mask & __GFP_NOFAIL))
 		return -ENOMEM;
 bypass:
-	memcg = root_mem_cgroup;
-	ret = -EINTR;
-	goto retry;
+	return -EINTR;
 
 done_restock:
 	if (batch > nr_pages)
@@ -2626,6 +2626,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	unsigned long bytes = nr_pages * PAGE_SIZE;
 
+	if (mem_cgroup_is_root(memcg))
+		return;
+
 	res_counter_uncharge(&memcg->res, bytes);
 	if (do_swap_account)
 		res_counter_uncharge(&memcg->memsw, bytes);
@@ -2640,6 +2643,9 @@ static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
 {
 	unsigned long bytes = nr_pages * PAGE_SIZE;
 
+	if (mem_cgroup_is_root(memcg))
+		return;
+
 	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
 	if (do_swap_account)
 		res_counter_uncharge_until(&memcg->memsw,
@@ -4093,6 +4099,46 @@ out:
 	return retval;
 }
 
+static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
+					       enum mem_cgroup_stat_index idx)
+{
+	struct mem_cgroup *iter;
+	long val = 0;
+
+	/* Per-cpu values can be negative, use a signed accumulator */
+	for_each_mem_cgroup_tree(iter, memcg)
+		val += mem_cgroup_read_stat(iter, idx);
+
+	if (val < 0) /* race ? */
+		val = 0;
+	return val;
+}
+
+static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
+{
+	u64 val;
+
+	if (!mem_cgroup_is_root(memcg)) {
+		if (!swap)
+			return res_counter_read_u64(&memcg->res, RES_USAGE);
+		else
+			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
+	}
+
+	/*
+	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
+	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
+	 */
+	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
+	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
+
+	if (swap)
+		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
+
+	return val << PAGE_SHIFT;
+}
+
+
 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
 {
@@ -4102,8 +4148,12 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 
 	switch (type) {
 	case _MEM:
+		if (name == RES_USAGE)
+			return mem_cgroup_usage(memcg, false);
 		return res_counter_read_u64(&memcg->res, name);
 	case _MEMSWAP:
+		if (name == RES_USAGE)
+			return mem_cgroup_usage(memcg, true);
 		return res_counter_read_u64(&memcg->memsw, name);
 	case _KMEM:
 		return res_counter_read_u64(&memcg->kmem, name);
@@ -4572,10 +4622,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 	if (!t)
 		goto unlock;
 
-	if (!swap)
-		usage = res_counter_read_u64(&memcg->res, RES_USAGE);
-	else
-		usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+	usage = mem_cgroup_usage(memcg, swap);
 
 	/*
 	 * current_threshold points to threshold just below or equal to usage.
@@ -4673,10 +4720,10 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 
 	if (type == _MEM) {
 		thresholds = &memcg->thresholds;
-		usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, false);
 	} else if (type == _MEMSWAP) {
 		thresholds = &memcg->memsw_thresholds;
-		usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, true);
 	} else
 		BUG();
 
@@ -4762,10 +4809,10 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
 
 	if (type == _MEM) {
 		thresholds = &memcg->thresholds;
-		usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, false);
 	} else if (type == _MEMSWAP) {
 		thresholds = &memcg->memsw_thresholds;
-		usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, true);
 	} else
 		BUG();
 
@@ -5525,9 +5572,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		 * core guarantees its existence.
 		 */
 	} else {
-		res_counter_init(&memcg->res, &root_mem_cgroup->res);
-		res_counter_init(&memcg->memsw, &root_mem_cgroup->memsw);
-		res_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
+		res_counter_init(&memcg->res, NULL);
+		res_counter_init(&memcg->memsw, NULL);
+		res_counter_init(&memcg->kmem, NULL);
 		/*
 		 * Deeper hierachy with use_hierarchy == false doesn't make
 		 * much sense so let cgroup subsystem know about this
@@ -5969,8 +6016,9 @@ static void __mem_cgroup_clear_mc(void)
 	/* we must fixup refcnts and charges */
 	if (mc.moved_swap) {
 		/* uncharge swap account from the old cgroup */
-		res_counter_uncharge(&mc.from->memsw,
-				     PAGE_SIZE * mc.moved_swap);
+		if (!mem_cgroup_is_root(mc.from))
+			res_counter_uncharge(&mc.from->memsw,
+					     PAGE_SIZE * mc.moved_swap);
 
 		for (i = 0; i < mc.moved_swap; i++)
 			css_put(&mc.from->css);
@@ -5979,8 +6027,9 @@ static void __mem_cgroup_clear_mc(void)
 		 * we charged both to->res and to->memsw, so we should
 		 * uncharge to->res.
 		 */
-		res_counter_uncharge(&mc.to->res,
-				     PAGE_SIZE * mc.moved_swap);
+		if (!mem_cgroup_is_root(mc.to))
+			res_counter_uncharge(&mc.to->res,
+					     PAGE_SIZE * mc.moved_swap);
 		/* we've already done css_get(mc.to) */
 		mc.moved_swap = 0;
 	}
@@ -6345,7 +6394,8 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
 	rcu_read_lock();
 	memcg = mem_cgroup_lookup(id);
 	if (memcg) {
-		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+		if (!mem_cgroup_is_root(memcg))
+			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 		mem_cgroup_swap_statistics(memcg, false);
 		css_put(&memcg->css);
 	}
@@ -6509,12 +6559,15 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 {
 	unsigned long flags;
 
-	if (nr_mem)
-		res_counter_uncharge(&memcg->res, nr_mem * PAGE_SIZE);
-	if (nr_memsw)
-		res_counter_uncharge(&memcg->memsw, nr_memsw * PAGE_SIZE);
-
-	memcg_oom_recover(memcg);
+	if (!mem_cgroup_is_root(memcg)) {
+		if (nr_mem)
+			res_counter_uncharge(&memcg->res,
+					     nr_mem * PAGE_SIZE);
+		if (nr_memsw)
+			res_counter_uncharge(&memcg->memsw,
+					     nr_memsw * PAGE_SIZE);
+		memcg_oom_recover(memcg);
+	}
 
 	local_irq_save(flags);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
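
The memcontrol.c hunks share one theme: the root memcg stops using its
res_counters. Charge, cancel, and uncharge paths bypass the counters for
root, and usage reads for root are instead reconstructed from the
recursive per-cpu statistics via the new mem_cgroup_usage() and
mem_cgroup_recursive_stat() helpers. The arithmetic reduces to the
sketch below; the parameters stand in for the recursive stat sums and
are illustrative, not kernel API:

	static u64 root_usage_bytes(long cache_pages, long rss_pages,
				    long swap_pages, bool swap)
	{
		long pages = cache_pages + rss_pages;	/* THP is counted in RSS */

		if (swap)
			pages += swap_pages;
		if (pages < 0)	/* per-cpu sums can transiently go negative */
			pages = 0;
		return (u64)pages << PAGE_SHIFT;
	}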
diff --git a/mm/memory.c b/mm/memory.c
index ab3537bcfed2..d17f1bcd2a91 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -118,6 +118,8 @@ __setup("norandmaps", disable_randmaps);
 unsigned long zero_pfn __read_mostly;
 unsigned long highest_memmap_pfn __read_mostly;
 
+EXPORT_SYMBOL(zero_pfn);
+
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  */
@@ -751,7 +753,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long pfn = pte_pfn(pte);
 
 	if (HAVE_PTE_SPECIAL) {
-		if (likely(!pte_special(pte) || pte_numa(pte)))
+		if (likely(!pte_special(pte)))
 			goto check_pfn;
 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
@@ -777,15 +779,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		}
 	}
 
+	if (is_zero_pfn(pfn))
+		return NULL;
check_pfn:
 	if (unlikely(pfn > highest_memmap_pfn)) {
 		print_bad_pte(vma, addr, pte, NULL);
 		return NULL;
 	}
 
-	if (is_zero_pfn(pfn))
-		return NULL;
-
 	/*
 	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
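
The memory.c hunks export zero_pfn and move the zero-page test in
vm_normal_page() ahead of the check_pfn label. For reference, the
generic form of the helper is a plain pfn comparison (sketch; some
architectures such as MIPS override it):

	extern unsigned long zero_pfn;

	static inline int is_zero_pfn(unsigned long pfn)
	{
		return pfn == zero_pfn;
	}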
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -369,20 +369,20 @@ static int browse_rb(struct rb_root *root)
 		struct vm_area_struct *vma;
 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
 		if (vma->vm_start < prev) {
-			pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+			pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
 			bug = 1;
 		}
 		if (vma->vm_start < pend) {
-			pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+			pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
 			bug = 1;
 		}
 		if (vma->vm_start > vma->vm_end) {
-			pr_info("vm_end %lx < vm_start %lx\n",
+			pr_emerg("vm_end %lx < vm_start %lx\n",
 				vma->vm_end, vma->vm_start);
 			bug = 1;
 		}
 		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-			pr_info("free gap %lx, correct %lx\n",
+			pr_emerg("free gap %lx, correct %lx\n",
 				vma->rb_subtree_gap,
 				vma_compute_subtree_gap(vma));
 			bug = 1;
@@ -396,7 +396,7 @@ static int browse_rb(struct rb_root *root)
 	for (nd = pn; nd; nd = rb_prev(nd))
 		j++;
 	if (i != j) {
-		pr_info("backwards %d, forwards %d\n", j, i);
+		pr_emerg("backwards %d, forwards %d\n", j, i);
 		bug = 1;
 	}
 	return bug ? -1 : i;
@@ -431,17 +431,17 @@ static void validate_mm(struct mm_struct *mm)
 		i++;
 	}
 	if (i != mm->map_count) {
-		pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	if (highest_address != mm->highest_vm_end) {
-		pr_info("mm->highest_vm_end %lx, found %lx\n",
+		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
 			mm->highest_vm_end, highest_address);
 		bug = 1;
 	}
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count) {
-		pr_info("map_count %d rb %d\n", mm->map_count, i);
+		pr_emerg("map_count %d rb %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	BUG_ON(bug);
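
The mmap.c hunks raise the rb-tree validation messages from pr_info to
pr_emerg. These messages immediately precede a BUG_ON(), so they should
reach the console even under a restrictive console loglevel, which
pr_info does not guarantee. The pr_* helpers are thin printk wrappers,
roughly:

	/* abridged from include/linux/printk.h */
	#define pr_emerg(fmt, ...) \
		printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_info(fmt, ...) \
		printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)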
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7ed58602e71b..7c7ab32ee503 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -119,6 +119,8 @@ static unsigned long __init free_low_memory_core_early(void)
 	phys_addr_t start, end;
 	u64 i;
 
+	memblock_clear_hotplug(0, -1);
+
 	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
 		count += __free_memory_core(start, end);
 
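
The memblock.c and nobootmem.c hunks cooperate: with movable_node
enabled, __next_mem_range() now skips hotpluggable regions so boot-time
allocations stay out of them, and free_low_memory_core_early() clears
the hotplug flag on all regions just before handing memory to the buddy
allocator, so nothing is withheld from the free lists. Roughly, the
boot-time ordering is (illustrative sequence, not a literal kernel
excerpt):

	memblock_mark_hotplug(base, size);	/* early: flag ranges from SRAT/DT */
	/* ... memblock allocations via __next_mem_range() avoid them ... */
	memblock_clear_hotplug(0, -1);	/* end of boot: drop the flag everywhere */
	/* ... free_low_memory_core_early() then releases them to the buddy ... */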
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index a8b919925934..dfb79e028ecb 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 	pmd_t entry = *pmdp;
 	if (pmd_numa(entry))
 		entry = pmd_mknonnuma(entry);
-	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/mm/zbud.c b/mm/zbud.c
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -195,6 +195,7 @@ static struct zpool_driver zbud_zpool_driver = {
 	.total_size = zbud_zpool_total_size,
 };
 
+MODULE_ALIAS("zpool-zbud");
 #endif /* CONFIG_ZPOOL */
 
 /*****************
diff --git a/mm/zpool.c b/mm/zpool.c
index e40612a1df00..739cdf0d183a 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -150,7 +150,7 @@ struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops)
 	driver = zpool_get_driver(type);
 
 	if (!driver) {
-		request_module(type);
+		request_module("zpool-%s", type);
 		driver = zpool_get_driver(type);
 	}
 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 4e2fc83cb394..94f38fac5e81 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -315,6 +315,7 @@ static struct zpool_driver zs_zpool_driver = {
 	.total_size = zs_zpool_total_size,
 };
 
+MODULE_ALIAS("zpool-zsmalloc");
 #endif /* CONFIG_ZPOOL */
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
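
The zpool, zbud, and zsmalloc hunks are two halves of one autoloading
fix: request_module(type) asked modprobe for a module literally named
"zbud" or "zsmalloc", which need not match anything loadable. Requesting
the prefixed "zpool-%s" form and declaring matching MODULE_ALIAS()
entries in each backend makes the lookup unambiguous. A sketch of the
pairing, using the zpool_create_pool() signature shown above (the ops
variable is illustrative):

	/* backend module: resolvable as "zpool-zbud" via modprobe */
	MODULE_ALIAS("zpool-zbud");

	/* consumer: an unregistered type triggers request_module("zpool-%s", type) */
	static struct zpool_ops ops;	/* callbacks elided for the sketch */
	struct zpool *pool = zpool_create_pool("zbud", GFP_KERNEL, &ops);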