author		Jens Axboe <axboe@fb.com>	2014-09-22 13:57:32 -0400
committer	Jens Axboe <axboe@fb.com>	2014-09-22 13:57:32 -0400
commit		6d11fb454b161a4565c57be6f1c5527235741003 (patch)
tree		c238ed3df2f654181c2a0746478a33b32214cc60 /mm
parent		b207892b061da7608878e273ae22ba9bf9be264b (diff)
parent		8b95741569eabc5eb17da71d1d3668cdb0bef86c (diff)
Merge branch 'for-linus' into for-3.18/core
Moving patches from for-linus to 3.18 instead; pull in these changes
that will go to Linus today.
Diffstat (limited to 'mm')
-rw-r--r--	mm/memblock.c	|   4
-rw-r--r--	mm/memcontrol.c	| 103
-rw-r--r--	mm/mmap.c	|  16
-rw-r--r--	mm/nobootmem.c	|   2
-rw-r--r--	mm/percpu-vm.c	|  22
-rw-r--r--	mm/percpu.c	|   2
6 files changed, 110 insertions, 39 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 70fad0c0dafb..6ecb0d937fb5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -816,6 +816,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
 		if (nid != NUMA_NO_NODE && nid != m_nid)
 			continue;
 
+		/* skip hotpluggable memory regions if needed */
+		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+			continue;
+
 		if (!type_b) {
 			if (out_start)
 				*out_start = m_start;
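The memblock hunk above adds an early-boot filter: when "movable_node" is in effect, the free-range iterator passes over regions flagged as hotpluggable, so boot-time allocations cannot land on memory that may later be offlined. Below is a minimal user-space sketch of that skip-filter pattern; the struct, flag, and helper names are illustrative stand-ins, not the kernel's memblock API.

#include <stdbool.h>
#include <stdio.h>

#define REGION_HOTPLUG	0x1	/* illustrative flag, not the kernel's */

struct region {
	unsigned long start, end;
	unsigned int flags;
};

static bool movable_node_enabled = true;

static bool region_is_hotpluggable(const struct region *r)
{
	return r->flags & REGION_HOTPLUG;
}

int main(void)
{
	struct region regions[] = {
		{ 0x0000, 0x1000, 0 },
		{ 0x1000, 0x2000, REGION_HOTPLUG },
		{ 0x2000, 0x3000, 0 },
	};

	for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		/* skip hotpluggable memory regions if needed */
		if (movable_node_enabled && region_is_hotpluggable(&regions[i]))
			continue;
		printf("usable: %#lx-%#lx\n", regions[i].start, regions[i].end);
	}
	return 0;
}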
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ec4dcf1b9562..085dc6d2f876 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2534,6 +2534,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	unsigned long long size;
 	int ret = 0;
 
+	if (mem_cgroup_is_root(memcg))
+		goto done;
 retry:
 	if (consume_stock(memcg, nr_pages))
 		goto done;
@@ -2611,9 +2613,7 @@ nomem:
 	if (!(gfp_mask & __GFP_NOFAIL))
 		return -ENOMEM;
 bypass:
-	memcg = root_mem_cgroup;
-	ret = -EINTR;
-	goto retry;
+	return -EINTR;
 
 done_restock:
 	if (batch > nr_pages)
@@ -2626,6 +2626,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	unsigned long bytes = nr_pages * PAGE_SIZE;
 
+	if (mem_cgroup_is_root(memcg))
+		return;
+
 	res_counter_uncharge(&memcg->res, bytes);
 	if (do_swap_account)
 		res_counter_uncharge(&memcg->memsw, bytes);
@@ -2640,6 +2643,9 @@ static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
 {
 	unsigned long bytes = nr_pages * PAGE_SIZE;
 
+	if (mem_cgroup_is_root(memcg))
+		return;
+
 	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
 	if (do_swap_account)
 		res_counter_uncharge_until(&memcg->memsw,
@@ -4093,6 +4099,46 @@ out:
 	return retval;
 }
 
+static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
+					       enum mem_cgroup_stat_index idx)
+{
+	struct mem_cgroup *iter;
+	long val = 0;
+
+	/* Per-cpu values can be negative, use a signed accumulator */
+	for_each_mem_cgroup_tree(iter, memcg)
+		val += mem_cgroup_read_stat(iter, idx);
+
+	if (val < 0) /* race ? */
+		val = 0;
+	return val;
+}
+
+static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
+{
+	u64 val;
+
+	if (!mem_cgroup_is_root(memcg)) {
+		if (!swap)
+			return res_counter_read_u64(&memcg->res, RES_USAGE);
+		else
+			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
+	}
+
+	/*
+	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
+	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
+	 */
+	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
+	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
+
+	if (swap)
+		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
+
+	return val << PAGE_SHIFT;
+}
+
+
 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
 {
@@ -4102,8 +4148,12 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 
 	switch (type) {
 	case _MEM:
+		if (name == RES_USAGE)
+			return mem_cgroup_usage(memcg, false);
 		return res_counter_read_u64(&memcg->res, name);
 	case _MEMSWAP:
+		if (name == RES_USAGE)
+			return mem_cgroup_usage(memcg, true);
 		return res_counter_read_u64(&memcg->memsw, name);
 	case _KMEM:
 		return res_counter_read_u64(&memcg->kmem, name);
@@ -4572,10 +4622,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 	if (!t)
 		goto unlock;
 
-	if (!swap)
-		usage = res_counter_read_u64(&memcg->res, RES_USAGE);
-	else
-		usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+	usage = mem_cgroup_usage(memcg, swap);
 
 	/*
 	 * current_threshold points to threshold just below or equal to usage.
@@ -4673,10 +4720,10 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 
 	if (type == _MEM) {
 		thresholds = &memcg->thresholds;
-		usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, false);
 	} else if (type == _MEMSWAP) {
 		thresholds = &memcg->memsw_thresholds;
-		usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, true);
 	} else
 		BUG();
 
@@ -4762,10 +4809,10 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
 
 	if (type == _MEM) {
 		thresholds = &memcg->thresholds;
-		usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, false);
 	} else if (type == _MEMSWAP) {
 		thresholds = &memcg->memsw_thresholds;
-		usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, true);
 	} else
 		BUG();
 
@@ -5525,9 +5572,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		 * core guarantees its existence.
 		 */
 	} else {
-		res_counter_init(&memcg->res, &root_mem_cgroup->res);
-		res_counter_init(&memcg->memsw, &root_mem_cgroup->memsw);
-		res_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
+		res_counter_init(&memcg->res, NULL);
+		res_counter_init(&memcg->memsw, NULL);
+		res_counter_init(&memcg->kmem, NULL);
 		/*
 		 * Deeper hierachy with use_hierarchy == false doesn't make
 		 * much sense so let cgroup subsystem know about this
@@ -5969,8 +6016,9 @@ static void __mem_cgroup_clear_mc(void)
 	/* we must fixup refcnts and charges */
 	if (mc.moved_swap) {
 		/* uncharge swap account from the old cgroup */
-		res_counter_uncharge(&mc.from->memsw,
-				     PAGE_SIZE * mc.moved_swap);
+		if (!mem_cgroup_is_root(mc.from))
+			res_counter_uncharge(&mc.from->memsw,
+					     PAGE_SIZE * mc.moved_swap);
 
 		for (i = 0; i < mc.moved_swap; i++)
 			css_put(&mc.from->css);
@@ -5979,8 +6027,9 @@ static void __mem_cgroup_clear_mc(void)
 		 * we charged both to->res and to->memsw, so we should
 		 * uncharge to->res.
 		 */
-		res_counter_uncharge(&mc.to->res,
-				     PAGE_SIZE * mc.moved_swap);
+		if (!mem_cgroup_is_root(mc.to))
+			res_counter_uncharge(&mc.to->res,
+					     PAGE_SIZE * mc.moved_swap);
 		/* we've already done css_get(mc.to) */
 		mc.moved_swap = 0;
 	}
@@ -6345,7 +6394,8 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
 	rcu_read_lock();
 	memcg = mem_cgroup_lookup(id);
 	if (memcg) {
-		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+		if (!mem_cgroup_is_root(memcg))
+			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 		mem_cgroup_swap_statistics(memcg, false);
 		css_put(&memcg->css);
 	}
@@ -6509,12 +6559,15 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 {
 	unsigned long flags;
 
-	if (nr_mem)
-		res_counter_uncharge(&memcg->res, nr_mem * PAGE_SIZE);
-	if (nr_memsw)
-		res_counter_uncharge(&memcg->memsw, nr_memsw * PAGE_SIZE);
-
-	memcg_oom_recover(memcg);
+	if (!mem_cgroup_is_root(memcg)) {
+		if (nr_mem)
+			res_counter_uncharge(&memcg->res,
+					     nr_mem * PAGE_SIZE);
+		if (nr_memsw)
+			res_counter_uncharge(&memcg->memsw,
+					     nr_memsw * PAGE_SIZE);
+		memcg_oom_recover(memcg);
+	}
 
 	local_irq_save(flags);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
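Taken together, the memcontrol.c hunks stop charging the root cgroup against its res_counters entirely: try_charge() bails out early for root, every uncharge path skips root, and root's usage is instead reconstructed on demand by mem_cgroup_usage(), which sums per-cpu statistics over the whole hierarchy. Below is a user-space sketch of that reconstruction, including the signed accumulator clamped at zero (per-cpu deltas can be transiently negative, as the "/* race ? */" comment notes). All types and names here are illustrative, not the kernel's.

#include <stdio.h>

#define NR_CPUS 4

struct group {
	long percpu_pages[NR_CPUS];	/* per-cpu deltas; may go negative */
	struct group *children[2];
	int nr_children;
};

/* Sum this group's per-cpu counters plus all descendants, signed. */
static long sum_tree(const struct group *g)
{
	long val = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		val += g->percpu_pages[cpu];
	for (int i = 0; i < g->nr_children; i++)
		val += sum_tree(g->children[i]);
	return val;
}

static unsigned long usage_pages(const struct group *root)
{
	/* Per-cpu values can be negative, so accumulate in a signed type */
	long val = sum_tree(root);

	if (val < 0)	/* racing updates can transiently undershoot; clamp */
		val = 0;
	return (unsigned long)val;
}

int main(void)
{
	struct group child = { .percpu_pages = { 5, -1, 2, 0 } };
	struct group root = {
		.percpu_pages = { 1, 0, -2, 3 },
		.children = { &child },
		.nr_children = 1,
	};

	printf("root usage: %lu pages\n", usage_pages(&root));
	return 0;
}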
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -369,20 +369,20 @@ static int browse_rb(struct rb_root *root)
 		struct vm_area_struct *vma;
 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
 		if (vma->vm_start < prev) {
-			pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+			pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
 			bug = 1;
 		}
 		if (vma->vm_start < pend) {
-			pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+			pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
 			bug = 1;
 		}
 		if (vma->vm_start > vma->vm_end) {
-			pr_info("vm_end %lx < vm_start %lx\n",
+			pr_emerg("vm_end %lx < vm_start %lx\n",
 				vma->vm_end, vma->vm_start);
 			bug = 1;
 		}
 		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-			pr_info("free gap %lx, correct %lx\n",
+			pr_emerg("free gap %lx, correct %lx\n",
 				vma->rb_subtree_gap,
 				vma_compute_subtree_gap(vma));
 			bug = 1;
@@ -396,7 +396,7 @@ static int browse_rb(struct rb_root *root)
 	for (nd = pn; nd; nd = rb_prev(nd))
 		j++;
 	if (i != j) {
-		pr_info("backwards %d, forwards %d\n", j, i);
+		pr_emerg("backwards %d, forwards %d\n", j, i);
 		bug = 1;
 	}
 	return bug ? -1 : i;
@@ -431,17 +431,17 @@ static void validate_mm(struct mm_struct *mm)
 		i++;
 	}
 	if (i != mm->map_count) {
-		pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	if (highest_address != mm->highest_vm_end) {
-		pr_info("mm->highest_vm_end %lx, found %lx\n",
+		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
 			mm->highest_vm_end, highest_address);
 		bug = 1;
 	}
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count) {
-		pr_info("map_count %d rb %d\n", mm->map_count, i);
+		pr_emerg("map_count %d rb %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	BUG_ON(bug);
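The mmap.c hunks change only message severity: the rbtree-validation diagnostics move from pr_info() to pr_emerg() so they still reach the console immediately before BUG_ON() fires, even when the console loglevel suppresses informational messages. A small user-space model of that severity filtering follows; the levels mirror the kernel's KERN_* ordering (lower number means more severe), and the helper names are made up for illustration.

#include <stdarg.h>
#include <stdio.h>

enum loglevel { LOG_EMERG = 0, LOG_ERR = 3, LOG_INFO = 6 };

static enum loglevel console_loglevel = LOG_ERR;	/* a "quiet" console */

static void log_at(enum loglevel level, const char *fmt, ...)
{
	va_list ap;

	if (level > console_loglevel)	/* lower number wins: drop less-severe messages */
		return;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	log_at(LOG_INFO, "vm_start %lx prev %lx\n", 0x1000UL, 0x2000UL);	/* filtered out */
	log_at(LOG_EMERG, "vm_start %lx prev %lx\n", 0x1000UL, 0x2000UL);	/* printed */
	return 0;
}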
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7ed58602e71b..7c7ab32ee503 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -119,6 +119,8 @@ static unsigned long __init free_low_memory_core_early(void)
 	phys_addr_t start, end;
 	u64 i;
 
+	memblock_clear_hotplug(0, -1);
+
 	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
 		count += __free_memory_core(start, end);
 
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 3707c71ae4cd..51108165f829 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			    int page_start, int page_end)
 {
 	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
-	unsigned int cpu;
+	unsigned int cpu, tcpu;
 	int i;
 
 	for_each_possible_cpu(cpu) {
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
 			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
-			if (!*pagep) {
-				pcpu_free_pages(chunk, pages, populated,
-						page_start, page_end);
-				return -ENOMEM;
-			}
+			if (!*pagep)
+				goto err;
 		}
 	}
 	return 0;
+
+err:
+	while (--i >= page_start)
+		__free_page(pages[pcpu_page_idx(cpu, i)]);
+
+	for_each_possible_cpu(tcpu) {
+		if (tcpu == cpu)
+			break;
+		for (i = page_start; i < page_end; i++)
+			__free_page(pages[pcpu_page_idx(tcpu, i)]);
+	}
+	return -ENOMEM;
 }
 
 /**
@@ -263,6 +272,7 @@ err:
 		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
 				   page_end - page_start);
 	}
+	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
 	return err;
 }
 
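The pcpu_alloc_pages() rework above replaces the mid-loop call to pcpu_free_pages() with a common unwind path: on failure it first releases the pages already taken for the current CPU, walking backwards from the failure point, and then releases the full range for every CPU completed earlier. A self-contained sketch of that two-stage rollback, using malloc()/free() in place of the page allocator; the array layout and names are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS		4
#define PAGES_PER_CPU	8

/* Allocate "pages" [start, end) for every CPU; on failure, unwind in two
 * stages like the reworked err: path above. */
static int alloc_range(void *pages[], int start, int end)
{
	int cpu, tcpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		for (i = start; i < end; i++) {
			pages[cpu * PAGES_PER_CPU + i] = malloc(4096);
			if (!pages[cpu * PAGES_PER_CPU + i])
				goto err;
		}
	}
	return 0;

err:
	/* stage 1: pages already taken for the failing CPU, back to start */
	while (--i >= start)
		free(pages[cpu * PAGES_PER_CPU + i]);

	/* stage 2: the full range for every CPU completed before it */
	for (tcpu = 0; tcpu < cpu; tcpu++)
		for (i = start; i < end; i++)
			free(pages[tcpu * PAGES_PER_CPU + i]);
	return -1;
}

int main(void)
{
	void *pages[NR_CPUS * PAGES_PER_CPU] = { 0 };

	if (alloc_range(pages, 2, 6) == 0)
		printf("allocated pages 2-5 for %d CPUs\n", NR_CPUS);
	return 0;
}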
diff --git a/mm/percpu.c b/mm/percpu.c
index 2139e30a4b44..da997f9800bd 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1932,6 +1932,8 @@ void __init setup_per_cpu_areas(void)
 
 	if (pcpu_setup_first_chunk(ai, fc) < 0)
 		panic("Failed to initialize percpu areas.");
+
+	pcpu_free_alloc_info(ai);
 }
 
 #endif /* CONFIG_SMP */