diff options
-rw-r--r--  arch/x86/kernel/apic/io_apic.c       | 7
-rw-r--r--  arch/x86/kernel/process.c            | 6
-rw-r--r--  arch/x86/kernel/smpboot.c            | 9
-rw-r--r--  drivers/acpi/processor_perflib.c     | 3
-rw-r--r--  drivers/acpi/processor_throttling.c  | 3
-rw-r--r--  drivers/net/sfc/efx.c                | 3
-rw-r--r--  drivers/oprofile/buffer_sync.c       | 3
-rw-r--r--  kernel/trace/trace.c                 | 7
-rw-r--r--  virt/kvm/kvm_main.c                  | 3
9 files changed, 14 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 64970b9885f2..dc69f28489f5 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node) | |||
227 | 227 | ||
228 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); | 228 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); |
229 | if (cfg) { | 229 | if (cfg) { |
230 | if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { | 230 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { |
231 | kfree(cfg); | 231 | kfree(cfg); |
232 | cfg = NULL; | 232 | cfg = NULL; |
233 | } else if (!alloc_cpumask_var_node(&cfg->old_domain, | 233 | } else if (!zalloc_cpumask_var_node(&cfg->old_domain, |
234 | GFP_ATOMIC, node)) { | 234 | GFP_ATOMIC, node)) { |
235 | free_cpumask_var(cfg->domain); | 235 | free_cpumask_var(cfg->domain); |
236 | kfree(cfg); | 236 | kfree(cfg); |
237 | cfg = NULL; | 237 | cfg = NULL; |
238 | } else { | ||
239 | cpumask_clear(cfg->domain); | ||
240 | cpumask_clear(cfg->old_domain); | ||
241 | } | 238 | } |
242 | } | 239 | } |
243 | 240 | ||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 847ab4160315..5284cd2b5776 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | |||
555 | void __init init_c1e_mask(void) | 555 | void __init init_c1e_mask(void) |
556 | { | 556 | { |
557 | /* If we're using c1e_idle, we need to allocate c1e_mask. */ | 557 | /* If we're using c1e_idle, we need to allocate c1e_mask. */ |
558 | if (pm_idle == c1e_idle) { | 558 | if (pm_idle == c1e_idle) |
559 | alloc_cpumask_var(&c1e_mask, GFP_KERNEL); | 559 | zalloc_cpumask_var(&c1e_mask, GFP_KERNEL); |
560 | cpumask_clear(c1e_mask); | ||
561 | } | ||
562 | } | 560 | } |
563 | 561 | ||
564 | static int __init idle_setup(char *str) | 562 | static int __init idle_setup(char *str) |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 09c5e077dff7..565ebc65920e 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1059 | #endif | 1059 | #endif |
1060 | current_thread_info()->cpu = 0; /* needed? */ | 1060 | current_thread_info()->cpu = 0; /* needed? */ |
1061 | for_each_possible_cpu(i) { | 1061 | for_each_possible_cpu(i) { |
1062 | alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); | 1062 | zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); |
1063 | alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); | 1063 | zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); |
1064 | alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); | 1064 | zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); |
1065 | cpumask_clear(per_cpu(cpu_core_map, i)); | ||
1066 | cpumask_clear(per_cpu(cpu_sibling_map, i)); | ||
1067 | cpumask_clear(cpu_data(i).llc_shared_map); | ||
1068 | } | 1065 | } |
1069 | set_cpu_sibling_map(0); | 1066 | set_cpu_sibling_map(0); |
1070 | 1067 | ||
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 11088cf10319..8ba0ed0b9ddb 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -511,7 +511,7 @@ int acpi_processor_preregister_performance( | |||
511 | struct acpi_processor *match_pr; | 511 | struct acpi_processor *match_pr; |
512 | struct acpi_psd_package *match_pdomain; | 512 | struct acpi_psd_package *match_pdomain; |
513 | 513 | ||
514 | if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) | 514 | if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) |
515 | return -ENOMEM; | 515 | return -ENOMEM; |
516 | 516 | ||
517 | mutex_lock(&performance_mutex); | 517 | mutex_lock(&performance_mutex); |
@@ -558,7 +558,6 @@ int acpi_processor_preregister_performance( | |||
558 | * Now that we have _PSD data from all CPUs, lets setup P-state | 558 | * Now that we have _PSD data from all CPUs, lets setup P-state |
559 | * domain info. | 559 | * domain info. |
560 | */ | 560 | */ |
561 | cpumask_clear(covered_cpus); | ||
562 | for_each_possible_cpu(i) { | 561 | for_each_possible_cpu(i) { |
563 | pr = per_cpu(processors, i); | 562 | pr = per_cpu(processors, i); |
564 | if (!pr) | 563 | if (!pr) |
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index ce7cf3bc5101..4c6c14c1e307 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c | |||
@@ -77,7 +77,7 @@ static int acpi_processor_update_tsd_coord(void) | |||
77 | struct acpi_tsd_package *pdomain, *match_pdomain; | 77 | struct acpi_tsd_package *pdomain, *match_pdomain; |
78 | struct acpi_processor_throttling *pthrottling, *match_pthrottling; | 78 | struct acpi_processor_throttling *pthrottling, *match_pthrottling; |
79 | 79 | ||
80 | if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) | 80 | if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) |
81 | return -ENOMEM; | 81 | return -ENOMEM; |
82 | 82 | ||
83 | /* | 83 | /* |
@@ -105,7 +105,6 @@ static int acpi_processor_update_tsd_coord(void) | |||
105 | if (retval) | 105 | if (retval) |
106 | goto err_ret; | 106 | goto err_ret; |
107 | 107 | ||
108 | cpumask_clear(covered_cpus); | ||
109 | for_each_possible_cpu(i) { | 108 | for_each_possible_cpu(i) { |
110 | pr = per_cpu(processors, i); | 109 | pr = per_cpu(processors, i); |
111 | if (!pr) | 110 | if (!pr) |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 07a7e4b8f8fc..cc4b2f99989d 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void) | |||
884 | int count; | 884 | int count; |
885 | int cpu; | 885 | int cpu; |
886 | 886 | ||
887 | if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) { | 887 | if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { |
888 | printk(KERN_WARNING | 888 | printk(KERN_WARNING |
889 | "sfc: RSS disabled due to allocation failure\n"); | 889 | "sfc: RSS disabled due to allocation failure\n"); |
890 | return 1; | 890 | return 1; |
891 | } | 891 | } |
892 | 892 | ||
893 | cpumask_clear(core_mask); | ||
894 | count = 0; | 893 | count = 0; |
895 | for_each_online_cpu(cpu) { | 894 | for_each_online_cpu(cpu) { |
896 | if (!cpumask_test_cpu(cpu, core_mask)) { | 895 | if (!cpumask_test_cpu(cpu, core_mask)) { |
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 8574622e36a5..c9e2ae90f195 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c | |||
@@ -154,9 +154,8 @@ int sync_start(void) | |||
154 | { | 154 | { |
155 | int err; | 155 | int err; |
156 | 156 | ||
157 | if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL)) | 157 | if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) |
158 | return -ENOMEM; | 158 | return -ENOMEM; |
159 | cpumask_clear(marked_cpus); | ||
160 | 159 | ||
161 | start_cpu_work(); | 160 | start_cpu_work(); |
162 | 161 | ||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6c0f6a8a22eb..411af37f4be4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1984,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file) | |||
1984 | if (current_trace) | 1984 | if (current_trace) |
1985 | *iter->trace = *current_trace; | 1985 | *iter->trace = *current_trace; |
1986 | 1986 | ||
1987 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) | 1987 | if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) |
1988 | goto fail; | 1988 | goto fail; |
1989 | 1989 | ||
1990 | cpumask_clear(iter->started); | ||
1991 | |||
1992 | if (current_trace && current_trace->print_max) | 1990 | if (current_trace && current_trace->print_max) |
1993 | iter->tr = &max_tr; | 1991 | iter->tr = &max_tr; |
1994 | else | 1992 | else |
@@ -4389,7 +4387,7 @@ __init static int tracer_alloc_buffers(void) | |||
4389 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 4387 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) |
4390 | goto out_free_buffer_mask; | 4388 | goto out_free_buffer_mask; |
4391 | 4389 | ||
4392 | if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | 4390 | if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) |
4393 | goto out_free_tracing_cpumask; | 4391 | goto out_free_tracing_cpumask; |
4394 | 4392 | ||
4395 | /* To save memory, keep the ring buffer size to its minimum */ | 4393 | /* To save memory, keep the ring buffer size to its minimum */ |
@@ -4400,7 +4398,6 @@ __init static int tracer_alloc_buffers(void) | |||
4400 | 4398 | ||
4401 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 4399 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); |
4402 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 4400 | cpumask_copy(tracing_cpumask, cpu_all_mask); |
4403 | cpumask_clear(tracing_reader_cpumask); | ||
4404 | 4401 | ||
4405 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 4402 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
4406 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, | 4403 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 897bff3b7df9..034a798b0431 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -738,8 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) | |||
738 | bool called = true; | 738 | bool called = true; |
739 | struct kvm_vcpu *vcpu; | 739 | struct kvm_vcpu *vcpu; |
740 | 740 | ||
741 | if (alloc_cpumask_var(&cpus, GFP_ATOMIC)) | 741 | zalloc_cpumask_var(&cpus, GFP_ATOMIC); |
742 | cpumask_clear(cpus); | ||
743 | 742 | ||
744 | spin_lock(&kvm->requests_lock); | 743 | spin_lock(&kvm->requests_lock); |
745 | me = smp_processor_id(); | 744 | me = smp_processor_id(); |