author     Jens Axboe <jens.axboe@oracle.com>   2008-05-09 03:39:44 -0400
committer  Jens Axboe <jens.axboe@oracle.com>   2008-06-26 05:24:38 -0400
commit     15c8b6c1aaaf1c4edd67e2f02e4d8e1bd1a51c0d
tree       3658f893c2f89ea0be4c6cc08aa11fa54476d0f4 /arch
parent     8691e5a8f691cc2a4fda0651e8d307aaba0e7d68
on_each_cpu(): kill unused 'retry' parameter
It's not even passed on to smp_call_function() anymore, since that
was removed. So kill it.
Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'arch')
35 files changed, 63 insertions, 63 deletions
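The whole series reduces to one prototype change in include/linux/smp.h. As a rough sketch reconstructed from the call sites below (not copied verbatim from the header), the declaration goes from

	int on_each_cpu(void (*func)(void *info), void *info, int retry, int wait);	/* old: 'retry' was ignored */

to

	int on_each_cpu(void (*func)(void *info), void *info, int wait);		/* new: only 'wait' remains */

Each caller keeps its wait argument as-is (0 = don't wait, 1 = block until func has run on every online CPU); only the dead retry flag drops out.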
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 96ed82fd9eef..351407e07e71 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -160,7 +160,7 @@ common_shutdown(int mode, char *restart_cmd)
 	struct halt_info args;
 	args.mode = mode;
 	args.restart_cmd = restart_cmd;
-	on_each_cpu(common_shutdown_1, &args, 1, 0);
+	on_each_cpu(common_shutdown_1, &args, 0);
 }
 
 void
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 44114c8dbb2a..83df541650fc 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -657,7 +657,7 @@ void
 smp_imb(void)
 {
 	/* Must wait other processors to flush their icache before continue. */
-	if (on_each_cpu(ipi_imb, NULL, 1, 1))
+	if (on_each_cpu(ipi_imb, NULL, 1))
 		printk(KERN_CRIT "smp_imb: timed out\n");
 }
 EXPORT_SYMBOL(smp_imb);
@@ -673,7 +673,7 @@ flush_tlb_all(void)
 {
 	/* Although we don't have any data to pass, we do want to
 	   synchronize with the other processors. */
-	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
+	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
 		printk(KERN_CRIT "flush_tlb_all: timed out\n");
 	}
 }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 6344466b2113..5a7c09564d13 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -604,7 +604,7 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -631,7 +631,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 
 	ta.ta_start = kaddr;
 
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -654,5 +654,5 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 }
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 9cd818cc7008..7dd96c127177 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 static void
 ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }
 
 /*
@@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 static void
 ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 
 /*
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9baa48255c12..19d4493c6193 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	}
 
 	/* save the current system wide pmu states */
-	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 		goto cleanup_reserve;
@@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
 	pfm_alt_intr_handler = NULL;
 
-	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 	}
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 19152dcbf6e4..3676468612b6 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -285,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 void
 smp_flush_tlb_all (void)
 {
-	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
@@ -308,7 +308,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
 	 * rather trivial.
 	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index ed9febe63d72..b47e4615ec12 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -49,7 +49,7 @@ static void local_rm9k_perfcounter_irq_startup(void *args)
 
 static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1);
 
 	return 0;
 }
@@ -66,7 +66,7 @@ static void local_rm9k_perfcounter_irq_shutdown(void *args)
 
 static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1);
 }
 
 static struct irq_chip rm9k_irq_controller = {
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 7a9ae830be86..4410f172b8ab 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -246,7 +246,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
+	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -366,7 +366,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		.addr2 = end,
 	};
 
-	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
+	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index b5f6f71b27bc..dd2fbd6645c1 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -27,7 +27,7 @@ static int op_mips_setup(void)
 	model->reg_setup(ctr);
 
 	/* Configure the registers on all cpus. */
-	on_each_cpu(model->cpu_setup, NULL, 0, 1);
+	on_each_cpu(model->cpu_setup, NULL, 1);
 
 	return 0;
 }
@@ -58,7 +58,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root)
 
 static int op_mips_start(void)
 {
-	on_each_cpu(model->cpu_start, NULL, 0, 1);
+	on_each_cpu(model->cpu_start, NULL, 1);
 
 	return 0;
 }
@@ -66,7 +66,7 @@ static int op_mips_start(void)
 static void op_mips_stop(void)
 {
 	/* Disable performance monitoring for all counters. */
-	on_each_cpu(model->cpu_stop, NULL, 0, 1);
+	on_each_cpu(model->cpu_stop, NULL, 1);
 }
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index e10d25d2d9c9..5259d8c20676 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -51,12 +51,12 @@ static struct pdc_btlb_info btlb_info __read_mostly;
 void
 flush_data_cache(void)
 {
-	on_each_cpu(flush_data_cache_local, NULL, 1, 1);
+	on_each_cpu(flush_data_cache_local, NULL, 1);
 }
 void
 flush_instruction_cache(void)
 {
-	on_each_cpu(flush_instruction_cache_local, NULL, 1, 1);
+	on_each_cpu(flush_instruction_cache_local, NULL, 1);
 }
 #endif
 
@@ -515,7 +515,7 @@ static void cacheflush_h_tmp_function(void *dummy)
 
 void flush_cache_all(void)
 {
-	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
+	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
 }
 
 void flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 126105c76a44..d47f3975c9c6 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -292,7 +292,7 @@ void arch_send_call_function_single_ipi(int cpu)
 void
 smp_flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
+	on_each_cpu(flush_tlb_all_local, NULL, 1);
 }
 
 /*
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ce0da689a89d..b4d6c8777ed0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -1053,7 +1053,7 @@ void flush_tlb_all(void)
 		do_recycle++;
 	}
 	spin_unlock(&sid_lock);
-	on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
+	on_each_cpu(flush_tlb_all_local, NULL, 1);
 	if (do_recycle) {
 		spin_lock(&sid_lock);
 		recycle_sids(recycle_ndirty,recycle_dirty_array);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 34843c318419..647f3e8677dc 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -747,7 +747,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
 	/* Call function on all CPUs. One of us will make the
 	 * rtas call
 	 */
-	if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
+	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
 		data.error = -EINVAL;
 
 	wait_for_completion(&done);
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index 368a4934f7ee..c3a56d65c5a9 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -192,7 +192,7 @@ static void tau_timeout_smp(unsigned long unused)
 
 	/* schedule ourselves to be run again */
 	mod_timer(&tau_timer, jiffies + shrink_timer) ;
-	on_each_cpu(tau_timeout, NULL, 1, 0);
+	on_each_cpu(tau_timeout, NULL, 0);
 }
 
 /*
@@ -234,7 +234,7 @@ int __init TAU_init(void)
 	tau_timer.expires = jiffies + shrink_timer;
 	add_timer(&tau_timer);
 
-	on_each_cpu(TAU_init_smp, NULL, 1, 0);
+	on_each_cpu(TAU_init_smp, NULL, 0);
 
 	printk("Thermal assist unit ");
 #ifdef CONFIG_TAU_INT
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 73401e83739a..f1a38a6c1e2d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -322,7 +322,7 @@ void snapshot_timebases(void)
 {
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
-	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
+	on_each_cpu(snapshot_tb_and_purr, NULL, 1);
 }
 
 /*
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index ad928edafb0a..2bd12d965db1 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -218,7 +218,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
 	mb();
 
 	/* XXX this is sub-optimal but will do for now */
-	on_each_cpu(slice_flush_segments, mm, 0, 1);
+	on_each_cpu(slice_flush_segments, mm, 1);
 #ifdef CONFIG_SPU_BASE
 	spu_flush_all_slbs(mm);
 #endif
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 4908dc98f9ca..17807acb05d9 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -65,7 +65,7 @@ static int op_powerpc_setup(void)
 
 	/* Configure the registers on all cpus. If an error occurs on one
 	 * of the cpus, op_per_cpu_rc will be set to the error */
-	on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1);
+	on_each_cpu(op_powerpc_cpu_setup, NULL, 1);
 
 out:	if (op_per_cpu_rc) {
 		/* error on setup release the performance counter hardware */
@@ -100,7 +100,7 @@ static int op_powerpc_start(void)
 	if (model->global_start)
 		return model->global_start(ctr);
 	if (model->start) {
-		on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
+		on_each_cpu(op_powerpc_cpu_start, NULL, 1);
 		return op_per_cpu_rc;
 	}
 	return -EIO; /* No start function is defined for this
@@ -115,7 +115,7 @@ static inline void op_powerpc_cpu_stop(void *dummy)
 static void op_powerpc_stop(void)
 {
 	if (model->stop)
-		on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
+		on_each_cpu(op_powerpc_cpu_stop, NULL, 1);
 	if (model->global_stop)
 		model->global_stop();
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 276b105fb2a4..b6781030cfbd 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -299,7 +299,7 @@ static void smp_ptlb_callback(void *info)
 
 void smp_ptlb_all(void)
 {
-	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+	on_each_cpu(smp_ptlb_callback, NULL, 1);
 }
 EXPORT_SYMBOL(smp_ptlb_all);
 #endif /* ! CONFIG_64BIT */
@@ -347,7 +347,7 @@ void smp_ctl_set_bit(int cr, int bit)
 	memset(&parms.orvals, 0, sizeof(parms.orvals));
 	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.orvals[cr] = 1 << bit;
-	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
 
@@ -361,7 +361,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 	memset(&parms.orvals, 0, sizeof(parms.orvals));
 	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.andvals[cr] = ~(1L << bit);
-	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index bf7bf2c2236a..6037ed2b7471 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -909,7 +909,7 @@ static void etr_work_fn(struct work_struct *work)
 	if (!eacr.ea) {
 		/* Both ports offline. Reset everything. */
 		eacr.dp = eacr.es = eacr.sl = 0;
-		on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
+		on_each_cpu(etr_disable_sync_clock, NULL, 1);
 		del_timer_sync(&etr_timer);
 		etr_update_eacr(eacr);
 		set_bit(ETR_FLAG_EACCES, &etr_flags);
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 71781ba2675b..60c50841143e 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -197,7 +197,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+	on_each_cpu(flush_tlb_all_ipi, 0, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -284,7 +284,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 	fd.addr1 = start;
 	fd.addr2 = end;
-	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 6cfab2e4d340..ebefd2a14375 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -344,7 +344,7 @@ void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 		 * also executing in this address space.
 		 */
 		mm->context.sparc64_ctx_val = ctx;
-		on_each_cpu(context_reload, mm, 0, 0);
+		on_each_cpu(context_reload, mm, 0);
 	}
 	spin_unlock(&ctx_alloc_lock);
 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index e07e8c068ae0..43b7cb594912 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -363,7 +363,7 @@ static void mcheck_check_cpu(void *info)
 
 static void mcheck_timer(struct work_struct *work)
 {
-	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
+	on_each_cpu(mcheck_check_cpu, NULL, 1);
 
 	/*
 	 * Alert userspace if needed. If we logged an MCE, reduce the
@@ -612,7 +612,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 	 * Collect entries that were still getting written before the
 	 * synchronize.
 	 */
-	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
+	on_each_cpu(collect_tscs, cpu_tsc, 1);
 	for (i = next; i < MCE_LOG_LEN; i++) {
 		if (mcelog.entry[i].finished &&
 			mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
@@ -737,7 +737,7 @@ static void mce_restart(void)
 	if (next_interval)
 		cancel_delayed_work(&mcheck_work);
 	/* Timer race is harmless here */
-	on_each_cpu(mce_init, NULL, 1, 1);
+	on_each_cpu(mce_init, NULL, 1);
 	next_interval = check_interval * HZ;
 	if (next_interval)
 		schedule_delayed_work(&mcheck_work,
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
index 00ccb6c14ec2..cc1fccdd31e0 100644
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c
@@ -59,7 +59,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
 static void mce_work_fn(struct work_struct *work)
 {
-	on_each_cpu(mce_checkregs, NULL, 1, 1);
+	on_each_cpu(mce_checkregs, NULL, 1);
 	schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
 }
 
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index f9ae93adffe5..58043f06d7e2 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -180,7 +180,7 @@ void disable_lapic_nmi_watchdog(void)
 	if (atomic_read(&nmi_active) <= 0)
 		return;
 
-	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
+	on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
 	wd_ops->unreserve();
 
 	BUG_ON(atomic_read(&nmi_active) != 0);
@@ -202,7 +202,7 @@ void enable_lapic_nmi_watchdog(void)
 		return;
 	}
 
-	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
+	on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
 	touch_nmi_watchdog();
 }
 
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 4dc8600d9d20..720640ff36ca 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1565,7 +1565,7 @@ void /*__init*/ print_local_APIC(void * dummy)
 
 void print_all_local_APICs (void)
 {
-	on_each_cpu(print_local_APIC, NULL, 1, 1);
+	on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void /*__init*/ print_PIC(void)
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index ef1a8dfcc529..4504c7f50012 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1146,7 +1146,7 @@ void __apicdebuginit print_local_APIC(void * dummy)
 
 void print_all_local_APICs (void)
 {
-	on_each_cpu(print_local_APIC, NULL, 1, 1);
+	on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void __apicdebuginit print_PIC(void)
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index 5562dab0bd20..11008e0857c0 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -218,7 +218,7 @@ static void __acpi_nmi_enable(void *__unused)
 void acpi_nmi_enable(void)
 {
 	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+		on_each_cpu(__acpi_nmi_enable, NULL, 1);
 }
 
 static void __acpi_nmi_disable(void *__unused)
@@ -232,7 +232,7 @@ static void __acpi_nmi_disable(void *__unused)
 void acpi_nmi_disable(void)
 {
 	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+		on_each_cpu(__acpi_nmi_disable, NULL, 1);
 }
 
 void setup_apic_nmi_watchdog(void *unused)
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index 2f1e4f503c9e..bbdcb17b3dfe 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -225,7 +225,7 @@ static void __acpi_nmi_enable(void *__unused)
 void acpi_nmi_enable(void)
 {
 	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+		on_each_cpu(__acpi_nmi_enable, NULL, 1);
 }
 
 static void __acpi_nmi_disable(void *__unused)
@@ -239,7 +239,7 @@ static void __acpi_nmi_disable(void *__unused)
 void acpi_nmi_disable(void)
 {
 	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+		on_each_cpu(__acpi_nmi_disable, NULL, 1);
 }
 
 void setup_apic_nmi_watchdog(void *unused)
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index 9bb2363851af..fec1ecedc9b7 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -238,6 +238,6 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index a1f07d793202..184a367516d3 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -270,5 +270,5 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 0a03d57f9b3b..0dcae19ed627 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -301,7 +301,7 @@ static int __init vsyscall_init(void)
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2);
 #endif
-	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+	on_each_cpu(cpu_vsyscall_init, NULL, 1);
 	hotcpu_notifier(cpu_vsyscall_notifier, 0);
 	return 0;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5534fe59b5fc..10ce6ee4c491 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2968,7 +2968,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (vmx->vmcs) {
-		on_each_cpu(__vcpu_clear, vmx, 0, 1);
+		on_each_cpu(__vcpu_clear, vmx, 1);
 		free_vmcs(vmx->vmcs);
 		vmx->vmcs = NULL;
 	}
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 04f596eab749..abea08459a73 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1072,7 +1072,7 @@ static void do_flush_tlb_all(void *info)
 /* flush the TLB of every active CPU in the system */
 void flush_tlb_all(void)
 {
-	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
+	on_each_cpu(do_flush_tlb_all, 0, 1);
 }
 
 /* used to set up the trampoline for other CPUs when the memory manager
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 60bcb5b6a37e..9b836ba9dedd 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -106,7 +106,7 @@ static void cpa_flush_all(unsigned long cache)
 {
 	BUG_ON(irqs_disabled());
 
-	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
+	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
 static void __cpa_flush_range(void *arg)
@@ -127,7 +127,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	BUG_ON(irqs_disabled());
 	WARN_ON(PAGE_ALIGN(start) != start);
 
-	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
+	on_each_cpu(__cpa_flush_range, NULL, 1);
 
 	if (!cache)
 		return;
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index cc48d3fde545..3238ad32ffd8 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -218,8 +218,8 @@ static int nmi_setup(void)
 		}
 
 	}
-	on_each_cpu(nmi_save_registers, NULL, 0, 1);
-	on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+	on_each_cpu(nmi_save_registers, NULL, 1);
+	on_each_cpu(nmi_cpu_setup, NULL, 1);
 	nmi_enabled = 1;
 	return 0;
 }
@@ -271,7 +271,7 @@ static void nmi_shutdown(void)
 {
 	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
 	nmi_enabled = 0;
-	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
 	unregister_die_notifier(&profile_exceptions_nb);
 	model->shutdown(msrs);
 	free_msrs();
@@ -285,7 +285,7 @@ static void nmi_cpu_start(void *dummy)
 
 static int nmi_start(void)
 {
-	on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+	on_each_cpu(nmi_cpu_start, NULL, 1);
 	return 0;
 }
 
@@ -297,7 +297,7 @@ static void nmi_cpu_stop(void *dummy)
 
 static void nmi_stop(void)
 {
-	on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+	on_each_cpu(nmi_cpu_stop, NULL, 1);
 }
 
 struct op_counter_config counter_config[OP_MAX_COUNTER];