Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--	arch/ia64/kernel/mca.c		|    6
-rw-r--r--	arch/ia64/kernel/palinfo.c	|    2
-rw-r--r--	arch/ia64/kernel/perfmon.c	|    6
-rw-r--r--	arch/ia64/kernel/process.c	|    2
-rw-r--r--	arch/ia64/kernel/smp.c		|  254
-rw-r--r--	arch/ia64/kernel/smpboot.c	|    6
-rw-r--r--	arch/ia64/kernel/uncached.c	|    5
7 files changed, 28 insertions, 253 deletions
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 705176b434b3..7dd96c127177 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 static void
 ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }
 
 /*
@@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 static void
 ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 
 /*
@@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
-					NULL, 1, 0);
+					NULL, 0);
 		break;
 	}
 	return NOTIFY_OK;
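
Every call site in this patch changes the same way: the unused retry/nonatomic flag is dropped, so each cross-call helper loses one argument. For orientation, the generic helpers these sites now resolve to have roughly the following prototypes in kernels of this era (a sketch for reference, not part of the patch):

/* Run func(info) on every online CPU; if wait is non-zero, do not
 * return until all CPUs have finished running it. */
int on_each_cpu(void (*func)(void *info), void *info, int wait);

/* Run func(info) on one specific CPU. */
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
			     int wait);

/* Run func(info) on all online CPUs except the calling one. */
int smp_call_function(void (*func)(void *info), void *info, int wait);
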
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 9dc00f7fe10e..e5c57f413ca2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
 
 
 	/* will send IPI to other CPU and wait for completion of remote call */
-	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
+	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
 		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
 		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
 		return 0;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 7714a97b0104..19d4493c6193 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 	int ret;
 
 	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
 	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
@@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	}
 
 	/* save the current system wide pmu states */
-	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 		goto cleanup_reserve;
@@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
 	pfm_alt_intr_handler = NULL;
 
-	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 	}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a3a34b4eb038..fabaf08d9a69 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -286,7 +286,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 983296f1c813..3676468612b6 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -60,25 +60,9 @@ static struct local_tlb_flush_counts {
 
 static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
 
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t started;
-	atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
-
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
+#define IPI_CALL_FUNC_SINGLE	2
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
@@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
-void
-lock_ipi_calllock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static inline void
-handle_call_data(void)
-{
-	struct call_data_struct *data;
-	void (*func)(void *info);
-	void *info;
-	int wait;
-
-	/* release the 'pointer lock' */
-	data = (struct call_data_struct *)call_data;
-	func = data->func;
-	info = data->info;
-	wait = data->wait;
-
-	mb();
-	atomic_inc(&data->started);
-	/* At this point the structure may be gone unless wait is true. */
-	(*func)(info);
-
-	/* Notify the sending CPU that the task is done. */
-	mb();
-	if (wait)
-		atomic_inc(&data->finished);
-}
-
 static void
 stop_this_cpu(void)
 {
@@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id)
 			ops &= ~(1 << which);
 
 			switch (which) {
-			case IPI_CALL_FUNC:
-				handle_call_data();
-				break;
-
 			case IPI_CPU_STOP:
 				stop_this_cpu();
 				break;
+			case IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+			case IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
 #ifdef CONFIG_KEXEC
 			case IPI_KDUMP_CPU_STOP:
 				unw_init_running(kdump_cpu_freeze, NULL);
@@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+
+
 /*
  * Called with preemption disabled.
  */
@@ -334,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 void
 smp_flush_tlb_all (void)
 {
-	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
@@ -357,193 +308,18 @@ smp_flush_tlb_mm (struct mm_struct *mm)
  * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
  * rather trivial.
  */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
-/*
- * Run a function on a specific CPU
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	Currently unused.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
-			  int wait)
-{
-	struct call_data_struct data;
-	int cpus = 1;
-	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
-	if (cpuid == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock_bh(&call_lock);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_single(cpuid, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock_bh(&call_lock);
-	put_cpu();
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- *  <mask>	The set of cpus to run on.  Must not include the current cpu.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <wait>	If true, wait (atomically) until function
- *		has completed on other CPUs.
- *
- *  Returns 0 on success, else a negative status code.
- *
- *  If @wait is true, then returns once @func has returned; otherwise
- *  it returns just before the target cpu calls @func.
- *
- *  You must not call this function with disabled interrupts or from a
- *  hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
-{
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	spin_lock(&call_lock);
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(IPI_CALL_FUNC);
-	else
-		send_IPI_mask(mask, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-
-}
-EXPORT_SYMBOL(smp_call_function_mask);
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+}
 
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- *  [SUMMARY]	Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	currently unused.
- *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus;
-
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function);
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+	send_IPI_mask(mask, IPI_CALL_FUNC);
+}
 
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
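
With the private call_data machinery gone, smp.c only provides the transport: the generic code in kernel/smp.c queues each request and calls back into the architecture to raise the IPI, and handle_IPI() above hands the interrupt to the generic handlers. A rough sketch of a caller under that scheme (everything below other than the helpers named in this patch is illustrative, not from the patch):

/* Illustrative only: ask CPU 'target' to flush its local TLB and wait
 * until it has done so. */
static void remote_tlb_flush(void *unused)
{
	local_flush_tlb_all();
}

static void flush_one_remote_cpu(int target)
{
	/*
	 * The generic helper queues the request, then calls
	 * arch_send_call_function_single_ipi(target), which sends
	 * IPI_CALL_FUNC_SINGLE; the target's handle_IPI() runs
	 * generic_smp_call_function_single_interrupt(), which in turn
	 * executes remote_tlb_flush() there before the wait completes.
	 */
	smp_call_function_single(target, remote_tlb_flush, NULL, 1);
}
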
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d7ad42b77d41..9d1d429c6c59 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master)
 
 	go[MASTER] = 1;
 
-	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
 		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
 		return;
 	}
@@ -395,14 +395,14 @@ smp_callin (void)
 
 	fix_b0_for_bsp();
 
-	lock_ipi_calllock();
+	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
-	unlock_ipi_calllock();
+	ipi_call_unlock_irq();
 
 	smp_setup_percpu_timer();
 
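
The smp_callin() hunk swaps the ia64-private calllock wrappers (removed above together with call_lock) for the locking hooks exported by the generic helper code, which keep a CPU from appearing in cpu_online_map in the middle of an in-flight smp_call_function(). Their assumed prototypes in kernels of this era, shown only for reference:

void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
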
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index e77995a6e3ed..8eff8c1d40a6 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
 		atomic_set(&uc_pool->status, 0);
-		status = smp_call_function(uncached_ipi_visibility, uc_pool,
-					   0, 1);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
 		if (status || atomic_read(&uc_pool->status))
 			goto failed;
 	} else if (status != PAL_VISIBILITY_OK)
@@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	if (status != PAL_STATUS_SUCCESS)
 		goto failed;
 	atomic_set(&uc_pool->status, 0);
-	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
 	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 
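
The uncached.c sites keep their existing error-reporting pattern: the cross-called function returns void, so per-CPU failures are recorded in an atomic_t that the caller checks after the synchronous (wait == 1) call returns. A minimal sketch of that pattern with hypothetical names:

struct my_pool {
	atomic_t status;		/* set non-zero by any CPU that fails */
};

static void my_ipi_handler(void *data)
{
	struct my_pool *pool = data;

	if (do_percpu_work() != 0)	/* hypothetical per-CPU operation */
		atomic_set(&pool->status, -1);
}

static int run_everywhere_and_check(struct my_pool *pool)
{
	int ret;

	atomic_set(&pool->status, 0);
	/* wait == 1: all other CPUs have run the handler when this returns */
	ret = smp_call_function(my_ipi_handler, pool, 1);
	if (ret || atomic_read(&pool->status))
		return -EIO;
	return 0;
}
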