Diffstat (limited to 'arch/ia64')
 arch/ia64/Kconfig                   |   1
 arch/ia64/Makefile                  |   1
 arch/ia64/kernel/acpi.c             |   9
 arch/ia64/kernel/iosapic.c          |   2
 arch/ia64/kernel/mca.c              |   6
 arch/ia64/kernel/palinfo.c          |   2
 arch/ia64/kernel/perfmon.c          |  16
 arch/ia64/kernel/process.c          |   6
 arch/ia64/kernel/sal.c              |   7
 arch/ia64/kernel/setup.c            |   6
 arch/ia64/kernel/smp.c              | 254
 arch/ia64/kernel/smpboot.c          |   6
 arch/ia64/kernel/time.c             |   1
 arch/ia64/kernel/uncached.c         |   5
 arch/ia64/kvm/kvm-ia64.c            |   4
 arch/ia64/kvm/kvm_fw.c              |   2
 arch/ia64/kvm/mmio.c                |   3
 arch/ia64/sn/kernel/irq.c           |   1
 arch/ia64/sn/kernel/sn2/sn2_smp.c   |   2
 arch/ia64/sn/kernel/sn2/sn_hwperf.c |   2
 arch/ia64/uv/kernel/setup.c         |  12
 21 files changed, 74 insertions(+), 274 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 16be41446b5b..18bcc10903b4 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -303,6 +303,7 @@ config VIRT_CPU_ACCOUNTING
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N. If you have a system with more
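
Note on the conversion: selecting USE_GENERIC_SMP_HELPERS moves ia64 onto the shared cross-call implementation in kernel/smp.c, and nearly every hunk below is the mechanical fallout: the unused "nonatomic"/"retry" argument is dropped from the cross-call APIs at each ia64 call site. A minimal before/after sketch of the calling convention (illustrative only; the hunks below are the authoritative list of call sites):

	/* before: an extra, ignored flag sat between info and wait */
	smp_call_function(func, info, 0 /* nonatomic */, 1 /* wait */);
	smp_call_function_single(cpu, func, info, 0 /* nonatomic */, 1 /* wait */);
	on_each_cpu(func, info, 1 /* retry */, 0 /* wait */);

	/* after: only the wait flag remains */
	smp_call_function(func, info, 1);
	smp_call_function_single(cpu, func, info, 1);
	on_each_cpu(func, info, 0);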
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 3b9c8cadfd34..905d25b13d5a 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -57,6 +57,7 @@ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
57core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ 57core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
58core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ 58core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
59core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ 59core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
60core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
60core-$(CONFIG_KVM) += arch/ia64/kvm/ 61core-$(CONFIG_KVM) += arch/ia64/kvm/
61 62
62drivers-$(CONFIG_PCI) += arch/ia64/pci/ 63drivers-$(CONFIG_PCI) += arch/ia64/pci/
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 853d1f11be00..43687cc60dfb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -465,7 +465,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 		printk(KERN_ERR
 		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
 		       len, slit->header.length);
-		memset(numa_slit, 10, sizeof(numa_slit));
 		return;
 	}
 	slit_table = slit;
@@ -574,8 +573,14 @@ void __init acpi_numa_arch_fixup(void)
 	printk(KERN_INFO "Number of memory chunks in system = %d\n",
 	       num_node_memblks);
 
-	if (!slit_table)
+	if (!slit_table) {
+		for (i = 0; i < MAX_NUMNODES; i++)
+			for (j = 0; j < MAX_NUMNODES; j++)
+				node_distance(i, j) = i == j ? LOCAL_DISTANCE :
+							REMOTE_DISTANCE;
 		return;
+	}
+
 	memset(numa_slit, -1, sizeof(numa_slit));
 	for (i = 0; i < slit_table->locality_count; i++) {
 		if (!pxm_bit_test(i))
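
The two acpi.c hunks also change the no-SLIT fallback. A sketch of the resulting default, assuming the mainline definitions LOCAL_DISTANCE == 10 and REMOTE_DISTANCE == 20 from include/linux/topology.h:

	/* Fallback when firmware supplies no SLIT: each node is distance 10
	 * from itself and 20 from everyone else.  The removed memset() on
	 * the error path instead flattened every byte of the table to 10,
	 * making all nodes look equally "local". */
	for (i = 0; i < MAX_NUMNODES; i++)
		for (j = 0; j < MAX_NUMNODES; j++)
			node_distance(i, j) = (i == j) ? LOCAL_DISTANCE
						       : REMOTE_DISTANCE;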
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 587196dd84fd..3bc2fa64f87f 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -558,8 +558,6 @@ static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
 	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
 		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
 				    NR_PREALLOCATE_RTE_ENTRIES);
-		if (!rte)
-			return NULL;
 		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
 			list_add(&rte->rte_list, &free_rte_list);
 	}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 705176b434b3..7dd96c127177 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 static void
 ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }
 
 /*
@@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 static void
 ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 
 /*
@@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
-					NULL, 1, 0);
+					NULL, 0);
 		break;
 	}
 	return NOTIFY_OK;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 9dc00f7fe10e..e5c57f413ca2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
 
 
 	/* will send IPI to other CPU and wait for completion of remote call */
-	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
+	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
 		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
 		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
 		return 0;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 71d05133f556..19d4493c6193 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 	int ret;
 
 	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
 	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
@@ -1864,11 +1864,6 @@ pfm_flush(struct file *filp, fl_owner_t id)
 	 * invoked after, it will find an empty queue and no
 	 * signal will be sent. In both case, we are safe
 	 */
-	if (filp->f_flags & FASYNC) {
-		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
-		pfm_do_fasync (-1, filp, ctx, 0);
-	}
-
 	PROTECT_CTX(ctx, flags);
 
 	state = ctx->ctx_state;
@@ -1999,6 +1994,11 @@ pfm_close(struct inode *inode, struct file *filp)
 		return -EBADF;
 	}
 
+	if (filp->f_flags & FASYNC) {
+		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
+		pfm_do_fasync(-1, filp, ctx, 0);
+	}
+
 	PROTECT_CTX(ctx, flags);
 
 	state = ctx->ctx_state;
@@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	}
 
 	/* save the current system wide pmu states */
-	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 		goto cleanup_reserve;
@@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
 	pfm_alt_intr_handler = NULL;
 
-	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 	}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a3a34b4eb038..3ab8373103ec 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -55,6 +55,10 @@ void (*ia64_mark_idle)(int);
 
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
+unsigned long idle_halt;
+EXPORT_SYMBOL(idle_halt);
+unsigned long idle_nomwait;
+EXPORT_SYMBOL(idle_nomwait);
 
 void
 ia64_do_show_stack (struct unw_frame_info *info, void *arg)
@@ -286,7 +290,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 7e0259709c04..0464173ea568 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -252,11 +252,10 @@ check_sal_cache_flush (void)
 	local_irq_save(flags);
 
 	/*
-	 * Schedule a timer interrupt, wait until it's reported, and see if
-	 * SAL_CACHE_FLUSH drops it.
+	 * Send ourselves a timer interrupt, wait until it's reported, and see
+	 * if SAL_CACHE_FLUSH drops it.
 	 */
-	ia64_set_itv(IA64_TIMER_VECTOR);
-	ia64_set_itm(ia64_get_itc() + 1000);
+	platform_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);
 
 	while (!ia64_get_irr(IA64_TIMER_VECTOR))
 		cpu_relax();
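
Two things change in check_sal_cache_flush(): the comment, and how the pending timer interrupt is produced. Instead of reprogramming the interval timer via ia64_set_itv()/ia64_set_itm() (which clobbers live timer state and races if the match value is passed), the CPU now posts the timer vector to itself as an IPI. A hedged sketch of the probe pattern after this hunk, with cpu being the current CPU id as in the surrounding function:

	/* pend a benign interrupt to ourselves... */
	platform_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);
	while (!ia64_get_irr(IA64_TIMER_VECTOR))
		cpu_relax();	/* ...wait until it is visibly pending... */
	/* ...then invoke SAL_CACHE_FLUSH and test whether the pending bit
	 * survived, which is what the rest of the function checks. */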
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 750749551e86..e5c2de9b29a5 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -552,7 +552,8 @@ setup_arch (char **cmdline_p)
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
-		32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
+		32 : cpus_weight(early_cpu_possible_map)),
+		additional_cpus > 0 ? additional_cpus : 0);
 # endif
 #else
 # ifdef CONFIG_SMP
@@ -583,8 +584,6 @@ setup_arch (char **cmdline_p)
 	cpu_init();	/* initialize the bootstrap CPU */
 	mmu_context_init();	/* initialize context_id bitmap */
 
-	check_sal_cache_flush();
-
 #ifdef CONFIG_ACPI
 	acpi_boot_init();
 #endif
@@ -617,6 +616,7 @@ setup_arch (char **cmdline_p)
 	ia64_mca_init();
 
 	platform_setup(cmdline_p);
+	check_sal_cache_flush();
 	paging_init();
 }
 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 983296f1c813..3676468612b6 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -60,25 +60,9 @@ static struct local_tlb_flush_counts {
 
 static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
 
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static  __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t started;
-	atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
-
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
+#define IPI_CALL_FUNC_SINGLE	2
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
@@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
-void
-lock_ipi_calllock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static inline void
-handle_call_data(void)
-{
-	struct call_data_struct *data;
-	void (*func)(void *info);
-	void *info;
-	int wait;
-
-	/* release the 'pointer lock' */
-	data = (struct call_data_struct *)call_data;
-	func = data->func;
-	info = data->info;
-	wait = data->wait;
-
-	mb();
-	atomic_inc(&data->started);
-	/* At this point the structure may be gone unless wait is true. */
-	(*func)(info);
-
-	/* Notify the sending CPU that the task is done. */
-	mb();
-	if (wait)
-		atomic_inc(&data->finished);
-}
-
 static void
 stop_this_cpu(void)
 {
@@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id)
 			ops &= ~(1 << which);
 
 			switch (which) {
-			case IPI_CALL_FUNC:
-				handle_call_data();
-				break;
-
 			case IPI_CPU_STOP:
 				stop_this_cpu();
 				break;
+			case IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+			case IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
 #ifdef CONFIG_KEXEC
 			case IPI_KDUMP_CPU_STOP:
 				unw_init_running(kdump_cpu_freeze, NULL);
@@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+
+
 /*
  * Called with preemption disabled.
  */
@@ -334,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 void
 smp_flush_tlb_all (void)
 {
-	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
@@ -357,193 +308,18 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
 	 * rather trivial.
 	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
-/*
- * Run a function on a specific CPU
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	Currently unused.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
-			  int wait)
-{
-	struct call_data_struct data;
-	int cpus = 1;
-	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
-	if (cpuid == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock_bh(&call_lock);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_single(cpuid, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock_bh(&call_lock);
-	put_cpu();
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * <mask>	The set of cpus to run on. Must not include the current cpu.
- * <func>	The function to run. This must be fast and non-blocking.
- * <info>	An arbitrary pointer to pass to the function.
- * <wait>	If true, wait (atomically) until function
- *		has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	spin_lock(&call_lock);
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(IPI_CALL_FUNC);
-	else
-		send_IPI_mask(mask, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function_mask);
 
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- * [SUMMARY]	Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	currently unused.
- *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
+	send_IPI_mask(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
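
Net effect of the smp.c hunks: the hand-rolled call_data plumbing (call_lock spinlock, started/finished counters, IPI broadcast, busy-wait loops) collapses into the two arch hooks the generic helpers require, plus two dispatch cases in handle_IPI(). In sketch form (the generic_* functions come from kernel/smp.c with USE_GENERIC_SMP_HELPERS; dispatch_call_func_ipi is an illustrative helper, not a function in this patch):

	/* arch side: map the two generic cross-call IPIs onto ia64 vectors */
	void arch_send_call_function_single_ipi(int cpu)
	{
		send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
	}

	void arch_send_call_function_ipi(cpumask_t mask)
	{
		send_IPI_mask(mask, IPI_CALL_FUNC);
	}

	/* arch side: dispatch from handle_IPI(); queueing, waiting and
	 * call-data lifetime are now owned by kernel/smp.c */
	static void dispatch_call_func_ipi(unsigned long which)
	{
		switch (which) {
		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;
		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;
		}
	}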
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 933f38811528..03f1a9908afc 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -318,7 +318,7 @@ ia64_sync_itc (unsigned int master)
 
 	go[MASTER] = 1;
 
-	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
 		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
 		return;
 	}
@@ -396,14 +396,14 @@ smp_callin (void)
 
 	fix_b0_for_bsp();
 
-	lock_ipi_calllock();
+	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
-	unlock_ipi_calllock();
+	ipi_call_unlock_irq();
 
 	smp_setup_percpu_timer();
 
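
smp_callin() used to bracket the cpu_online_map update with the arch-private call_lock (via lock_ipi_calllock()); that lock is deleted above, so the generic helpers' equivalent is taken instead. The purpose of the critical section is unchanged: a CPU must not become a visible broadcast target halfway through an in-flight smp_call_function(). Sketch of the section as it stands after this hunk:

	ipi_call_lock_irq();		/* generic call-function lock */
	spin_lock(&vector_lock);
	__setup_vector_irq(cpuid);	/* per-cpu IRQ vector tables */
	cpu_set(cpuid, cpu_online_map);	/* now a broadcast target */
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	ipi_call_unlock_irq();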
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 046ca89efc05..65c10a42c88f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -130,6 +130,7 @@ void account_system_vtime(struct task_struct *tsk)
 
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(account_system_vtime);
 
 /*
  * Called from the timer interrupt handler to charge accumulated user time
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index e77995a6e3ed..8eff8c1d40a6 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
 		atomic_set(&uc_pool->status, 0);
-		status = smp_call_function(uncached_ipi_visibility, uc_pool,
-					   0, 1);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
 		if (status || atomic_read(&uc_pool->status))
 			goto failed;
 	} else if (status != PAL_VISIBILITY_OK)
@@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	if (status != PAL_STATUS_SUCCESS)
 		goto failed;
 	atomic_set(&uc_pool->status, 0);
-	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
 	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 318b81100623..68c978be9a51 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -395,7 +395,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		if (kvm->vcpus[i]->cpu != -1) {
 			call_data.vcpu = kvm->vcpus[i];
 			smp_call_function_single(kvm->vcpus[i]->cpu,
-					vcpu_global_purge, &call_data, 0, 1);
+					vcpu_global_purge, &call_data, 1);
 		} else
 			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
 
@@ -1693,7 +1693,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 	wake_up_interruptible(&vcpu->wq);
 
 	if (vcpu->guest_mode)
-		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
 }
 
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 091f936c4485..0c69d9ec92d4 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -130,7 +130,7 @@ static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
 	args.cache_type = gr29;
 	args.operation = gr30;
 	smp_call_function(remote_pal_cache_flush,
-				(void *)&args, 1, 1);
+				(void *)&args, 1);
 	if (args.status != 0)
 		printk(KERN_ERR"pal_cache_flush error!,"
 			"status:0x%lx\n", args.status);
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 351bf70da463..7f1a858bc69f 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -159,7 +159,8 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
 
 	if (p->u.ioreq.state == STATE_IORESP_READY) {
 		if (dir == IOREQ_READ)
-			*dest = p->u.ioreq.data;
+			/* it's necessary to ensure zero extending */
+			*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
 	} else
 		panic_vm(vcpu);
 out:
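
The mmio.c fix masks the device-supplied value down to the access size s (in bytes) so that stale high bits in p->u.ioreq.data cannot leak into the zero-extended destination. A worked instance of the mask expression, assuming a 64-bit unsigned long:

	/* ~0UL >> (64 - (s * 8)) keeps only the low s bytes:    */
	/*   s == 1  ->  0x00000000000000ff                      */
	/*   s == 2  ->  0x000000000000ffff                      */
	/*   s == 4  ->  0x00000000ffffffff                      */
	/*   s == 8  ->  shift by 0, i.e. ~0UL: full 64-bit read */
	*dest = p->u.ioreq.data & (~0UL >> (64 - (s * 8)));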
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 53351c3cd7b1..96c31b4180c3 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/rculist.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/intr.h>
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 49d3120415eb..e585f9a2afb9 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -512,6 +512,8 @@ static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, si
 	int cpu;
 	char optstr[64];
 
+	if (count == 0 || count > sizeof(optstr))
+		return -EINVAL;
 	if (copy_from_user(optstr, user, count))
 		return -EFAULT;
 	optstr[count - 1] = '\0';
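
The sn2_ptc_proc_write() guard exists because count is caller-controlled: count == 0 would make the later optstr[count - 1] write index optstr[-1], and count > 64 would let copy_from_user() overrun the 64-byte stack buffer. Restated with the surrounding lines for context (sketch):

	char optstr[64];

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;			/* rejects both overrun cases */
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';		/* now provably in bounds */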
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 8cc0c4753d89..636588e7e068 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
 	if (use_ipi) {
 		/* use an interprocessor interrupt to call SAL */
 		smp_call_function_single(cpu, sn_hwperf_call_sal,
-				op_info, 1, 1);
+				op_info, 1);
 	}
 	else {
 		/* migrate the task before calling SAL */
diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c
index 9aa743203c3c..cf5f28ae96c4 100644
--- a/arch/ia64/uv/kernel/setup.c
+++ b/arch/ia64/uv/kernel/setup.c
@@ -17,6 +17,9 @@
 DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
 
+#ifdef CONFIG_IA64_SGI_UV
+int sn_prom_type;
+#endif
 
 struct redir_addr {
 	unsigned long redirect;
@@ -64,6 +67,15 @@ void __init uv_setup(char **cmdline_p)
 		m_n_config.s.m_skt = 37;
 		m_n_config.s.n_skt = 0;
 		mmr_base = 0;
+#if 0
+		/* Need BIOS calls - TDB */
+		if (!ia64_sn_is_fake_prom())
+			sn_prom_type = 1;
+		else
+#endif
+			sn_prom_type = 2;
+		printk(KERN_INFO "Running on medusa with %s PROM\n",
+			(sn_prom_type == 1) ? "real" : "fake");
 	} else {
 		get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
 		node_id.v = uv_read_local_mmr(UVH_NODE_ID);