author		Tony Luck <tony.luck@intel.com>		2008-04-17 13:13:09 -0400
committer	Tony Luck <tony.luck@intel.com>		2008-04-17 13:13:09 -0400
commit		a49072bb367a94a9af6c6a6dcbaa1dc5617d7fa0 (patch)
tree		155159e2ace7175298465b6fbf1024c77de8edce /arch/ia64/kernel
parent		fc494d6c1825de37f04abe147741d50be08403ab (diff)
parent		31a6b11fed6ceec07ec4bdfefae56b8252d450cf (diff)
Pull kvm-patches into release branch
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--	arch/ia64/kernel/mca.c		49
-rw-r--r--	arch/ia64/kernel/mca_asm.S	5
-rw-r--r--	arch/ia64/kernel/smp.c		82
3 files changed, 136 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6c18221dba36..607006a6a976 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -97,6 +97,7 @@
 
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
+#include <asm/tlb.h>
 
 #include "mca_drv.h"
 #include "entry.h"
@@ -112,6 +113,7 @@ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
 DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
 DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
+DEFINE_PER_CPU(u64, ia64_mca_tr_reload); /* Flag for TR reload */
 
 unsigned long __per_cpu_mca[NR_CPUS];
 
@@ -1182,6 +1184,49 @@ all_in:
         return;
 }
 
+/* mca_insert_tr
+ *
+ *  Switch rid when TR reload and needed!
+ *  iord: 1: itr, 2: dtr
+ *
+ */
+static void mca_insert_tr(u64 iord)
+{
+
+        int i;
+        u64 old_rr;
+        struct ia64_tr_entry *p;
+        unsigned long psr;
+        int cpu = smp_processor_id();
+
+        psr = ia64_clear_ic();
+        for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
+                p = &__per_cpu_idtrs[cpu][iord-1][i];
+                if (p->pte & 0x1) {
+                        old_rr = ia64_get_rr(p->ifa);
+                        if (old_rr != p->rr) {
+                                ia64_set_rr(p->ifa, p->rr);
+                                ia64_srlz_d();
+                        }
+                        ia64_ptr(iord, p->ifa, p->itir >> 2);
+                        ia64_srlz_i();
+                        if (iord & 0x1) {
+                                ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
+                                ia64_srlz_i();
+                        }
+                        if (iord & 0x2) {
+                                ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
+                                ia64_srlz_i();
+                        }
+                        if (old_rr != p->rr) {
+                                ia64_set_rr(p->ifa, old_rr);
+                                ia64_srlz_d();
+                        }
+                }
+        }
+        ia64_set_psr(psr);
+}
+
 /*
  * ia64_mca_handler
  *
@@ -1271,6 +1316,10 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                 monarch_cpu = -1;
 #endif
         }
+        if (__get_cpu_var(ia64_mca_tr_reload)) {
+                mca_insert_tr(0x1); /* Reload dynamic itrs */
+                mca_insert_tr(0x2); /* Reload dynamic dtrs */
+        }
         if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
                 == NOTIFY_STOP)
                 ia64_mca_spin(__func__);
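
The new mca_insert_tr() only re-inserts translations that were previously recorded in __per_cpu_idtrs[]; the registration side (ia64_itr_entry()/ia64_ptr_entry() in arch/ia64/mm/tlb.c and <asm/tlb.h>) comes from a companion patch merged through this same branch and does not appear above because the diffstat is limited to arch/ia64/kernel. As a rough idea of how the two halves fit together, a hypothetical consumer might pin a kernel page as in the sketch below; the API signatures are assumed from the companion patch and the example_* names are invented for illustration.

/*
 * Hypothetical consumer of the dynamic TR API (illustration only).
 * Assumed, from the companion patch merged through this branch:
 *     int  ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
 *     void ia64_ptr_entry(u64 target_mask, int slot);
 * target_mask: 0x1 = instruction TR, 0x2 = data TR, 0x3 = both.
 */
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

static int example_slot = -1;	/* TR slot handed back by ia64_itr_entry() */

static int example_pin_page(void *buf)		/* buf must be page aligned */
{
        u64 pte = pte_val(mk_pte_phys(__pa(buf), PAGE_KERNEL));

        /*
         * Pin one kernel page with a data TR.  The allocator records the
         * entry in __per_cpu_idtrs[], which is exactly what the
         * mca_insert_tr(0x2) call above walks to restore it after an MCA.
         */
        example_slot = ia64_itr_entry(0x2, (u64)buf, pte, PAGE_SHIFT);
        return example_slot < 0 ? example_slot : 0;
}

static void example_unpin_page(void)
{
        if (example_slot >= 0)
                ia64_ptr_entry(0x2, example_slot);
}
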
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 8bc7d259e0c6..a06d46548ff9 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -219,8 +219,13 @@ ia64_reload_tr:
         mov r20=IA64_TR_CURRENT_STACK
         ;;
         itr.d dtr[r20]=r16
+        GET_THIS_PADDR(r2, ia64_mca_tr_reload)
+        mov r18 = 1
         ;;
         srlz.d
+        ;;
+        st8 [r2] =r18
+        ;;
 
 done_tlb_purge_and_reload:
 
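
This part of the OS_MCA path runs in physical mode while the translation registers are being purged and reloaded, so the flag is written through its physical address (via the file's existing GET_THIS_PADDR helper) rather than through the usual per-CPU accessors. In plain C, the added instructions amount to roughly the sketch below (illustration only; example_* name is invented):

/* Roughly what the added mca_asm.S lines do, expressed in C (illustration
 * only; the real store must use the variable's physical address because
 * ia64_reload_tr runs in physical mode). */
#include <linux/percpu.h>

DECLARE_PER_CPU(u64, ia64_mca_tr_reload);

static void example_mark_tr_reload(void)
{
        __get_cpu_var(ia64_mca_tr_reload) = 1;
}
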
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 4e446aa5f4ac..9a9d4c489330 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -213,6 +213,19 @@ send_IPI_allbutself (int op)
  * Called with preemption disabled.
  */
 static inline void
+send_IPI_mask(cpumask_t mask, int op)
+{
+        unsigned int cpu;
+
+        for_each_cpu_mask(cpu, mask) {
+                send_IPI_single(cpu, op);
+        }
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
 send_IPI_all (int op)
 {
         int i;
@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * <mask>	The set of cpus to run on.  Must not include the current cpu.
+ * <func>	The function to run. This must be fast and non-blocking.
+ * <info>	An arbitrary pointer to pass to the function.
+ * <wait>	If true, wait (atomically) until function
+ *		has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+                           void (*func)(void *), void *info,
+                           int wait)
+{
+        struct call_data_struct data;
+        cpumask_t allbutself;
+        int cpus;
+
+        spin_lock(&call_lock);
+        allbutself = cpu_online_map;
+        cpu_clear(smp_processor_id(), allbutself);
+
+        cpus_and(mask, mask, allbutself);
+        cpus = cpus_weight(mask);
+        if (!cpus) {
+                spin_unlock(&call_lock);
+                return 0;
+        }
+
+        /* Can deadlock when called with interrupts disabled */
+        WARN_ON(irqs_disabled());
+
+        data.func = func;
+        data.info = info;
+        atomic_set(&data.started, 0);
+        data.wait = wait;
+        if (wait)
+                atomic_set(&data.finished, 0);
+
+        call_data = &data;
+        mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
+
+        /* Send a message to other CPUs */
+        if (cpus_equal(mask, allbutself))
+                send_IPI_allbutself(IPI_CALL_FUNC);
+        else
+                send_IPI_mask(mask, IPI_CALL_FUNC);
+
+        /* Wait for response */
+        while (atomic_read(&data.started) != cpus)
+                cpu_relax();
+
+        if (wait)
+                while (atomic_read(&data.finished) != cpus)
+                        cpu_relax();
+        call_data = NULL;
+
+        spin_unlock(&call_lock);
+        return 0;
+
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
  * in the system.
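
The kernel-doc above spells out the constraints on callers (fast, non-blocking func; interrupts enabled; not from IRQ or bottom-half context; the current CPU excluded from the mask). A minimal hypothetical caller, with invented example_* names, could look like the sketch below:

/* Hypothetical caller of the new smp_call_function_mask() (illustration
 * only; the example_* names are invented). */
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/atomic.h>

static void example_ipi_handler(void *info)
{
        /* Runs on every CPU in the mask, in interrupt context: must be
         * fast and non-blocking, per the kernel-doc above. */
        atomic_inc((atomic_t *)info);
}

static int example_poke_cpus(cpumask_t mask, atomic_t *counter)
{
        /* Caller must have interrupts enabled and must not be in IRQ or
         * bottom-half context; 'mask' must not include the current CPU. */
        return smp_call_function_mask(mask, example_ipi_handler, counter,
                                      1 /* wait for completion */);
}
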