author	Ingo Molnar <mingo@elte.hu>	2009-03-28 18:05:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-03-28 18:05:50 -0400
commit	b0d44c0dbbd52effb731b1c0af9afd56215c48de (patch)
tree	3237c0087d91a5390aed05689b9f610ba16fa116 /arch/ia64
parent	9537a48ed4b9e4b738943d6da0a0fd4278adf905 (diff)
parent	7c730ccdc1188b97f5c8cb690906242c7ed75c22 (diff)
Merge branch 'linus' into core/iommu
Conflicts:
	arch/x86/Kconfig
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/ia32/ia32_entry.S	2
-rw-r--r--	arch/ia64/include/asm/fpu.h	2
-rw-r--r--	arch/ia64/include/asm/gcc_intrin.h	1
-rw-r--r--	arch/ia64/include/asm/intrinsics.h	1
-rw-r--r--	arch/ia64/include/asm/kvm.h	52
-rw-r--r--	arch/ia64/include/asm/kvm_host.h	18
-rw-r--r--	arch/ia64/include/asm/msidef.h	42
-rw-r--r--	arch/ia64/include/asm/percpu.h	4
-rw-r--r--	arch/ia64/include/asm/socket.h	3
-rw-r--r--	arch/ia64/include/asm/swab.h	2
-rw-r--r--	arch/ia64/include/asm/topology.h	2
-rw-r--r--	arch/ia64/include/asm/uv/uv.h	13
-rw-r--r--	arch/ia64/kernel/acpi.c	4
-rw-r--r--	arch/ia64/kernel/iosapic.c	2
-rw-r--r--	arch/ia64/kernel/irq.c	6
-rw-r--r--	arch/ia64/kernel/irq_ia64.c	31
-rw-r--r--	arch/ia64/kernel/msi_ia64.c	59
-rw-r--r--	arch/ia64/kernel/perfmon.c	2
-rw-r--r--	arch/ia64/kernel/vmlinux.lds.S	11
-rw-r--r--	arch/ia64/kvm/Kconfig	4
-rw-r--r--	arch/ia64/kvm/irq.h	2
-rw-r--r--	arch/ia64/kvm/kvm-ia64.c	125
-rw-r--r--	arch/ia64/kvm/kvm_fw.c	151
-rw-r--r--	arch/ia64/kvm/process.c	71
-rw-r--r--	arch/ia64/kvm/vcpu.c	44
-rw-r--r--	arch/ia64/kvm/vcpu.h	4
-rw-r--r--	arch/ia64/kvm/vtlb.c	44
-rw-r--r--	arch/ia64/sn/kernel/msi_sn.c	2
28 files changed, 507 insertions(+), 197 deletions(-)
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a46f8395e9a5..af9405cd70e5 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -240,7 +240,7 @@ ia32_syscall_table:
 	data8 sys_ni_syscall
 	data8 sys_umask				/* 60 */
 	data8 sys_chroot
-	data8 sys_ustat
+	data8 compat_sys_ustat
 	data8 sys_dup2
 	data8 sys_getppid
 	data8 sys_getpgrp			/* 65 */
diff --git a/arch/ia64/include/asm/fpu.h b/arch/ia64/include/asm/fpu.h
index 3859558ff0a4..0c26157cffa5 100644
--- a/arch/ia64/include/asm/fpu.h
+++ b/arch/ia64/include/asm/fpu.h
@@ -6,8 +6,6 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-#include <asm/types.h>
-
 /* floating point status register: */
 #define FPSR_TRAP_VD	(1 << 0)	/* invalid op trap disabled */
 #define FPSR_TRAP_DD	(1 << 1)	/* denormal trap disabled */
diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h
index 0f5b55921758..c2c5fd8fcac4 100644
--- a/arch/ia64/include/asm/gcc_intrin.h
+++ b/arch/ia64/include/asm/gcc_intrin.h
@@ -6,6 +6,7 @@
  * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
  */
 
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 /* define this macro to get some asm stmts included in 'c' files */
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index a3e44a5ed497..c47830e26cb7 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -10,6 +10,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/types.h>
 /* include compiler specific intrinsics */
 #include <asm/ia64regs.h>
 #ifdef __INTEL_COMPILER
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index bfa86b6af7cd..18a7e49abbc5 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -21,8 +21,7 @@
  *
  */
 
-#include <asm/types.h>
-
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 /* Select x86 specific features in <linux/kvm.h> */
@@ -166,7 +165,40 @@ struct saved_vpd {
 	unsigned long vcpuid[5];
 	unsigned long vpsr;
 	unsigned long vpr;
-	unsigned long vcr[128];
+	union {
+		unsigned long vcr[128];
+		struct {
+			unsigned long dcr;
+			unsigned long itm;
+			unsigned long iva;
+			unsigned long rsv1[5];
+			unsigned long pta;
+			unsigned long rsv2[7];
+			unsigned long ipsr;
+			unsigned long isr;
+			unsigned long rsv3;
+			unsigned long iip;
+			unsigned long ifa;
+			unsigned long itir;
+			unsigned long iipa;
+			unsigned long ifs;
+			unsigned long iim;
+			unsigned long iha;
+			unsigned long rsv4[38];
+			unsigned long lid;
+			unsigned long ivr;
+			unsigned long tpr;
+			unsigned long eoi;
+			unsigned long irr[4];
+			unsigned long itv;
+			unsigned long pmv;
+			unsigned long cmcv;
+			unsigned long rsv5[5];
+			unsigned long lrr0;
+			unsigned long lrr1;
+			unsigned long rsv6[46];
+		};
+	};
 };
 
 struct kvm_regs {
@@ -214,4 +246,18 @@ struct kvm_sregs {
 struct kvm_fpu {
 };
 
+#define KVM_IA64_VCPU_STACK_SHIFT	16
+#define KVM_IA64_VCPU_STACK_SIZE	(1UL << KVM_IA64_VCPU_STACK_SHIFT)
+
+struct kvm_ia64_vcpu_stack {
+	unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
 #endif
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 348663661659..4542651e6acb 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -112,7 +112,11 @@
 #define VCPU_STRUCT_SHIFT	16
 #define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
 
-#define KVM_STK_OFFSET		VCPU_STRUCT_SIZE
+/*
+ * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} in arch/ia64/include/asm/kvm.h
+ */
+#define KVM_STK_SHIFT		16
+#define KVM_STK_OFFSET		(__IA64_UL_CONST(1) << KVM_STK_SHIFT)
 
 #define KVM_VM_STRUCT_SHIFT	19
 #define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
@@ -153,10 +157,10 @@ struct kvm_vm_data {
 	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
 };
 
-#define VCPU_BASE(n)	KVM_VM_DATA_BASE + \
-			offsetof(struct kvm_vm_data, vcpu_data[n])
-#define VM_BASE		KVM_VM_DATA_BASE + \
-			offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define VCPU_BASE(n)	(KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, vcpu_data[n]))
+#define KVM_VM_BASE	(KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, kvm_vm_struct))
 #define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
 
@@ -235,8 +239,6 @@ struct kvm_vm_data {
 
 struct kvm;
 struct kvm_vcpu;
-struct kvm_guest_debug{
-};
 
 struct kvm_mmio_req {
 	uint64_t addr;		/* physical address */
@@ -462,6 +464,8 @@ struct kvm_arch {
 	unsigned long metaphysical_rr4;
 	unsigned long vmm_init_rr;
 
+	int online_vcpus;
+
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
diff --git a/arch/ia64/include/asm/msidef.h b/arch/ia64/include/asm/msidef.h
new file mode 100644
index 000000000000..592c1047a0c5
--- /dev/null
+++ b/arch/ia64/include/asm/msidef.h
@@ -0,0 +1,42 @@
+#ifndef _IA64_MSI_DEF_H
+#define _IA64_MSI_DEF_H
+
+/*
+ * Shifts for APIC-based data
+ */
+
+#define	MSI_DATA_VECTOR_SHIFT		0
+#define	MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define	MSI_DATA_VECTOR_MASK		0xffffff00
+
+#define	MSI_DATA_DELIVERY_MODE_SHIFT	8
+#define	MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define	MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define	MSI_DATA_LEVEL_SHIFT		14
+#define	MSI_DATA_LEVEL_DEASSERT		(0 << MSI_DATA_LEVEL_SHIFT)
+#define	MSI_DATA_LEVEL_ASSERT		(1 << MSI_DATA_LEVEL_SHIFT)
+
+#define	MSI_DATA_TRIGGER_SHIFT		15
+#define	MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
+#define	MSI_DATA_TRIGGER_LEVEL		(1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define	MSI_ADDR_DEST_ID_SHIFT		4
+#define	MSI_ADDR_HEADER			0xfee00000
+
+#define	MSI_ADDR_DEST_ID_MASK		0xfff0000f
+#define	MSI_ADDR_DEST_ID_CPU(cpu)	((cpu) << MSI_ADDR_DEST_ID_SHIFT)
+
+#define	MSI_ADDR_DEST_MODE_SHIFT	2
+#define	MSI_ADDR_DEST_MODE_PHYS		(0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define	MSI_ADDR_DEST_MODE_LOGIC	(1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define	MSI_ADDR_REDIRECTION_SHIFT	3
+#define	MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define	MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#endif /* _IA64_MSI_DEF_H */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 77f30b664b4e..30cf46534dd2 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -27,12 +27,12 @@ extern void *per_cpu_init(void);
 
 #else /* ! SMP */
 
-#define PER_CPU_ATTRIBUTES	__attribute__((__section__(".data.percpu")))
-
 #define per_cpu_init()				(__phys_per_cpu_start)
 
 #endif	/* SMP */
 
+#define PER_CPU_BASE_SECTION ".data.percpu"
+
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index d5ef0aa3e312..745421225ec6 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -63,4 +63,7 @@
 
 #define SO_MARK			36
 
+#define SO_TIMESTAMPING		37
+#define SCM_TIMESTAMPING	SO_TIMESTAMPING
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/swab.h b/arch/ia64/include/asm/swab.h
index 6aa58b699eea..c89a8cb5d8a5 100644
--- a/arch/ia64/include/asm/swab.h
+++ b/arch/ia64/include/asm/swab.h
@@ -6,7 +6,7 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/intrinsics.h>
 #include <linux/compiler.h>
 
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 32f3af1641c5..3193f4417e16 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -84,7 +84,7 @@ void build_cpu_to_node_map(void);
 	.child			= NULL, \
 	.groups			= NULL, \
 	.min_interval		= 8, \
-	.max_interval		= 8*(min(num_online_cpus(), 32)), \
+	.max_interval		= 8*(min(num_online_cpus(), 32U)), \
 	.busy_factor		= 64, \
 	.imbalance_pct		= 125, \
 	.cache_nice_tries	= 2, \
diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h
new file mode 100644
index 000000000000..61b5bdfd980e
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_IA64_UV_UV_H
+#define _ASM_IA64_UV_UV_H
+
+#include <asm/system.h>
+#include <asm/sn/simulator.h>
+
+static inline int is_uv_system(void)
+{
+	/* temporary support for running on hardware simulator */
+	return IS_MEDUSA() || ia64_platform_is("uv");
+}
+
+#endif	/* _ASM_IA64_UV_UV_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d541671caf4a..bdef2ce38c8b 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
 	return __va(phys_addr);
 }
 
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+}
+
 /* --------------------------------------------------------------------------
                              Boot-time Table Parsing
    -------------------------------------------------------------------------- */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index e13125058bed..166e0d839fa0 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpus_setall(idesc->affinity);
+		cpumask_setall(idesc->affinity);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index a58f64ca9f0e..7429752ef5ad 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -80,7 +80,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j) {
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 		}
 #endif
 		seq_printf(p, " %14s", irq_desc[i].chip->name);
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(&irq_desc[irq].affinity,
+		cpumask_copy(irq_desc[irq].affinity,
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
 		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 28d3d483db92..acc4d19ae62a 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -493,14 +493,15 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		int irq = local_vector_to_irq(vector);
+		struct irq_desc *desc = irq_to_desc(irq);
+
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_this_cpu.irqs[vector]++;
-		} else if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
-		else {
-			int irq = local_vector_to_irq(vector);
-
+			kstat_incr_irqs_this_cpu(irq, desc);
+		} else if (unlikely(IS_RESCHEDULE(vector))) {
+			kstat_incr_irqs_this_cpu(irq, desc);
+		} else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
 
@@ -543,22 +544,24 @@ void ia64_process_pending_intr(void)
 
 	vector = ia64_get_ivr();
 
 	irq_enter();
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 
 	/*
 	 * Perform normal interrupt style processing
 	 */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		int irq = local_vector_to_irq(vector);
+		struct irq_desc *desc = irq_to_desc(irq);
+
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_this_cpu.irqs[vector]++;
-		} else if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
-		else {
+			kstat_incr_irqs_this_cpu(irq, desc);
+		} else if (unlikely(IS_RESCHEDULE(vector))) {
+			kstat_incr_irqs_this_cpu(irq, desc);
+		} else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
-			int irq = local_vector_to_irq(vector);
 
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 890339339035..2b15e233f7fe 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -7,44 +7,7 @@
 #include <linux/msi.h>
 #include <linux/dmar.h>
 #include <asm/smp.h>
-
-/*
- * Shifts for APIC-based data
- */
-
-#define	MSI_DATA_VECTOR_SHIFT		0
-#define	MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
-#define	MSI_DATA_VECTOR_MASK		0xffffff00
-
-#define	MSI_DATA_DELIVERY_SHIFT		8
-#define	MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_SHIFT)
-#define	MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_SHIFT)
-
-#define	MSI_DATA_LEVEL_SHIFT		14
-#define	MSI_DATA_LEVEL_DEASSERT		(0 << MSI_DATA_LEVEL_SHIFT)
-#define	MSI_DATA_LEVEL_ASSERT		(1 << MSI_DATA_LEVEL_SHIFT)
-
-#define	MSI_DATA_TRIGGER_SHIFT		15
-#define	MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
-#define	MSI_DATA_TRIGGER_LEVEL		(1 << MSI_DATA_TRIGGER_SHIFT)
-
-/*
- * Shift/mask fields for APIC-based bus address
- */
-
-#define	MSI_TARGET_CPU_SHIFT		4
-#define	MSI_ADDR_HEADER			0xfee00000
-
-#define	MSI_ADDR_DESTID_MASK		0xfff0000f
-#define	MSI_ADDR_DESTID_CPU(cpu)	((cpu) << MSI_TARGET_CPU_SHIFT)
-
-#define	MSI_ADDR_DESTMODE_SHIFT		2
-#define	MSI_ADDR_DESTMODE_PHYS		(0 << MSI_ADDR_DESTMODE_SHIFT)
-#define	MSI_ADDR_DESTMODE_LOGIC		(1 << MSI_ADDR_DESTMODE_SHIFT)
-
-#define	MSI_ADDR_REDIRECTION_SHIFT	3
-#define	MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
-#define	MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)
+#include <asm/msidef.h>
 
 static struct irq_chip ia64_msi_chip;
 
@@ -65,8 +28,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 	read_msi_msg(irq, &msg);
 
 	addr = msg.address_lo;
-	addr &= MSI_ADDR_DESTID_MASK;
-	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+	addr &= MSI_ADDR_DEST_ID_MASK;
+	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
 	msg.address_lo = addr;
 
 	data = msg.data;
@@ -75,7 +38,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 	msg.data = data;
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
 
@@ -98,9 +61,9 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	msg.address_hi = 0;
 	msg.address_lo =
 		MSI_ADDR_HEADER |
-		MSI_ADDR_DESTMODE_PHYS |
+		MSI_ADDR_DEST_MODE_PHYS |
 		MSI_ADDR_REDIRECTION_CPU |
-		MSI_ADDR_DESTID_CPU(dest_phys_id);
+		MSI_ADDR_DEST_ID_CPU(dest_phys_id);
 
 	msg.data =
 		MSI_DATA_TRIGGER_EDGE |
@@ -183,11 +146,11 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 
 	msg.data &= ~MSI_DATA_VECTOR_MASK;
 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
-	msg.address_lo &= ~MSI_ADDR_DESTID_MASK;
-	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = *mask;
+	cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
@@ -215,9 +178,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 	msg->address_hi = 0;
 	msg->address_lo =
 		MSI_ADDR_HEADER |
-		MSI_ADDR_DESTMODE_PHYS |
+		MSI_ADDR_DEST_MODE_PHYS |
 		MSI_ADDR_REDIRECTION_CPU |
-		MSI_ADDR_DESTID_CPU(dest);
+		MSI_ADDR_DEST_ID_CPU(dest);
 
 	msg->data =
 		MSI_DATA_TRIGGER_EDGE |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 0e499757309b..5c0f408cfd71 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry)
 	return 1;
 }
 
-static struct dentry_operations pfmfs_dentry_operations = {
+static const struct dentry_operations pfmfs_dentry_operations = {
 	.d_delete = pfmfs_delete_dentry,
 };
 
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 10a7d47e8510..3765efc5f963 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -213,16 +213,9 @@ SECTIONS
 	{ *(.data.cacheline_aligned) }
 
   /* Per-cpu data: */
-  percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
-  __phys_per_cpu_start = .;
-  .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
-	{
-		__per_cpu_start = .;
-		*(.data.percpu)
-		*(.data.percpu.shared_aligned)
-		__per_cpu_end = .;
-	}
+  PERCPU_VADDR(PERCPU_ADDR, :percpu)
+  __phys_per_cpu_start = __per_cpu_load;
   . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
   						 * into percpu page size
   						 */
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index f833a0b4188d..0a2d6b86075a 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -4,6 +4,10 @@
 config HAVE_KVM
 	bool
 
+config HAVE_KVM_IRQCHIP
+	bool
+	default y
+
 menuconfig VIRTUALIZATION
 	bool "Virtualization"
 	depends on HAVE_KVM || IA64
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
index c6786e8b1bf4..c0785a728271 100644
--- a/arch/ia64/kvm/irq.h
+++ b/arch/ia64/kvm/irq.h
@@ -23,6 +23,8 @@
 #ifndef __IRQ_H
 #define __IRQ_H
 
+#include "lapic.h"
+
 static inline int irqchip_in_kernel(struct kvm *kvm)
 {
 	return 1;
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 28f982045f29..076b00d1dbff 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_MP_STATE:
-
+	case KVM_CAP_IRQ_INJECT_STATUS:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
 	union ia64_lid lid;
 	int i;
 
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		if (kvm->vcpus[i]) {
 			lid.val = VCPU_LID(kvm->vcpus[i]);
 			if (lid.id == id && lid.eid == eid)
@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	call_data.ptc_g_data = p->u.ptc_g_data;
 
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
 						KVM_MP_STATE_UNINITIALIZED ||
 					vcpu == kvm->vcpus[i])
@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void)
 		return ERR_PTR(-ENOMEM);
 	kvm_init_vm(kvm);
 
+	kvm->arch.online_vcpus = 0;
+
 	return kvm;
 
 }
@@ -919,7 +921,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_ioapic_init(kvm);
 		if (r)
 			goto out;
+		r = kvm_setup_default_irq_routing(kvm);
+		if (r) {
+			kfree(kvm->arch.vioapic);
+			goto out;
+		}
 		break;
+	case KVM_IRQ_LINE_STATUS:
 	case KVM_IRQ_LINE: {
 		struct kvm_irq_level irq_event;
 
@@ -927,10 +935,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		if (copy_from_user(&irq_event, argp, sizeof irq_event))
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
+			__s32 status;
 			mutex_lock(&kvm->lock);
-			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 				    irq_event.irq, irq_event.level);
 			mutex_unlock(&kvm->lock);
+			if (ioctl == KVM_IRQ_LINE_STATUS) {
+				irq_event.status = status;
+				if (copy_to_user(argp, &irq_event,
+							sizeof irq_event))
+					goto out;
+			}
 			r = 0;
 		}
 		break;
@@ -1149,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	/*Initialize itc offset for vcpus*/
 	itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		v = (struct kvm_vcpu *)((char *)vcpu +
 				sizeof(struct kvm_vcpu_data) * i);
 		v->arch.itc_offset = itc_offset;
@@ -1283,6 +1298,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		goto fail;
 	}
 
+	kvm->arch.online_vcpus++;
+
 	return vcpu;
 fail:
 	return ERR_PTR(r);
@@ -1303,8 +1320,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	return -EINVAL;
 }
 
-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
-		struct kvm_debug_guest *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
 {
 	return -EINVAL;
 }
@@ -1421,6 +1438,23 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	return 0;
 }
 
+int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
+				  struct kvm_ia64_vcpu_stack *stack)
+{
+	memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
+				  struct kvm_ia64_vcpu_stack *stack)
+{
+	memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
+	       sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
+
+	vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
+	return 0;
+}
+
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 
@@ -1430,9 +1464,78 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
-	return -EINVAL;
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	struct kvm_ia64_vcpu_stack *stack = NULL;
+	long r;
+
+	switch (ioctl) {
+	case KVM_IA64_VCPU_GET_STACK: {
+		struct kvm_ia64_vcpu_stack __user *user_stack;
+		void __user *first_p = argp;
+
+		r = -EFAULT;
+		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+			goto out;
+
+		if (!access_ok(VERIFY_WRITE, user_stack,
+			       sizeof(struct kvm_ia64_vcpu_stack))) {
+			printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
+			       "Illegal user destination address for stack\n");
+			goto out;
+		}
+		stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+		if (!stack) {
+			r = -ENOMEM;
+			goto out;
+		}
+
+		r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
+		if (r)
+			goto out;
+
+		if (copy_to_user(user_stack, stack,
+				 sizeof(struct kvm_ia64_vcpu_stack)))
+			goto out;
+
+		break;
+	}
+	case KVM_IA64_VCPU_SET_STACK: {
+		struct kvm_ia64_vcpu_stack __user *user_stack;
+		void __user *first_p = argp;
+
+		r = -EFAULT;
+		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+			goto out;
+
+		if (!access_ok(VERIFY_READ, user_stack,
+			       sizeof(struct kvm_ia64_vcpu_stack))) {
+			printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
+			       "Illegal user address for stack\n");
+			goto out;
+		}
+		stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+		if (!stack) {
+			r = -ENOMEM;
+			goto out;
+		}
+		if (copy_from_user(stack, user_stack,
+				   sizeof(struct kvm_ia64_vcpu_stack)))
+			goto out;
+
+		r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
+		break;
+	}
+
+	default:
+		r = -EINVAL;
+	}
+
+out:
+	kfree(stack);
+	return r;
 }
 
 int kvm_arch_set_memory_region(struct kvm *kvm,
@@ -1472,7 +1575,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 }
 
 long kvm_arch_dev_ioctl(struct file *filp,
-			unsigned int ioctl, unsigned long arg)
+			 unsigned int ioctl, unsigned long arg)
 {
 	return -EINVAL;
 }
@@ -1737,7 +1840,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
 	struct kvm_vcpu *lvcpu = kvm->vcpus[0];
 	int i;
 
-	for (i = 1; i < KVM_MAX_VCPUS; i++) {
+	for (i = 1; i < kvm->arch.online_vcpus; i++) {
 		if (!kvm->vcpus[i])
 			continue;
 		if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index cb7600bdff9d..a8ae52ed5635 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -227,6 +227,18 @@ static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
 	return result;
 }
 
+static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
+{
+
+	struct ia64_pal_retval result = {0, 0, 0, 0};
+	long in0, in1, in2, in3;
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
+
+	return result;
+}
+
 static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
 {
 
@@ -268,8 +280,12 @@ static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
 static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
 {
 	struct ia64_pal_retval result;
+	unsigned long in0, in1, in2, in3;
 
-	INIT_PAL_STATUS_UNIMPLEMENTED(result);
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+	result.status = ia64_pal_vm_info(in1, in2,
+			(pal_tc_info_u_t *)&result.v1, &result.v2);
 
 	return result;
 }
@@ -292,6 +308,108 @@ static void prepare_for_halt(struct kvm_vcpu *vcpu)
 	vcpu->arch.timer_fired = 0;
 }
 
+static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
+{
+	long status;
+	unsigned long in0, in1, in2, in3, r9;
+	unsigned long pm_buffer[16];
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	status = ia64_pal_perf_mon_info(pm_buffer,
+				(pal_perf_mon_info_u_t *) &r9);
+	if (status != 0) {
+		printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status);
+	} else {
+		if (in1)
+			memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
+		else {
+			status = PAL_STATUS_EINVAL;
+			printk(KERN_WARNING"Invalid parameters "
+					"for PAL call:0x%lx!\n", in0);
+		}
+	}
+	return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
+{
+	unsigned long in0, in1, in2, in3;
+	long status;
+	unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
+			| (1UL << 61) | (1UL << 60);
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	if (in1) {
+		memcpy((void *)in1, &res, sizeof(res));
+		status = 0;
+	} else {
+		status = PAL_STATUS_EINVAL;
+		printk(KERN_WARNING"Invalid parameters "
+				"for PAL call:0x%lx!\n", in0);
+	}
+
+	return (struct ia64_pal_retval){status, 0, 0, 0};
+}
+
+static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
+{
+	unsigned long r9;
+	long status;
+
+	status = ia64_pal_mem_attrib(&r9);
+
+	return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static void remote_pal_prefetch_visibility(void *v)
+{
+	s64 trans_type = (s64)v;
+	ia64_pal_prefetch_visibility(trans_type);
+}
+
+static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
+{
+	struct ia64_pal_retval result = {0, 0, 0, 0};
+	unsigned long in0, in1, in2, in3;
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	result.status = ia64_pal_prefetch_visibility(in1);
+	if (result.status == 0) {
+		/* Must be performed on all remote processors
+		   in the coherence domain. */
+		smp_call_function(remote_pal_prefetch_visibility,
+					(void *)in1, 1);
+		/* Unnecessary on remote processor for other vcpus! */
+		result.status = 1;
+	}
+	return result;
+}
+
+static void remote_pal_mc_drain(void *v)
+{
+	ia64_pal_mc_drain();
+}
+
+static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
+{
+	struct ia64_pal_retval result = {0, 0, 0, 0};
+	unsigned long in0, in1, in2, in3;
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+	if (in1 == 0 && in2) {
+		char brand_info[128];
+		result.status = ia64_pal_get_brand_info(brand_info);
+		if (result.status == PAL_STATUS_SUCCESS)
+			memcpy((void *)in2, brand_info, 128);
+	} else {
+		result.status = PAL_STATUS_REQUIRES_MEMORY;
+		printk(KERN_WARNING"Invalid parameters for "
+				"PAL call:0x%lx!\n", in0);
+	}
+
+	return result;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 
@@ -300,14 +418,22 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int ret = 1;
 
 	gr28 = kvm_get_pal_call_index(vcpu);
-	/*printk("pal_call index:%lx\n",gr28);*/
 	switch (gr28) {
 	case PAL_CACHE_FLUSH:
 		result = pal_cache_flush(vcpu);
 		break;
+	case PAL_MEM_ATTRIB:
+		result = pal_mem_attrib(vcpu);
+		break;
 	case PAL_CACHE_SUMMARY:
 		result = pal_cache_summary(vcpu);
 		break;
+	case PAL_PERF_MON_INFO:
+		result = pal_perf_mon_info(vcpu);
+		break;
+	case PAL_HALT_INFO:
+		result = pal_halt_info(vcpu);
+		break;
 	case PAL_HALT_LIGHT:
 	{
 		INIT_PAL_STATUS_SUCCESS(result);
@@ -317,6 +443,16 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	}
 		break;
 
+	case PAL_PREFETCH_VISIBILITY:
+		result = pal_prefetch_visibility(vcpu);
+		break;
+	case PAL_MC_DRAIN:
+		result.status = ia64_pal_mc_drain();
+		/* FIXME: All vcpus likely call PAL_MC_DRAIN.
+		   That causes the congestion. */
+		smp_call_function(remote_pal_mc_drain, NULL, 1);
+		break;
+
 	case PAL_FREQ_RATIOS:
 		result = pal_freq_ratios(vcpu);
 		break;
@@ -346,6 +482,9 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		INIT_PAL_STATUS_SUCCESS(result);
 		result.v1 = (1L << 32) | 1L;
 		break;
+	case PAL_REGISTER_INFO:
+		result = pal_register_info(vcpu);
+		break;
 	case PAL_VM_PAGE_SIZE:
 		result.status = ia64_pal_vm_page_size(&result.v0,
 							&result.v1);
@@ -365,12 +504,18 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		result.status = ia64_pal_version(
 				(pal_version_u_t *)&result.v0,
 				(pal_version_u_t *)&result.v1);
-
 		break;
 	case PAL_FIXED_ADDR:
 		result.status = PAL_STATUS_SUCCESS;
 		result.v0 = vcpu->vcpu_id;
 		break;
+	case PAL_BRAND_INFO:
+		result = pal_get_brand_info(vcpu);
+		break;
+	case PAL_GET_PSTATE:
+	case PAL_CACHE_SHARED_INFO:
+		INIT_PAL_STATUS_UNIMPLEMENTED(result);
+		break;
 	default:
 		INIT_PAL_STATUS_UNIMPLEMENTED(result);
 		printk(KERN_WARNING"kvm: Unsupported pal call,"
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 230eae482f32..b1dc80952d91 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
 	return (rr1.val);
 }
 
-
 /*
  * Set vIFA & vITIR & vIHA, when vPSR.ic = 1
  * Parameter:
@@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
 }
 
-
-
 /*
  * Data Nested TLB Fault
  * @ Data Nested TLB Vector
@@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
 }
 
-
 /*
  * Data TLB Fault
  * @ Data TLB vector
@@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	/* If vPSR.ic, IFA, ITIR, IHA */
 	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
 	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-
-
 }
 
 /*
@@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }
 
-
 /*
  * VHPT Data Fault
  * @ VHPT Translation vector
@@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }
 
-
-
 /*
  * Deal with:
  * General Exception vector
@@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu)
 	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
 }
 
-
 /*
  * Illegal Operation Fault
  * @ General Exception Vector
@@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
 }
 
-
 void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }
 
-
 void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }
 
-
 /* Deal with
  * Data access rights vector
  */
@@ -563,22 +550,64 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
 	inject_guest_interruption(vcpu, vector);
 }
 
+static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
+						unsigned long arg)
+{
+	struct thash_data *data;
+	unsigned long gpa, poff;
+
+	if (!is_physical_mode(vcpu)) {
+		/* Depends on caller to provide the DTR or DTC mapping. */
+		data = vtlb_lookup(vcpu, arg, D_TLB);
+		if (data)
+			gpa = data->page_flags & _PAGE_PPN_MASK;
+		else {
+			data = vhpt_lookup(arg);
+			if (!data)
+				return 0;
+			gpa = data->gpaddr & _PAGE_PPN_MASK;
+		}
+
+		poff = arg & (PSIZE(data->ps) - 1);
+		arg = PAGEALIGN(gpa, data->ps) | poff;
+	}
+	arg = kvm_gpa_to_mpa(arg << 1 >> 1);
+
+	return (unsigned long)__va(arg);
+}
+
 static void set_pal_call_data(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
+	unsigned long gr28 = vcpu_get_gr(vcpu, 28);
+	unsigned long gr29 = vcpu_get_gr(vcpu, 29);
+	unsigned long gr30 = vcpu_get_gr(vcpu, 30);
 
 	/*FIXME:For static and stacked convention, firmware
 	 * has put the parameters in gr28-gr31 before
 	 * break to vmm  !!*/
 
-	p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
-	p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
-	p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+	switch (gr28) {
+	case PAL_PERF_MON_INFO:
+	case PAL_HALT_INFO:
+		p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
+		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+		break;
+	case PAL_BRAND_INFO:
+		p->u.pal_data.gr29 = gr29;
+		p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
+		break;
+	default:
+		p->u.pal_data.gr29 = gr29;
+		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+	}
+	p->u.pal_data.gr28 = gr28;
 	p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
+
 	p->exit_reason = EXIT_REASON_PAL_CALL;
 }
 
-static void set_pal_call_result(struct kvm_vcpu *vcpu)
+static void get_pal_call_result(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
 
@@ -606,7 +635,7 @@ static void set_sal_call_data(struct kvm_vcpu *vcpu)
 	p->exit_reason = EXIT_REASON_SAL_CALL;
 }
 
-static void set_sal_call_result(struct kvm_vcpu *vcpu)
+static void get_sal_call_result(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
 
@@ -629,13 +658,13 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
 	if (iim == DOMN_PAL_REQUEST) {
 		set_pal_call_data(v);
 		vmm_transition(v);
-		set_pal_call_result(v);
+		get_pal_call_result(v);
 		vcpu_increment_iip(v);
 		return;
 	} else if (iim == DOMN_SAL_REQUEST) {
 		set_sal_call_data(v);
 		vmm_transition(v);
-		set_sal_call_result(v);
+		get_sal_call_result(v);
 		vcpu_increment_iip(v);
 		return;
 	}
@@ -703,7 +732,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu)
 	}
 }
 
-
 void leave_hypervisor_tail(void)
 {
 	struct kvm_vcpu *v = current_vcpu;
@@ -737,7 +765,6 @@ void leave_hypervisor_tail(void)
 	}
 }
 
-
 static inline void handle_lds(struct kvm_pt_regs *regs)
 {
 	regs->cr_ipsr |= IA64_PSR_ED;
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index ecd526b55323..d4d280505878 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
 	return;
 }
 
-
 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
 {
 	unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
 	return;
 }
 
-
-
 /*
  * In physical mode, insert tc/tr for region 0 and 4 uses
  * RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
 	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }
 
-
 /*
  * The inverse of the above: given bspstore and the number of
  * registers, calculate ar.bsp.
@@ -811,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 {
 	struct kvm_vcpu *v;
+	struct kvm *kvm;
 	int i;
 	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
 	unsigned long vitv = VCPU(vcpu, itv);
 
+	kvm = (struct kvm *)KVM_VM_BASE;
+
 	if (vcpu->vcpu_id == 0) {
-		for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		for (i = 0; i < kvm->arch.online_vcpus; i++) {
 			v = (struct kvm_vcpu *)((char *)vcpu +
 					sizeof(struct kvm_vcpu_data) * i);
 			VMX(v, itc_offset) = itc_offset;
@@ -1039,8 +1038,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
 	return key;
 }
 
-
-
 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long thash, vadr;
@@ -1050,7 +1047,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
 }
 
-
 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tag, vadr;
@@ -1131,7 +1127,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
 	return IA64_NO_FAULT;
 }
 
-
 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1, r3;
@@ -1154,7 +1149,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
 }
 
-
 /************************************
  * Insert/Purge translation register/cache
  ************************************/
@@ -1385,7 +1379,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_itc(vcpu, r2);
 }
 
-
 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1;
@@ -1393,8 +1386,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	r1 = vcpu_get_itc(vcpu);
 	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
 }
+
 /**************************************************************************
-  struct kvm_vcpu*protection key register access routines
+  struct kvm_vcpu protection key register access routines
 **************************************************************************/
 
 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1401,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
 	ia64_set_pkr(reg, val);
 }
 
-
-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
-	union ia64_rr rr, rr1;
-
-	rr.val = vcpu_get_rr(vcpu, ifa);
-	rr1.val = 0;
-	rr1.ps = rr.ps;
-	rr1.rid = rr.rid;
-	return (rr1.val);
-}
-
-
-
 /********************************
  * Moves to privileged registers
  ********************************/
@@ -1464,8 +1444,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
 	return (IA64_NO_FAULT);
 }
 
-
-
 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r2;
@@ -1510,8 +1488,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_pkr(vcpu, r3, r2);
 }
 
-
-
 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r1;
@@ -1557,7 +1533,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
 }
 
-
 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	/* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1584,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1607,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
-
 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 {
 
@@ -1776,9 +1748,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
 	}
 }
 
-
-
-
 void vcpu_rfi(struct kvm_vcpu *vcpu)
 {
 	unsigned long ifs, psr;
@@ -1796,7 +1765,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
 	regs->cr_iip = VCPU(vcpu, iip);
 }
 
-
 /*
    VPSR can't keep track of below bits of guest PSR
    This function gets guest PSR
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b2f12a562bdf..042af92ced83 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
 extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
 extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
 extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
 		u64 itir, u64 ifa, int type);
 extern void thash_purge_all(struct kvm_vcpu *v);
 extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v);
 void thash_init(struct thash_cb *hcb, u64 sz);
 
 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-
+u64 kvm_gpa_to_mpa(u64 gpa);
 extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
 		u64 arg4, u64 arg5, u64 arg6, u64 arg7);
 
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 6b6307a3bd55..38232b37668b 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
 	unsigned long ps, gpaddr;
 
 	ps = itir_ps(itir);
+	rr.val = ia64_get_rr(ifa);
 
 	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
 					(ifa & ((1UL << ps) - 1));
 
-	rr.val = ia64_get_rr(ifa);
 	head = (struct thash_data *)ia64_thash(ifa);
 	head->etag = INVALID_TI_TAG;
 	ia64_mf();
@@ -412,16 +412,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 
 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  * Notes: Only TC entry can purge and insert.
- * 1 indicates this is MMIO
  */
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 						u64 ifa, int type)
 {
 	u64 ps;
 	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
-	int ret = 0;
 
 	ps = itir_ps(itir);
 	vrr.val = vcpu_get_rr(v, ifa);
@@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
 
-	if (pte & VTLB_PTE_IO)
-		ret = 1;
-
 	vtlb_purge(v, ifa, ps);
 	vhpt_purge(v, ifa, ps);
 
-	if (ps == mrr.ps) {
-		if (!(pte&VTLB_PTE_IO)) {
-			vhpt_insert(phy_pte, itir, ifa, pte);
-		} else {
-			vtlb_insert(v, pte, itir, ifa);
-			vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		}
-	} else if (ps > mrr.ps) {
+	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
 		vtlb_insert(v, pte, itir, ifa);
 		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		if (!(pte&VTLB_PTE_IO))
-			vhpt_insert(phy_pte, itir, ifa, pte);
-	} else {
+	}
+	if (pte & VTLB_PTE_IO)
+		return;
+
+	if (ps >= mrr.ps)
+		vhpt_insert(phy_pte, itir, ifa, pte);
+	else {
 		u64 psr;
 		phy_pte &= ~PAGE_FLAGS_RV_MASK;
 		psr = ia64_clear_ic();
@@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	if (!(pte&VTLB_PTE_IO))
 		mark_pages_dirty(v, pte, ps);
 
-	return ret;
 }
 
 /*
@@ -509,7 +500,6 @@ void thash_purge_all(struct kvm_vcpu *v)
 	local_flush_tlb_all();
 }
 
-
 /*
  * Lookup the hash table and its collision chain to find an entry
  * covering this address rid:va or the entry.
@@ -517,7 +507,6 @@ void thash_purge_all(struct kvm_vcpu *v)
  * INPUT:
  *  in: TLB format for both VHPT & TLB.
  */
-
 struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 {
 	struct thash_data *cch;
@@ -547,7 +536,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 	return NULL;
 }
 
-
 /*
  * Initialize internal control data before service.
  */
@@ -573,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
+
+	if (gpfn >= (KVM_P2M_SIZE >> 3))
+		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
 	return *(base + gpfn);
 }
 
@@ -589,7 +581,6 @@ u64 kvm_gpa_to_mpa(u64 gpa)
 	return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
 }
 
-
 /*
  * Fetch guest bundle code.
  * INPUT:
@@ -631,7 +622,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
 	return IA64_NO_FAULT;
 }
 
-
 void kvm_init_vhpt(struct kvm_vcpu *v)
 {
 	v->arch.vhpt.num = VHPT_NUM_ENTRIES;
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index ca553b0429ce..81e428943d73 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = *cpu_mask;
+	cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
 