author		Xiantao Zhang <xiantao.zhang@intel.com>	2008-10-23 02:56:44 -0400
committer	Avi Kivity <avi@redhat.com>	2008-12-31 09:51:49 -0500
commit		a917f7af3905953329361d29b6db78eb17b4d44c (patch)
tree		a873216c93f09af69f9a68fa831df822a3810fd8
parent		1d5a4d9b92028d9fe77da34037bd5a1ebfecc733 (diff)
KVM: ia64: Re-organize data structure of guests' data area
1. Increase the size of the data area to 64M.
2. Support more vcpus and memory: 128 vcpus and 256G of memory are
   supported for guests.
3. Add boundary checks for memory and vcpu allocation.

With this patch, a kvm guest's data area looks as follows:

 *
 *            +----------------------+  ------- KVM_VM_DATA_SIZE
 *            |     vcpu[n]'s data   |   |     ___________________KVM_STK_OFFSET
 *            |                      |   |    /                   |
 *            |        ..........    |   |   /vcpu's struct&stack |
 *            |        ..........    |   |  /---------------------|---- 0
 *            |     vcpu[5]'s data   |   | /       vpd            |
 *            |     vcpu[4]'s data   |   |/-----------------------|
 *            |     vcpu[3]'s data   |   /         vtlb           |
 *            |     vcpu[2]'s data   |  /|------------------------|
 *            |     vcpu[1]'s data   |/  |         vhpt           |
 *            |     vcpu[0]'s data   |____________________________|
 *            +----------------------+   |
 *            |    memory dirty log  |   |
 *            +----------------------+   |
 *            |    vm's data struct  |   |
 *            +----------------------+   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |    vm's p2m table    |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |      |
 * vm's data->|                      |   |      |
 *            +----------------------+  ------- 0
 * To support large memory, needs to increase the size of p2m.
 * To support more vcpus, needs to ensure it has enough space to
 * hold vcpus' data.
 */

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
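As a sanity check on those numbers, here is a small standalone sketch (userspace C, not part of the patch) that restates the constants introduced below and derives the per-vcpu footprint plus the vcpu count that fits in what remains of the 64M area after the p2m table, the vm struct, and the dirty log; the macro ceiling it prints (156) comfortably covers the 128 vcpus quoted above:

#include <stdio.h>

/* Constants as introduced in arch/ia64/include/asm/kvm_host.h below. */
#define KVM_VM_DATA_SIZE        (1UL << 26)     /* 64M data area */
#define KVM_P2M_SIZE            (24UL << 20)    /* 24M p2m table */
#define KVM_VM_STRUCT_SIZE      (1UL << 19)     /* 512K vm struct */
#define KVM_MEM_DIRTY_LOG_SIZE  (1UL << 19)     /* 512K dirty log */
#define VHPT_SIZE               (1UL << 16)
#define VTLB_SIZE               (1UL << 16)
#define VPD_SIZE                (1UL << 16)
#define VCPU_STRUCT_SIZE        (1UL << 16)

int main(void)
{
        /* One vcpu's slice: vhpt + vtlb + vpd + struct/stack = 256K. */
        unsigned long per_vcpu = VHPT_SIZE + VTLB_SIZE + VPD_SIZE +
                                 VCPU_STRUCT_SIZE;
        unsigned long left = KVM_VM_DATA_SIZE - KVM_P2M_SIZE -
                             KVM_VM_STRUCT_SIZE - KVM_MEM_DIRTY_LOG_SIZE;

        printf("per-vcpu footprint: %lu KiB\n", per_vcpu >> 10);  /* 256 */
        printf("vcpus that fit:     %lu\n", left / per_vcpu);     /* 156 */
        return 0;
}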
-rw-r--r--  arch/ia64/include/asm/kvm_host.h  192
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c           60
-rw-r--r--  arch/ia64/kvm/kvm_minstate.h        4
-rw-r--r--  arch/ia64/kvm/misc.h                3
-rw-r--r--  arch/ia64/kvm/vcpu.c                5
-rw-r--r--  arch/ia64/kvm/vtlb.c                4
6 files changed, 161 insertions, 107 deletions
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index c60d324da540..678e2646a500 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -23,17 +23,6 @@
 #ifndef __ASM_KVM_HOST_H
 #define __ASM_KVM_HOST_H
 
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/kvm.h>
-#include <linux/kvm_para.h>
-#include <linux/kvm_types.h>
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-
-#define KVM_MAX_VCPUS 4
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
@@ -52,68 +41,127 @@
 #define EXIT_REASON_PTC_G 8
 
 /*Define vmm address space and vm data space.*/
-#define KVM_VMM_SIZE (16UL<<20)
+#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
 #define KVM_VMM_SHIFT 24
-#define KVM_VMM_BASE 0xD000000000000000UL
-#define VMM_SIZE (8UL<<20)
+#define KVM_VMM_BASE 0xD000000000000000
+#define VMM_SIZE (__IA64_UL_CONST(8)<<20)
 
 /*
  * Define vm_buffer, used by PAL Services, base address.
- * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M
+ * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
  */
 #define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
-#define KVM_VM_BUFFER_SIZE (8UL<<20)
-
-/*Define Virtual machine data layout.*/
-#define KVM_VM_DATA_SHIFT 24
-#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT)
-#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE)
-
-
-#define KVM_P2M_BASE KVM_VM_DATA_BASE
-#define KVM_P2M_OFS 0
-#define KVM_P2M_SIZE (8UL << 20)
-
-#define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE)
-#define KVM_VHPT_OFS KVM_P2M_SIZE
-#define KVM_VHPT_BLOCK_SIZE (2UL << 20)
-#define VHPT_SHIFT 18
-#define VHPT_SIZE (1UL << VHPT_SHIFT)
-#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5))
-
-#define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
-#define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
-#define KVM_VTLB_BLOCK_SIZE (1UL<<20)
-#define VTLB_SHIFT 17
-#define VTLB_SIZE (1UL<<VTLB_SHIFT)
-#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
-
-#define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
-#define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
-#define KVM_VPD_BLOCK_SIZE (2UL<<20)
-#define VPD_SHIFT 16
-#define VPD_SIZE (1UL<<VPD_SHIFT)
-
-#define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
-#define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
-#define KVM_VCPU_BLOCK_SIZE (2UL<<20)
-#define VCPU_SHIFT 18
-#define VCPU_SIZE (1UL<<VCPU_SHIFT)
-#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
-
-#define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
-#define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
-#define KVM_VM_BLOCK_SIZE (1UL<<19)
-
-#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
-#define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
-#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19)
-
-/* Get vpd, vhpt, tlb, vcpu, base*/
-#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE)
-#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE)
-#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE)
-#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE)
+/*
+ * kvm guest's data area looks as follow:
+ *
+ *            +----------------------+  ------- KVM_VM_DATA_SIZE
+ *            |     vcpu[n]'s data   |   |     ___________________KVM_STK_OFFSET
+ *            |                      |   |    /                   |
+ *            |        ..........    |   |   /vcpu's struct&stack |
+ *            |        ..........    |   |  /---------------------|---- 0
+ *            |     vcpu[5]'s data   |   | /       vpd            |
+ *            |     vcpu[4]'s data   |   |/-----------------------|
+ *            |     vcpu[3]'s data   |   /         vtlb           |
+ *            |     vcpu[2]'s data   |  /|------------------------|
+ *            |     vcpu[1]'s data   |/  |         vhpt           |
+ *            |     vcpu[0]'s data   |____________________________|
+ *            +----------------------+   |
+ *            |    memory dirty log  |   |
+ *            +----------------------+   |
+ *            |    vm's data struct  |   |
+ *            +----------------------+   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |    vm's p2m table    |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |      |
+ * vm's data->|                      |   |      |
+ *            +----------------------+  ------- 0
+ * To support large memory, needs to increase the size of p2m.
+ * To support more vcpus, needs to ensure it has enough space to
+ * hold vcpus' data.
+ */
+
+#define KVM_VM_DATA_SHIFT 26
+#define KVM_VM_DATA_SIZE (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VM_DATA_SIZE)
+
+#define KVM_P2M_BASE KVM_VM_DATA_BASE
+#define KVM_P2M_SIZE (__IA64_UL_CONST(24) << 20)
+
+#define VHPT_SHIFT 16
+#define VHPT_SIZE (__IA64_UL_CONST(1) << VHPT_SHIFT)
+#define VHPT_NUM_ENTRIES (__IA64_UL_CONST(1) << (VHPT_SHIFT-5))
+
+#define VTLB_SHIFT 16
+#define VTLB_SIZE (__IA64_UL_CONST(1) << VTLB_SHIFT)
+#define VTLB_NUM_ENTRIES (1UL << (VHPT_SHIFT-5))
+
+#define VPD_SHIFT 16
+#define VPD_SIZE (__IA64_UL_CONST(1) << VPD_SHIFT)
+
+#define VCPU_STRUCT_SHIFT 16
+#define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
+
+#define KVM_STK_OFFSET VCPU_STRUCT_SIZE
+
+#define KVM_VM_STRUCT_SHIFT 19
+#define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
+
+#define KVM_MEM_DIRY_LOG_SHIFT 19
+#define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+/*Define the max vcpus and memory for Guests.*/
+#define KVM_MAX_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
+		KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
+#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/page.h>
+
+struct kvm_vcpu_data {
+	char vcpu_vhpt[VHPT_SIZE];
+	char vcpu_vtlb[VTLB_SIZE];
+	char vcpu_vpd[VPD_SIZE];
+	char vcpu_struct[VCPU_STRUCT_SIZE];
+};
+
+struct kvm_vm_data {
+	char kvm_p2m[KVM_P2M_SIZE];
+	char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
+	char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
+	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
+};
+
+#define VCPU_BASE(n) KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, vcpu_data[n])
+#define VM_BASE KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
+
+#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
+#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
+#define VPD_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
+#define VCPU_STRUCT_BASE(n) (VCPU_BASE(n) + \
+			offsetof(struct kvm_vcpu_data, vcpu_struct))
 
 /*IO section definitions*/
 #define IOREQ_READ 1
@@ -403,14 +451,13 @@ struct kvm_sal_data {
 };
 
 struct kvm_arch {
+	spinlock_t dirty_log_lock;
+
 	unsigned long vm_base;
 	unsigned long metaphysical_rr0;
 	unsigned long metaphysical_rr4;
 	unsigned long vmm_init_rr;
-	unsigned long vhpt_base;
-	unsigned long vtlb_base;
-	unsigned long vpd_base;
-	spinlock_t dirty_log_lock;
+
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
@@ -512,7 +559,7 @@ struct kvm_pt_regs {
 
 static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
 {
-	return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+	return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
 }
 
 typedef int kvm_vmm_entry(void);
@@ -531,5 +578,6 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
 static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
+#endif /* __ASSEMBLY__*/
 
 #endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index af1464f7a6ad..43e45f6afcda 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -698,27 +698,24 @@ out:
 	return r;
 }
 
-/*
- * Allocate 16M memory for every vm to hold its specific data.
- * Its memory map is defined in kvm_host.h.
- */
 static struct kvm *kvm_alloc_kvm(void)
 {
 
 	struct kvm *kvm;
 	uint64_t vm_base;
 
+	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
+
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
 		return ERR_PTR(-ENOMEM);
-	printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
 
-	/* Zero all pages before use! */
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
-
-	kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
+	kvm = (struct kvm *)(vm_base +
+			offsetof(struct kvm_vm_data, kvm_vm_struct));
 	kvm->arch.vm_base = vm_base;
+	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
 
 	return kvm;
 }
@@ -760,21 +757,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
 
 static void kvm_init_vm(struct kvm *kvm)
 {
-	long vm_base;
-
 	BUG_ON(!kvm);
 
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
 
-	vm_base = kvm->arch.vm_base;
-	if (vm_base) {
-		kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
-		kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
-		kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
-	}
-
 	/*
 	 *Fill P2M entries for MMIO/IO ranges
 	 */
@@ -864,7 +852,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 		goto out;
 	r = copy_from_user(vcpu + 1, regs->saved_stack +
 			sizeof(struct kvm_vcpu),
-			IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
+			KVM_STK_OFFSET - sizeof(struct kvm_vcpu));
 	if (r)
 		goto out;
 	vcpu->arch.exit_data =
@@ -1166,10 +1154,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	/*Set entry address for first run.*/
 	regs->cr_iip = PALE_RESET_ENTRY;
 
-	/*Initilize itc offset for vcpus*/
+	/*Initialize itc offset for vcpus*/
 	itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
-	for (i = 0; i < MAX_VCPU_NUM; i++) {
-		v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		v = (struct kvm_vcpu *)((char *)vcpu +
+				sizeof(struct kvm_vcpu_data) * i);
 		v->arch.itc_offset = itc_offset;
 		v->arch.last_itc = 0;
 	}
@@ -1183,7 +1172,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.apic->vcpu = vcpu;
 
 	p_ctx->gr[1] = 0;
-	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
+	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
 	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
 	p_ctx->psr = 0x1008522000UL;
 	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
@@ -1218,12 +1207,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.hlt_timer.function = hlt_timer_fn;
 
 	vcpu->arch.last_run_cpu = -1;
-	vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
+	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
 	vcpu->arch.vsa_base = kvm_vsa_base;
 	vcpu->arch.__gp = kvm_vmm_gp;
 	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
-	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
-	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
+	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
+	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
 	init_ptce_info(vcpu);
 
 	r = 0;
@@ -1273,12 +1262,22 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	int r;
 	int cpu;
 
+	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
+
+	r = -EINVAL;
+	if (id >= KVM_MAX_VCPUS) {
+		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
+				KVM_MAX_VCPUS);
+		goto fail;
+	}
+
 	r = -ENOMEM;
 	if (!vm_base) {
 		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
 		goto fail;
 	}
-	vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
+	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
+					vcpu_data[id].vcpu_struct));
 	vcpu->kvm = kvm;
 
 	cpu = get_cpu();
@@ -1396,7 +1395,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 			sizeof(union context));
 	if (r)
 		goto out;
-	r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
+	r = copy_to_user(regs->saved_stack, (void *)vcpu, KVM_STK_OFFSET);
 	if (r)
 		goto out;
 	SAVE_REGS(mp_state);
@@ -1457,6 +1456,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
+	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
+		return -ENOMEM;
+
 	for (i = 0; i < npages; i++) {
 		pfn = gfn_to_pfn(kvm, base_gfn + i);
 		if (!kvm_is_mmio_pfn(pfn)) {
@@ -1631,8 +1633,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	long n, base;
-	unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
-			+ KVM_MEM_DIRTY_LOG_OFS);
+	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
+			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
 	r = -EINVAL;
 	if (log->slot >= KVM_MEMORY_SLOTS)
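The boundary check added to kvm_arch_set_memory_region() above follows directly from the p2m layout: KVM_MAX_MEM_SIZE is (KVM_P2M_SIZE >> 3) << PAGE_SHIFT, so KVM_MAX_MEM_SIZE >> PAGE_SHIFT collapses to KVM_P2M_SIZE / 8, i.e. one 8-byte p2m entry per guest page (the pmt is an array of uint64_t, see misc.h below). A standalone sketch of that arithmetic, for illustration only:

#include <stdbool.h>
#include <stdio.h>

#define KVM_P2M_SIZE    (24UL << 20)    /* 24M of p2m, as in the patch */

/* A slot fits only if its last page still has a p2m entry to describe it. */
static bool slot_fits_p2m(unsigned long base_gfn, unsigned long npages)
{
        unsigned long max_gfn = KVM_P2M_SIZE >> 3;  /* number of p2m entries */

        return base_gfn + npages <= max_gfn;
}

int main(void)
{
        printf("p2m entries: %lu\n", KVM_P2M_SIZE >> 3);          /* 3145728 */
        printf("last-entry slot fits: %d\n", slot_fits_p2m(3145727, 1));
        printf("one-past slot fits:   %d\n", slot_fits_p2m(3145728, 1));
        return 0;
}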
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
index 2cc41d17cf99..b2bcaa2787aa 100644
--- a/arch/ia64/kvm/kvm_minstate.h
+++ b/arch/ia64/kvm/kvm_minstate.h
@@ -24,6 +24,8 @@
 #include <asm/asmmacro.h>
 #include <asm/types.h>
 #include <asm/kregs.h>
+#include <asm/kvm_host.h>
+
 #include "asm-offsets.h"
 
 #define KVM_MINSTATE_START_SAVE_MIN \
@@ -33,7 +35,7 @@
 	addl r22 = VMM_RBS_OFFSET,r1;   /* compute base of RBS */	\
 	;;								\
 	lfetch.fault.excl.nt1 [r22];					\
-	addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1;	/* compute base of memory stack */  \
+	addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1;			\
 	mov r23 = ar.bspstore;		/* save ar.bspstore */		\
 	;;								\
 	mov ar.bspstore = r22;		/* switch to kernel RBS */	\
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
index e585c4607344..dd979e00b574 100644
--- a/arch/ia64/kvm/misc.h
+++ b/arch/ia64/kvm/misc.h
@@ -27,7 +27,8 @@
  */
 static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
 {
-	return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS);
+	return (uint64_t *)(kvm->arch.vm_base +
+				offsetof(struct kvm_vm_data, kvm_p2m));
 }
 
 static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index e44027ce5667..a528d70a820c 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -816,8 +816,9 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 	unsigned long vitv = VCPU(vcpu, itv);
 
 	if (vcpu->vcpu_id == 0) {
-		for (i = 0; i < MAX_VCPU_NUM; i++) {
-			v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+		for (i = 0; i < KVM_MAX_VCPUS; i++) {
+			v = (struct kvm_vcpu *)((char *)vcpu +
+					sizeof(struct kvm_vcpu_data) * i);
 			VMX(v, itc_offset) = itc_offset;
 			VMX(v, last_itc) = 0;
 		}
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index e22b93361e08..6b6307a3bd55 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -183,8 +183,8 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
 	u64 i, dirty_pages = 1;
 	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
 	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
-	void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE)
-			+ KVM_MEM_DIRTY_LOG_OFS;
+	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
+
 	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
 
 	vmm_spin_lock(lock);