Diffstat (limited to 'arch/ia64/include/asm/kvm_host.h')
-rw-r--r--  arch/ia64/include/asm/kvm_host.h  192
1 file changed, 120 insertions, 72 deletions

diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index c60d324da540..678e2646a500 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -23,17 +23,6 @@
 #ifndef __ASM_KVM_HOST_H
 #define __ASM_KVM_HOST_H
 
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/kvm.h>
-#include <linux/kvm_para.h>
-#include <linux/kvm_types.h>
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-
-#define KVM_MAX_VCPUS 4
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
@@ -52,68 +41,127 @@
 #define EXIT_REASON_PTC_G 8
 
 /*Define vmm address space and vm data space.*/
-#define KVM_VMM_SIZE (16UL<<20)
+#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
 #define KVM_VMM_SHIFT 24
-#define KVM_VMM_BASE 0xD000000000000000UL
-#define VMM_SIZE (8UL<<20)
+#define KVM_VMM_BASE 0xD000000000000000
+#define VMM_SIZE (__IA64_UL_CONST(8)<<20)
 
 /*
  * Define vm_buffer, used by PAL Services, base address.
- * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M
+ * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
  */
 #define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
-#define KVM_VM_BUFFER_SIZE (8UL<<20)
+#define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20)
 
-/*Define Virtual machine data layout.*/
-#define KVM_VM_DATA_SHIFT 24
-#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT)
-#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE)
-
-
-#define KVM_P2M_BASE KVM_VM_DATA_BASE
-#define KVM_P2M_OFS 0
-#define KVM_P2M_SIZE (8UL << 20)
-
-#define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE)
-#define KVM_VHPT_OFS KVM_P2M_SIZE
-#define KVM_VHPT_BLOCK_SIZE (2UL << 20)
-#define VHPT_SHIFT 18
-#define VHPT_SIZE (1UL << VHPT_SHIFT)
-#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5))
-
-#define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
-#define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
-#define KVM_VTLB_BLOCK_SIZE (1UL<<20)
-#define VTLB_SHIFT 17
-#define VTLB_SIZE (1UL<<VTLB_SHIFT)
-#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
-
-#define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
-#define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
-#define KVM_VPD_BLOCK_SIZE (2UL<<20)
-#define VPD_SHIFT 16
-#define VPD_SIZE (1UL<<VPD_SHIFT)
-
-#define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
-#define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
-#define KVM_VCPU_BLOCK_SIZE (2UL<<20)
-#define VCPU_SHIFT 18
-#define VCPU_SIZE (1UL<<VCPU_SHIFT)
-#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
-
-#define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
-#define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
-#define KVM_VM_BLOCK_SIZE (1UL<<19)
-
-#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
-#define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
-#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19)
-
-/* Get vpd, vhpt, tlb, vcpu, base*/
-#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE)
-#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE)
-#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE)
-#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE)
+/*
+ * kvm guest's data area looks as follow:
+ *
+ *            +----------------------+  ------- KVM_VM_DATA_SIZE
+ *            |    vcpu[n]'s data    |  |     ___________________KVM_STK_OFFSET
+ *            |                      |  |    /                   |
+ *            |       ..........     |  |   /vcpu's struct&stack |
+ *            |       ..........     |  |  /---------------------|---- 0
+ *            |    vcpu[5]'s data    |  | /          vpd         |
+ *            |    vcpu[4]'s data    |  |/-----------------------|
+ *            |    vcpu[3]'s data    |  /          vtlb          |
+ *            |    vcpu[2]'s data    | /|------------------------|
+ *            |    vcpu[1]'s data    |/ |          vhpt          |
+ *            |    vcpu[0]'s data    |____________________________|
+ *            +----------------------+  |
+ *            |    memory dirty log  |  |
+ *            +----------------------+  |
+ *            |    vm's data struct  |  |
+ *            +----------------------+  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |    vm's p2m table    |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ * vm's data->|                      |  |
+ *            +----------------------+  ------- 0
+ * To support large memory, needs to increase the size of p2m.
+ * To support more vcpus, needs to ensure it has enough space to
+ * hold vcpus' data.
+ */
+
+#define KVM_VM_DATA_SHIFT	26
+#define KVM_VM_DATA_SIZE	(__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VM_DATA_SIZE)
+
+#define KVM_P2M_BASE		KVM_VM_DATA_BASE
+#define KVM_P2M_SIZE		(__IA64_UL_CONST(24) << 20)
+
+#define VHPT_SHIFT		16
+#define VHPT_SIZE		(__IA64_UL_CONST(1) << VHPT_SHIFT)
+#define VHPT_NUM_ENTRIES	(__IA64_UL_CONST(1) << (VHPT_SHIFT-5))
+
+#define VTLB_SHIFT		16
+#define VTLB_SIZE		(__IA64_UL_CONST(1) << VTLB_SHIFT)
+#define VTLB_NUM_ENTRIES	(1UL << (VHPT_SHIFT-5))
+
+#define VPD_SHIFT		16
+#define VPD_SIZE		(__IA64_UL_CONST(1) << VPD_SHIFT)
+
+#define VCPU_STRUCT_SHIFT	16
+#define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
+
+#define KVM_STK_OFFSET		VCPU_STRUCT_SIZE
+
+#define KVM_VM_STRUCT_SHIFT	19
+#define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
+
+#define KVM_MEM_DIRY_LOG_SHIFT	19
+#define KVM_MEM_DIRTY_LOG_SIZE	(__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+/*Define the max vcpus and memory for Guests.*/
+#define KVM_MAX_VCPUS	(KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
+			KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
+#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/page.h>
+
+struct kvm_vcpu_data {
+	char vcpu_vhpt[VHPT_SIZE];
+	char vcpu_vtlb[VTLB_SIZE];
+	char vcpu_vpd[VPD_SIZE];
+	char vcpu_struct[VCPU_STRUCT_SIZE];
+};
+
+struct kvm_vm_data {
+	char kvm_p2m[KVM_P2M_SIZE];
+	char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
+	char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
+	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
+};
+
+#define VCPU_BASE(n)	KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, vcpu_data[n])
+#define VM_BASE		KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
+
+#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
+#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
+#define VPD_BASE(n)  (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
+#define VCPU_STRUCT_BASE(n)	(VCPU_BASE(n) + \
+				offsetof(struct kvm_vcpu_data, vcpu_struct))
 
 /*IO section definitions*/
 #define IOREQ_READ 1
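[Editor's note] The hunk above replaces the old fixed-offset carve-up (KVM_VHPT_OFS, VPD_ADDR(n) and friends) with a single struct kvm_vm_data whose member offsets define the layout. The following stand-alone C sketch illustrates that offsetof()-based addressing; the struct and macro shapes mirror the header, but the sizes, the vcpu count and the DATA_BASE value are shrunk or invented so it runs anywhere, so do not read them as the kernel's values.

/* Scaled-down illustration of the offsetof()-based layout. */
#include <stdio.h>
#include <stddef.h>

#define VHPT_SIZE        (1UL << 6)
#define VTLB_SIZE        (1UL << 6)
#define VPD_SIZE         (1UL << 6)
#define VCPU_STRUCT_SIZE (1UL << 6)
#define P2M_SIZE         (1UL << 8)
#define VM_STRUCT_SIZE   (1UL << 7)
#define DIRTY_LOG_SIZE   (1UL << 7)
#define MAX_VCPUS        4

struct kvm_vcpu_data {
        char vcpu_vhpt[VHPT_SIZE];
        char vcpu_vtlb[VTLB_SIZE];
        char vcpu_vpd[VPD_SIZE];
        char vcpu_struct[VCPU_STRUCT_SIZE];
};

struct kvm_vm_data {
        char kvm_p2m[P2M_SIZE];
        char kvm_vm_struct[VM_STRUCT_SIZE];
        char kvm_mem_dirty_log[DIRTY_LOG_SIZE];
        struct kvm_vcpu_data vcpu_data[MAX_VCPUS];
};

#define VM_DATA_BASE  0x10000000UL  /* invented; stands in for KVM_VM_DATA_BASE */
#define VCPU_BASE(n)  (VM_DATA_BASE + offsetof(struct kvm_vm_data, vcpu_data[n]))
#define VHPT_BASE(n)  (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
#define VPD_BASE(n)   (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))

int main(void)
{
        /* every per-vcpu address now falls out of the struct layout */
        for (int n = 0; n < MAX_VCPUS; n++)
                printf("vcpu %d: base=%#lx vhpt=%#lx vpd=%#lx\n",
                       n, VCPU_BASE(n), VHPT_BASE(n), VPD_BASE(n));
        return 0;
}

With the real constants from the hunk, sizeof(struct kvm_vcpu_data) is 4 x 64 KiB = 256 KiB, so KVM_MAX_VCPUS works out to (64 MiB - 24 MiB - 512 KiB - 512 KiB) / 256 KiB = 156 vcpu slots, instead of the previous hard-coded 4.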
@@ -403,14 +451,13 @@ struct kvm_sal_data {
 };
 
 struct kvm_arch {
+	spinlock_t dirty_log_lock;
+
 	unsigned long vm_base;
 	unsigned long metaphysical_rr0;
 	unsigned long metaphysical_rr4;
 	unsigned long vmm_init_rr;
-	unsigned long vhpt_base;
-	unsigned long vtlb_base;
-	unsigned long vpd_base;
-	spinlock_t dirty_log_lock;
+
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
@@ -512,7 +559,7 @@ struct kvm_pt_regs {
 
 static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
 {
-	return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+	return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
 }
 
 typedef int kvm_vmm_entry(void);
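[Editor's note] The vcpu_regs() change above switches the offset to the new per-vcpu constant: the saved register frame still sits at the very top of the vcpu struct-and-stack area, one struct kvm_pt_regs below base + KVM_STK_OFFSET. A minimal user-space sketch of that idiom, with fake types and an invented area size rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define STK_OFFSET (1UL << 12)  /* invented stand-in for KVM_STK_OFFSET */

struct fake_pt_regs { unsigned long gr[32]; };  /* stand-in for kvm_pt_regs */
struct fake_vcpu { int id; /* rest of the struct and the stack live below the regs */ };

static struct fake_pt_regs *vcpu_regs(struct fake_vcpu *v)
{
        /* the regs occupy the top sizeof(struct fake_pt_regs) bytes of the area */
        return (struct fake_pt_regs *)((unsigned long)v + STK_OFFSET) - 1;
}

int main(void)
{
        struct fake_vcpu *v = malloc(STK_OFFSET);  /* the whole per-vcpu area */
        struct fake_pt_regs *regs = vcpu_regs(v);

        printf("area: %p .. %p\nregs: %p (top %zu bytes)\n",
               (void *)v, (void *)((char *)v + STK_OFFSET),
               (void *)regs, sizeof(*regs));
        free(v);
        return 0;
}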
@@ -531,5 +578,6 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
 static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
+#endif /* __ASSEMBLY__*/
 
 #endif
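[Editor's note] Taken together, the new constants put concrete bounds on a guest. A quick back-of-the-envelope check in plain C, using the values from the hunks above; the 16 KiB page size (PAGE_SHIFT = 14) is the common ia64 default and is an assumption here, since PAGE_SHIFT comes from asm/page.h rather than this diff:

#include <stdio.h>

int main(void)
{
        unsigned long long vm_data_size   = 1ULL << 26;   /* KVM_VM_DATA_SIZE */
        unsigned long long p2m_size       = 24ULL << 20;  /* KVM_P2M_SIZE */
        unsigned long long vm_struct_size = 1ULL << 19;   /* KVM_VM_STRUCT_SIZE */
        unsigned long long dirty_log_size = 1ULL << 19;   /* KVM_MEM_DIRTY_LOG_SIZE */
        unsigned long long vcpu_data_size = 4ULL << 16;   /* vhpt + vtlb + vpd + struct, 4 x 64 KiB */
        unsigned long long page_shift     = 14;           /* assumed 16 KiB pages */

        /* KVM_MAX_VCPUS: whatever remains after the p2m, vm struct and dirty log */
        printf("max vcpus     : %llu\n",
               (vm_data_size - p2m_size - vm_struct_size - dirty_log_size) /
               vcpu_data_size);

        /* KVM_MAX_MEM_SIZE: 8-byte p2m entries, one per guest page */
        printf("max guest mem : %llu GiB\n",
               ((p2m_size >> 3) << page_shift) >> 30);
        return 0;
}

Under those assumptions this prints 156 vcpus and 48 GiB of guest memory per VM; the vcpu figure follows purely from the diff, while the memory figure scales with whatever PAGE_SHIFT the kernel is actually built with.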