Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/ioapic.c   |  403
-rw-r--r--  virt/kvm/ioapic.h   |   95
-rw-r--r--  virt/kvm/iodev.h    |   63
-rw-r--r--  virt/kvm/kvm_main.c | 1400
4 files changed, 1961 insertions, 0 deletions
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
new file mode 100644
index 000000000000..317f8e211cd2
--- /dev/null
+++ b/virt/kvm/ioapic.c
@@ -0,0 +1,403 @@
/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
 * Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>

#include "ioapic.h"
#include "lapic.h"

#if 0
#define ioapic_debug(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif

static void ioapic_deliver(struct kvm_ioapic *vioapic, int irq);

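/*
 * The IOAPIC exposes most of its state through an indirect register
 * window: the guest first writes a register index to IOREGSEL and then
 * reads or writes the 32-bit IOWIN register.  Each 64-bit redirection
 * table entry is therefore accessed as two halves, with bit 0 of
 * ioregsel selecting the high or low word.
 */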
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content;

			ASSERT(redir_index < IOAPIC_NUM_PINS);

			redir_content = ioapic->redirtbl[redir_index].bits;
			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

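/*
 * Deliver a pending interrupt on pin @idx if the pin is unmasked.
 * Level-triggered pins latch remote_irr until the guest EOIs the
 * vector; edge-triggered pins have their IRR bit cleared as soon as
 * the interrupt is serviced.
 */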
static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
{
	union ioapic_redir_entry *pent;

	pent = &ioapic->redirtbl[idx];

	if (!pent->fields.mask) {
		ioapic_deliver(ioapic, idx);
		if (pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
			pent->fields.remote_irr = 1;
	}
	if (!pent->fields.trig_mode)
		ioapic->irr &= ~(1 << idx);
}

static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		if (ioapic->ioregsel & 1) {
			ioapic->redirtbl[index].bits &= 0xffffffff;
			ioapic->redirtbl[index].bits |= (u64) val << 32;
		} else {
			ioapic->redirtbl[index].bits &= ~0xffffffffULL;
			ioapic->redirtbl[index].bits |= (u32) val;
			ioapic->redirtbl[index].fields.remote_irr = 0;
		}
		if (ioapic->irr & (1 << index))
			ioapic_service(ioapic, index);
		break;
	}
}

static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
			   struct kvm_vcpu *vcpu,
			   u8 vector, u8 trig_mode, u8 delivery_mode)
{
	ioapic_debug("irq %d trig %d deliv %d\n", vector, trig_mode,
		     delivery_mode);

	ASSERT((delivery_mode == IOAPIC_FIXED) ||
	       (delivery_mode == IOAPIC_LOWEST_PRIORITY));

	kvm_apic_set_irq(vcpu, vector, trig_mode);
}

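/*
 * Translate a destination APIC ID into a bitmap of target vcpus.
 * Physical mode matches a single APIC ID (0xFF broadcasts to every
 * vcpu with an in-kernel APIC); logical mode lets one MDA address
 * several vcpus at once through their logical destination registers.
 */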
static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
				       u8 dest_mode)
{
	u32 mask = 0;
	int i;
	struct kvm *kvm = ioapic->kvm;
	struct kvm_vcpu *vcpu;

	ioapic_debug("dest %d dest_mode %d\n", dest, dest_mode);

	if (dest_mode == 0) {	/* Physical mode. */
		if (dest == 0xFF) {	/* Broadcast. */
			for (i = 0; i < KVM_MAX_VCPUS; ++i)
				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
					mask |= 1 << i;
			return mask;
		}
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
				if (vcpu->arch.apic)
					mask = 1 << i;
				break;
			}
		}
	} else if (dest != 0)	/* Logical mode, MDA non-zero. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			if (vcpu->arch.apic &&
			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
				mask |= 1 << vcpu->vcpu_id;
		}
	ioapic_debug("mask %x\n", mask);
	return mask;
}

static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
	u8 vector = ioapic->redirtbl[irq].fields.vector;
	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
	u32 deliver_bitmask;
	struct kvm_vcpu *vcpu;
	int vcpu_id;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     dest, dest_mode, delivery_mode, vector, trig_mode);

	deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode);
	if (!deliver_bitmask) {
		ioapic_debug("no target on destination\n");
		return;
	}

	switch (delivery_mode) {
	case IOAPIC_LOWEST_PRIORITY:
		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
				deliver_bitmask);
		if (vcpu != NULL)
			ioapic_inj_irq(ioapic, vcpu, vector,
				       trig_mode, delivery_mode);
		else
			ioapic_debug("null lowest prio vcpu: "
				     "mask=%x vector=%x delivery_mode=%x\n",
				     deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
		break;
	case IOAPIC_FIXED:
		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
			if (!(deliver_bitmask & (1 << vcpu_id)))
				continue;
			deliver_bitmask &= ~(1 << vcpu_id);
			vcpu = ioapic->kvm->vcpus[vcpu_id];
			if (vcpu) {
				ioapic_inj_irq(ioapic, vcpu, vector,
					       trig_mode, delivery_mode);
			}
		}
		break;

		/* TODO: NMI */
	default:
		printk(KERN_WARNING "Unsupported delivery mode %d\n",
		       delivery_mode);
		break;
	}
}

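/*
 * External edge/level input for pin @irq.  The line level is XORed
 * with the pin's polarity, so "active low" pins assert on level 0.
 * A level-triggered pin is only re-serviced once remote_irr has been
 * cleared by an EOI; an edge-triggered pin fires on each 0->1
 * transition of the (polarity-corrected) line.
 */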
void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
{
	u32 old_irr = ioapic->irr;
	u32 mask = 1 << irq;
	union ioapic_redir_entry entry;

	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
		entry = ioapic->redirtbl[irq];
		level ^= entry.fields.polarity;
		if (!level)
			ioapic->irr &= ~mask;
		else {
			ioapic->irr |= mask;
			if ((!entry.fields.trig_mode && old_irr != ioapic->irr)
			    || !entry.fields.remote_irr)
				ioapic_service(ioapic, irq);
		}
	}
}

static int get_eoi_gsi(struct kvm_ioapic *ioapic, int vector)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		if (ioapic->redirtbl[i].fields.vector == vector)
			return i;
	return -1;
}

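/*
 * Called when the local APIC EOIs a level-triggered vector: clear
 * remote_irr on the matching pin and, if the line is still asserted
 * (its IRR bit is set), deliver the interrupt again.
 */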
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	union ioapic_redir_entry *ent;
	int gsi;

	gsi = get_eoi_gsi(ioapic, vector);
	if (gsi == -1) {
		printk(KERN_WARNING "Can't find redir item for %d EOI\n",
		       vector);
		return;
	}

	ent = &ioapic->redirtbl[gsi];
	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);

	ent->fields.remote_irr = 0;
	if (!ent->fields.mask && (ioapic->irr & (1 << gsi)))
		ioapic_deliver(ioapic, gsi);
}

static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr)
{
	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;

	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

static void ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
			     void *val)
{
	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
	u32 result;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
}

static void ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
			      const void *val)
{
	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
	u32 data;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void *)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */
	if (len == 4 || len == 8)
		data = *(u32 *) val;
	else {
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return;
	}

	addr &= 0xff;
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data;
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;
#ifdef CONFIG_IA64
	case IOAPIC_REG_EOI:
		kvm_ioapic_update_eoi(ioapic->kvm, data);
		break;
#endif

	default:
		break;
	}
}

void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->id = 0;
}

int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	ioapic->dev.read = ioapic_mmio_read;
	ioapic->dev.write = ioapic_mmio_write;
	ioapic->dev.in_range = ioapic_in_range;
	ioapic->dev.private = ioapic;
	ioapic->kvm = kvm;
	kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
	return 0;
}
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
new file mode 100644
index 000000000000..7f16675fe783
--- /dev/null
+++ b/virt/kvm/ioapic.h
@@ -0,0 +1,95 @@
#ifndef __KVM_IO_APIC_H
#define __KVM_IO_APIC_H

#include <linux/kvm_host.h>

#include "iodev.h"

struct kvm;
struct kvm_vcpu;

#define IOAPIC_NUM_PINS	KVM_IOAPIC_NUM_PINS
#define IOAPIC_VERSION_ID	0x11	/* IOAPIC version */
#define IOAPIC_EDGE_TRIG	0
#define IOAPIC_LEVEL_TRIG	1

#define IOAPIC_DEFAULT_BASE_ADDRESS	0xfec00000
#define IOAPIC_MEM_LENGTH		0x100

/* Direct registers. */
#define IOAPIC_REG_SELECT	0x00
#define IOAPIC_REG_WINDOW	0x10
#define IOAPIC_REG_EOI		0x40	/* IA64 IOSAPIC only */

/* Indirect registers. */
#define IOAPIC_REG_APIC_ID	0x00	/* x86 IOAPIC only */
#define IOAPIC_REG_VERSION	0x01
#define IOAPIC_REG_ARB_ID	0x02	/* x86 IOAPIC only */

/* IOAPIC delivery modes. */
#define IOAPIC_FIXED		0x0
#define IOAPIC_LOWEST_PRIORITY	0x1
#define IOAPIC_PMI		0x2
#define IOAPIC_NMI		0x4
#define IOAPIC_INIT		0x5
#define IOAPIC_EXTINT		0x7

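/*
 * Software model of an IOAPIC.  Each of the IOAPIC_NUM_PINS pins has a
 * 64-bit redirection entry, accessible both as a raw u64 (for the
 * indirect register window) and as named bitfields.
 */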
struct kvm_ioapic {
	u64 base_address;
	u32 ioregsel;
	u32 id;
	u32 irr;
	u32 pad;
	union ioapic_redir_entry {
		u64 bits;
		struct {
			u8 vector;
			u8 delivery_mode:3;
			u8 dest_mode:1;
			u8 delivery_status:1;
			u8 polarity:1;
			u8 remote_irr:1;
			u8 trig_mode:1;
			u8 mask:1;
			u8 reserve:7;
			u8 reserved[4];
			u8 dest_id;
		} fields;
	} redirtbl[IOAPIC_NUM_PINS];
	struct kvm_io_device dev;
	struct kvm *kvm;
};

#ifdef DEBUG
#define ASSERT(x)							\
do {									\
	if (!(x)) {							\
		printk(KERN_EMERG "assertion failed %s: %d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
		BUG();							\
	}								\
} while (0)
#else
#define ASSERT(x) do { } while (0)
#endif

static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
{
	return kvm->arch.vioapic;
}

#ifdef CONFIG_IA64
static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return 1;
}
#endif

struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
					  unsigned long bitmap);
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);

#endif
diff --git a/virt/kvm/iodev.h b/virt/kvm/iodev.h
new file mode 100644
index 000000000000..c14e642027b2
--- /dev/null
+++ b/virt/kvm/iodev.h
@@ -0,0 +1,63 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __KVM_IODEV_H__
#define __KVM_IODEV_H__

#include <linux/kvm_types.h>

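/*
 * Minimal polymorphic MMIO/PIO device.  A bus probes devices with
 * in_range() and dispatches an access to the first device that claims
 * the address, calling its read() or write() hook; destructor() is
 * optional and runs when the bus is torn down.
 */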
struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};

static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	if (dev->destructor)
		dev->destructor(dev);
}

#endif /* __KVM_IODEV_H__ */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
new file mode 100644
index 000000000000..3c4fe26096fc
--- /dev/null
+++ b/virt/kvm/kvm_main.c
@@ -0,0 +1,1400 @@
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

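/*
 * Force every vcpu that may hold stale translations to flush its TLB:
 * set KVM_REQ_TLB_FLUSH in each vcpu's request bitmap and IPI the
 * physical CPUs currently running a vcpu, so they notice the request
 * on their next guest entry.  ack_flush() is an empty handler; the
 * smp_call_function_mask() round trip is only used to wait until every
 * target CPU has taken the IPI.
 */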
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		*memslot = old;
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&current->mm->mmap_sem);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&current->mm->mmap_sem);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

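/*
 * Translate a guest frame number into the userspace virtual address
 * backing it: locate the memslot containing @gfn and add the page
 * offset into the slot's userspace mapping.  Returns bad_hva() for
 * frames outside any slot.
 */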
static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

/*
 * Requires current->mm->mmap_sem to be held
 */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

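/*
 * Guest reads and writes may span page boundaries, so the copy helpers
 * below walk the range one page at a time.  next_segment() returns how
 * many bytes of @len fit in the current page: e.g. a 5000-byte read
 * starting at offset 3000 of a 4096-byte page is split into segments
 * of 1096 and 3904 bytes.
 */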
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && !kvm_arch_vcpu_runnable(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* Pass p, not &sigset: a NULL argp must clear the mask. */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm *kvm = vma->vm_file->private_data;
	struct page *page;

	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
		return VM_FAULT_SIGBUS;
	page = gfn_to_page(kvm, vmf->pgoff);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return VM_FAULT_SIGBUS;
	}
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

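/*
 * Hardware virtualization support is switched on per physical CPU.
 * cpus_hardware_enabled tracks which CPUs have it enabled, so the
 * hotplug, suspend/resume and reboot notifiers below can enable or
 * disable each CPU exactly once.
 */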
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

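/*
 * debugfs statistics: each debugfs_entries item records the byte
 * offset of a u32 counter inside struct kvm or struct kvm_vcpu.  The
 * getters below sum that counter over every VM on vm_list (and, for
 * vcpu stats, over every vcpu of every VM) under kvm_lock.
 */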
static u64 vm_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		total += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static u64 vcpu_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 0, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);