author		Linus Torvalds <torvalds@linux-foundation.org>	2009-03-30 20:57:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-03-30 20:57:39 -0400
commit		db6f204019380c788f1de06ee937bdbccd60e5c0 (patch)
tree		f8ca32ab6932a21797dbc5aa77688ea017959da0 /drivers
parent		3c6fae67d026d57f64eb3da9c0d0e76983e39ae3 (diff)
parent		d1881d3192a3d3e8dc4f255b03187f4c36cb0617 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest-and-virtio
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest-and-virtio:
  lguest: barrier me harder
  lguest: use bool instead of int
  lguest: use KVM hypercalls
  lguest: wire up pte_update/pte_update_defer
  lguest: fix spurious BUG_ON() on invalid guest stack.
  virtio: more neatening of virtio_ring macros.
  virtio: fix BAD_RING, START_US and END_USE macros
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/lguest/core.c			 4
-rw-r--r--	drivers/lguest/interrupts_and_traps.c	28
-rw-r--r--	drivers/lguest/lg.h			 8
-rw-r--r--	drivers/lguest/lguest_device.c		 4
-rw-r--r--	drivers/lguest/page_tables.c		22
-rw-r--r--	drivers/lguest/segments.c		 2
-rw-r--r--	drivers/lguest/x86/core.c		62
-rw-r--r--	drivers/virtio/virtio_ring.c		22
8 files changed, 111 insertions, 41 deletions
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 60156dfdc608..4845fb3cf74b 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -152,8 +152,8 @@ static void unmap_switcher(void)
  * code. We have to check that the range is below the pfn_limit the Launcher
  * gave us. We have to make sure that addr + len doesn't give us a false
  * positive by overflowing, too. */
-int lguest_address_ok(const struct lguest *lg,
+bool lguest_address_ok(const struct lguest *lg,
 		       unsigned long addr, unsigned long len)
 {
 	return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
 }
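
The second clause is the interesting one: "addr + len >= addr" catches wraparound, without which a huge len could wrap the sum low enough to slip past the pfn_limit test. A standalone illustration (not part of the patch; fixed-width types are used so the 32-bit guest arithmetic is explicit on any host):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUEST_PAGE_SIZE 4096u

/* Same shape as lguest_address_ok(), with pfn_limit passed in directly. */
static bool address_ok(uint32_t pfn_limit, uint32_t addr, uint32_t len)
{
	return (addr + len) / GUEST_PAGE_SIZE < pfn_limit && (addr + len >= addr);
}

int main(void)
{
	/* 0xfffff000 + 0x2000 wraps to 0x1000, which looks "below pfn_limit". */
	printf("without overflow test: %d\n",
	       (0xfffff000u + 0x2000u) / GUEST_PAGE_SIZE < 0x8000u);	/* 1: bogus range accepted */
	printf("with overflow test:    %d\n",
	       address_ok(0x8000, 0xfffff000u, 0x2000u));		/* 0: rejected */
	return 0;
}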
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 415fab0125ac..6e99adbe1946 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -34,7 +34,7 @@ static int idt_type(u32 lo, u32 hi)
 }
 
 /* An IDT entry can't be used unless the "present" bit is set. */
-static int idt_present(u32 lo, u32 hi)
+static bool idt_present(u32 lo, u32 hi)
 {
 	return (hi & 0x8000);
 }
@@ -60,7 +60,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
  * We set up the stack just like the CPU does for a real interrupt, so it's
  * identical for the Guest (and the standard "iret" instruction will undo
  * it). */
-static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
+static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
+				bool has_err)
 {
 	unsigned long gstack, origstack;
 	u32 eflags, ss, irq_enable;
@@ -184,7 +185,7 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
 		/* set_guest_interrupt() takes the interrupt descriptor and a
 		 * flag to say whether this interrupt pushes an error code onto
 		 * the stack as well: virtual interrupts never do. */
-		set_guest_interrupt(cpu, idt->a, idt->b, 0);
+		set_guest_interrupt(cpu, idt->a, idt->b, false);
 	}
 
 	/* Every time we deliver an interrupt, we update the timestamp in the
@@ -244,26 +245,26 @@ void free_interrupts(void)
 /*H:220 Now we've got the routines to deliver interrupts, delivering traps like
  * page fault is easy. The only trick is that Intel decided that some traps
  * should have error codes: */
-static int has_err(unsigned int trap)
+static bool has_err(unsigned int trap)
 {
 	return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
 }
 
 /* deliver_trap() returns true if it could deliver the trap. */
-int deliver_trap(struct lg_cpu *cpu, unsigned int num)
+bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
 {
 	/* Trap numbers are always 8 bit, but we set an impossible trap number
 	 * for traps inside the Switcher, so check that here. */
 	if (num >= ARRAY_SIZE(cpu->arch.idt))
-		return 0;
+		return false;
 
 	/* Early on the Guest hasn't set the IDT entries (or maybe it put a
 	 * bogus one in): if we fail here, the Guest will be killed. */
 	if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
-		return 0;
+		return false;
 	set_guest_interrupt(cpu, cpu->arch.idt[num].a,
 			    cpu->arch.idt[num].b, has_err(num));
-	return 1;
+	return true;
 }
 
 /*H:250 Here's the hard part: returning to the Host every time a trap happens
@@ -279,18 +280,19 @@ int deliver_trap(struct lg_cpu *cpu, unsigned int num)
  *
  * This routine indicates if a particular trap number could be delivered
  * directly. */
-static int direct_trap(unsigned int num)
+static bool direct_trap(unsigned int num)
 {
 	/* Hardware interrupts don't go to the Guest at all (except system
 	 * call). */
 	if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num))
-		return 0;
+		return false;
 
 	/* The Host needs to see page faults (for shadow paging and to save the
 	 * fault address), general protection faults (in/out emulation) and
-	 * device not available (TS handling), and of course, the hypercall
-	 * trap. */
-	return num != 14 && num != 13 && num != 7 && num != LGUEST_TRAP_ENTRY;
+	 * device not available (TS handling), invalid opcode fault (kvm hcall),
+	 * and of course, the hypercall trap. */
+	return num != 14 && num != 13 && num != 7 &&
+		num != 6 && num != LGUEST_TRAP_ENTRY;
 }
 /*:*/
 
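
For reference, the numbers has_err() whitelists are exactly the x86 exceptions that push an error code onto the stack. A small lookup spelling them out (illustrative, not part of the patch):

/* x86 exceptions that push an error code, as tested by has_err() above. */
static const char *trap_with_err_name(unsigned int trap)
{
	switch (trap) {
	case 8:  return "#DF double fault";
	case 10: return "#TS invalid TSS";
	case 11: return "#NP segment not present";
	case 12: return "#SS stack-segment fault";
	case 13: return "#GP general protection";
	case 14: return "#PF page fault";
	case 17: return "#AC alignment check";
	default: return NULL;	/* everything else pushes no error code */
	}
}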
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index f2c641e0bdde..ac8a4a3741b8 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -109,8 +109,8 @@ struct lguest
 extern struct mutex lguest_lock;
 
 /* core.c: */
-int lguest_address_ok(const struct lguest *lg,
+bool lguest_address_ok(const struct lguest *lg,
 		       unsigned long addr, unsigned long len);
 void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
 void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
 
@@ -140,7 +140,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
 
 /* interrupts_and_traps.c: */
 void maybe_do_interrupt(struct lg_cpu *cpu);
-int deliver_trap(struct lg_cpu *cpu, unsigned int num);
+bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
 void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
 			  u32 low, u32 hi);
 void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages);
@@ -173,7 +173,7 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu);
 void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
 		   unsigned long vaddr, pte_t val);
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
-int demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode);
+bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode);
 void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
 void page_table_guest_data_init(struct lg_cpu *cpu);
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 8132533d71f9..df44d962626d 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -161,7 +161,7 @@ static void set_status(struct virtio_device *vdev, u8 status)
 
 	/* We set the status. */
 	to_lgdev(vdev)->desc->status = status;
-	hcall(LHCALL_NOTIFY, (max_pfn<<PAGE_SHIFT) + offset, 0, 0);
+	kvm_hypercall1(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset);
 }
 
 static void lg_set_status(struct virtio_device *vdev, u8 status)
@@ -209,7 +209,7 @@ static void lg_notify(struct virtqueue *vq)
 	 * virtqueue structure. */
 	struct lguest_vq_info *lvq = vq->priv;
 
-	hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0);
+	kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT);
 }
 
 /* An extern declaration inside a C file is bad form. Don't do it. */
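
The hcall() calls above become kvm_hypercall1() from asm/kvm_para.h, which loads the call number into eax and the single argument into ebx before executing the vmcall instruction. Roughly, as a sketch of the guest side rather than a quote of that header:

/* Approximate expansion of kvm_hypercall1(nr, p1) on 32-bit x86.
 * ".byte 0x0f, 0x01, 0xc1" is the vmcall opcode that is_hypercall()
 * in drivers/lguest/x86/core.c later recognises (and rewrites). */
static inline long lguest_kvm_hypercall1(unsigned long nr, unsigned long p1)
{
	long ret;

	asm volatile(".byte 0x0f, 0x01, 0xc1"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}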
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 576a8318221c..a059cf9980f7 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -199,7 +199,7 @@ static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
  *
  * If we fixed up the fault (ie. we mapped the address), this routine returns
  * true. Otherwise, it was a real fault and we need to tell the Guest. */
-int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 {
 	pgd_t gpgd;
 	pgd_t *spgd;
@@ -211,7 +211,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
 	/* Toplevel not present? We can't map it in. */
 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
-		return 0;
+		return false;
 
 	/* Now look at the matching shadow entry. */
 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
@@ -222,7 +222,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 		 * simple for this corner case. */
 		if (!ptepage) {
 			kill_guest(cpu, "out of memory allocating pte page");
-			return 0;
+			return false;
 		}
 		/* We check that the Guest pgd is OK. */
 		check_gpgd(cpu, gpgd);
@@ -238,16 +238,16 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 
 	/* If this page isn't in the Guest page tables, we can't page it in. */
 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
-		return 0;
+		return false;
 
 	/* Check they're not trying to write to a page the Guest wants
 	 * read-only (bit 2 of errcode == write). */
 	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
-		return 0;
+		return false;
 
 	/* User access to a kernel-only page? (bit 3 == user access) */
 	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
-		return 0;
+		return false;
 
 	/* Check that the Guest PTE flags are OK, and the page number is below
 	 * the pfn_limit (ie. not mapping the Launcher binary). */
@@ -283,7 +283,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 	 * manipulated, the result returned and the code complete. A small
 	 * delay and a trace of alliteration are the only indications the Guest
 	 * has that a page fault occurred at all. */
-	return 1;
+	return true;
 }
 
 /*H:360
@@ -296,7 +296,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
  *
  * This is a quick version which answers the question: is this virtual address
  * mapped by the shadow page tables, and is it writable? */
-static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
+static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 {
 	pgd_t *spgd;
 	unsigned long flags;
@@ -304,7 +304,7 @@ static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 	/* Look at the current top level entry: is it present? */
 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
-		return 0;
+		return false;
 
 	/* Check the flags on the pte entry itself: it must be present and
 	 * writable. */
@@ -373,8 +373,10 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
 	/* First step: get the top-level Guest page table entry. */
 	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
 	/* Toplevel not present? We can't map it in. */
-	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+	if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
 		kill_guest(cpu, "Bad address %#lx", vaddr);
+		return -1UL;
+	}
 
 	gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
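
demand_page() picks the fault apart using the hardware page-fault error code: value 1 means the page was present (a protection fault), 2 means a write access, 4 means the access came from user mode, exactly the bits tested in the hunks above. A tiny decoder along those lines, for illustration only:

#include <stdio.h>

/* Decode an x86 page-fault error code the way demand_page() tests it. */
static void decode_pf_errcode(unsigned int errcode)
{
	printf("%s %s access, page %s\n",
	       (errcode & 4) ? "user" : "kernel",
	       (errcode & 2) ? "write" : "read",
	       (errcode & 1) ? "present (protection fault)" : "not present");
}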
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index ec6aa3f1c36b..4f15439b7f12 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -45,7 +45,7 @@
  * "Task State Segment" which controls all kinds of delicate things. The
  * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the
  * the Guest can't be trusted to deal with double faults. */
-static int ignored_gdt(unsigned int num)
+static bool ignored_gdt(unsigned int num)
 {
 	return (num == GDT_ENTRY_TSS
 		|| num == GDT_ENTRY_LGUEST_CS
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index bf7942327bda..a6b717644be0 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -290,6 +290,57 @@ static int emulate_insn(struct lg_cpu *cpu)
 	return 1;
 }
 
+/* Our hypercalls mechanism used to be based on direct software interrupts.
+ * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to
+ * change over to using kvm hypercalls.
+ *
+ * KVM_HYPERCALL is actually a "vmcall" instruction, which generates an invalid
+ * opcode fault (fault 6) on non-VT cpus, so the easiest solution seemed to be
+ * an *emulation approach*: if the fault was really produced by an hypercall
+ * (is_hypercall() does exactly this check), we can just call the corresponding
+ * hypercall host implementation function.
+ *
+ * But these invalid opcode faults are notably slower than software interrupts.
+ * So we implemented the *patching (or rewriting) approach*: every time we hit
+ * the KVM_HYPERCALL opcode in Guest code, we patch it to the old "int 0x1f"
+ * opcode, so next time the Guest calls this hypercall it will use the
+ * faster trap mechanism.
+ *
+ * Matias even benchmarked it to convince you: this shows the average cycle
+ * cost of a hypercall. For each alternative solution mentioned above we've
+ * made 5 runs of the benchmark:
+ *
+ * 1) direct software interrupt: 2915, 2789, 2764, 2721, 2898
+ * 2) emulation technique: 3410, 3681, 3466, 3392, 3780
+ * 3) patching (rewrite) technique: 2977, 2975, 2891, 2637, 2884
+ *
+ * One two-line function is worth a 20% hypercall speed boost!
+ */
+static void rewrite_hypercall(struct lg_cpu *cpu)
+{
+	/* This are the opcodes we use to patch the Guest. The opcode for "int
+	 * $0x1f" is "0xcd 0x1f" but vmcall instruction is 3 bytes long, so we
+	 * complete the sequence with a NOP (0x90). */
+	u8 insn[3] = {0xcd, 0x1f, 0x90};
+
+	__lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn));
+}
+
+static bool is_hypercall(struct lg_cpu *cpu)
+{
+	u8 insn[3];
+
+	/* This must be the Guest kernel trying to do something.
+	 * The bottom two bits of the CS segment register are the privilege
+	 * level. */
+	if ((cpu->regs->cs & 3) != GUEST_PL)
+		return false;
+
+	/* Is it a vmcall? */
+	__lgread(cpu, insn, guest_pa(cpu, cpu->regs->eip), sizeof(insn));
+	return insn[0] == 0x0f && insn[1] == 0x01 && insn[2] == 0xc1;
+}
+
 /*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
 void lguest_arch_handle_trap(struct lg_cpu *cpu)
 {
@@ -337,7 +388,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
 		break;
 	case 32 ... 255:
 		/* These values mean a real interrupt occurred, in which case
-		 * the Host handler has already been run.  We just do a
+		 * the Host handler has already been run. We just do a
 		 * friendly check if another process should now be run, then
 		 * return to run the Guest again */
 		cond_resched();
@@ -347,6 +398,15 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
 		 * up the pointer now to indicate a hypercall is pending. */
 		cpu->hcall = (struct hcall_args *)cpu->regs;
 		return;
+	case 6:
+		/* kvm hypercalls trigger an invalid opcode fault (6).
+		 * We need to check if ring == GUEST_PL and
+		 * faulting instruction == vmcall. */
+		if (is_hypercall(cpu)) {
+			rewrite_hypercall(cpu);
+			return;
+		}
+		break;
 	}
 
 	/* We didn't handle the trap, so it needs to go to the Guest. */
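
The cycle figures quoted in the big comment block above average out to roughly 2800 cycles for the old software-interrupt path, 3550 for emulating vmcall on every fault, and 2870 once the vmcall is rewritten to int 0x1f. As a rough sketch of how such a per-hypercall figure could be taken from inside the Guest with the TSC (do_one_hypercall() is an assumed wrapper that issues one cheap hypercall; it is not something this patch provides):

static inline unsigned long long rdtsc_cycles(void)
{
	unsigned int lo, hi;

	asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
	return ((unsigned long long)hi << 32) | lo;
}

/* Average cycles per hypercall over n calls. */
static unsigned long long avg_hypercall_cycles(void (*do_one_hypercall)(void),
					       unsigned int n)
{
	unsigned long long start = rdtsc_cycles();
	unsigned int i;

	for (i = 0; i < n; i++)
		do_one_hypercall();
	return (rdtsc_cycles() - start) / n;
}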
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5777196bf6c9..5c52369ab9bb 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,15 +23,21 @@
 
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
-#define BAD_RING(vq, fmt...) \
-	do { dev_err(&vq->vq.vdev->dev, fmt); BUG(); } while(0)
-#define START_USE(vq) \
-	do { if ((vq)->in_use) panic("in_use = %i\n", (vq)->in_use); (vq)->in_use = __LINE__; mb(); } while(0)
-#define END_USE(vq) \
-	do { BUG_ON(!(vq)->in_use); (vq)->in_use = 0; mb(); } while(0)
+#define BAD_RING(_vq, fmt...) \
+	do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0)
+/* Caller is supposed to guarantee no reentry. */
+#define START_USE(_vq) \
+	do { \
+		if ((_vq)->in_use) \
+			panic("in_use = %i\n", (_vq)->in_use); \
+		(_vq)->in_use = __LINE__; \
+		mb(); \
+	} while(0)
+#define END_USE(_vq) \
+	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
 #else
-#define BAD_RING(vq, fmt...) \
-	do { dev_err(&vq->vq.vdev->dev, fmt); (vq)->broken = true; } while(0)
+#define BAD_RING(_vq, fmt...) \
+	do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0)
 #define START_USE(vq)
 #define END_USE(vq)
 #endif
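
The virtio_ring change is macro hygiene: the parameter is renamed from vq to _vq and each use is parenthesised, so the argument can no longer interact badly with identifiers and expressions at the call site, and the one-line START_USE body is split up for readability. The parenthesisation half is the classic pitfall; a generic illustration of it (nothing to do with virtio itself):

#include <stdio.h>

struct ring { int in_use; };

/* Unhygienic: the parameter is used without parentheses. */
#define IN_USE_BAD(vq)    vq->in_use
/* Hygienic, in the style of the patched macros. */
#define IN_USE_GOOD(_vq)  ((_vq)->in_use)

int main(void)
{
	struct ring r = { 42 };
	struct ring *main_ring = &r, *spare_ring = NULL;

	/* With a non-trivial argument the bad form mis-parses:
	 * IN_USE_BAD(spare_ring ? spare_ring : main_ring) expands to
	 * spare_ring ? spare_ring : main_ring->in_use, which does not
	 * even type-check, let alone do what was asked. */
	printf("%d\n", IN_USE_GOOD(spare_ring ? spare_ring : main_ring));
	return 0;
}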