author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-03-17 19:36:52 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-03-27 11:08:45 -0400
commit		04c44a080d2f699a3042d4e743f7ad2ffae9d538 (patch)
tree		f198aa2f653a8b2cd0e4d33ccc68587e9027cd81 /arch/x86/xen/xen-asm.S
parent		5abbcf29928966b28d70f8a0da424f55582f8603 (diff)
xen: fix RMW when unmasking events
xen_irq_enable_direct and xen_sysexit were using "andw $0x00ff,
XEN_vcpu_info_pending(vcpu)" to unmask events and test for pending ones
in one instruction.
Unfortunately, the pending flag must be modified with a locked operation
since it can be set by another CPU, and the unlocked form of this
operation was causing the pending flag to get lost, allowing the processor
to return to usermode with pending events and ultimately deadlock.
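As a rough illustration of the lost update (a hedged C sketch with made-up names, not the real vcpu_info layout), the unlocked andw amounts to a load/modify/store sequence that can overwrite a pending bit set concurrently:

	#include <stdint.h>

	/* Illustrative only: the low byte stands in for the pending flag,
	   the high byte for the event mask. */
	static uint16_t info_word;

	/* What an unlocked "andw $0x00ff" boils down to. */
	void unmask_with_rmw(void)
	{
		uint16_t tmp = info_word;	/* load: pending byte may still be clear */
		tmp &= 0x00ff;			/* clear the mask byte */
		info_word = tmp;		/* store: clobbers any pending bit set in between */
	}

	/* Another CPU (or the hypervisor) marking an event pending. */
	void mark_pending(void)
	{
		info_word |= 0x0001;
	}

If mark_pending() runs between the load and the store above, its pending bit is overwritten by the stale value, which is how events were being lost.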
The simple fix would be to make it a locked operation, but that's rather
costly and unnecessary. The fix here is to split the mask-clearing and
pending-testing into two instructions; the interrupt window between
them is of no concern because either way pending or new events will
be processed.
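In the same illustrative C terms (field and function names here are hypothetical stand-ins), the fix corresponds to a plain byte store to the mask followed by an independent read of the pending flag, so the pending byte is never rewritten:

	#include <stdint.h>

	static volatile uint8_t vcpu_mask;	/* stand-in for vcpu_info_mask */
	static volatile uint8_t vcpu_pending;	/* stand-in for vcpu_info_pending */

	void check_events(void);		/* stand-in for the real check_events path */

	void unmask_split(void)
	{
		vcpu_mask = 0;			/* movb $0, ...mask: unmask without touching pending */
		if (vcpu_pending)		/* testb $0xff, ...pending: read-only check */
			check_events();		/* falls through the jz and handles pending events */
	}

An event arriving between the store and the test is either seen by the test or delivered normally once the mask is clear, which is why the window between the two instructions is harmless.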
This should also fix lingering bugs in the use of direct vcpu structure access.
[ Stable: needed in 2.6.24.x ]
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stable <stable@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/xen/xen-asm.S')
 -rw-r--r--	arch/x86/xen/xen-asm.S	9
 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 1a43b60c0c62..6b7190449d07 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -33,12 +33,17 @@
 	   events, then enter the hypervisor to get them handled.
 	 */
 ENTRY(xen_irq_enable_direct)
-	/* Clear mask and test pending */
-	andw $0x00ff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
+	/* Unmask events */
+	movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+
 	/* Preempt here doesn't matter because that will deal with
 	   any pending interrupts.  The pending check may end up being
 	   run on the wrong CPU, but that doesn't hurt. */
+
+	/* Test for pending */
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
 	jz 1f
+
 2:	call check_events
 1:
 ENDPATCH(xen_irq_enable_direct)