author		Igor Mammedov <imammedo@redhat.com>		2011-09-01 07:46:55 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-09-01 12:54:42 -0400
commit		d198d499148a0c64a41b3aba9e7dd43772832b91
tree		ddc0bf46ac10d84db5d06dceb44c21770692f596 /arch
parent		d312ae878b6aed3912e1acaaf5d0b2a9d08a4f11
xen: x86_32: do not enable interrupts when returning from exception in interrupt context
If a vmalloc page fault happens inside an interrupt handler with interrupts
disabled, then on the exit path from the exception handler, when there are no
pending interrupts, the following code (arch/x86/xen/xen-asm_32.S:112):

	cmpw $0x0001, XEN_vcpu_info_pending(%eax)
	sete XEN_vcpu_info_mask(%eax)

will enable interrupts even if they had previously been disabled according to
the eflags from the bounce frame (arch/x86/xen/xen-asm_32.S:99):

	testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)
	setz XEN_vcpu_info_mask(%eax)

The solution is to set XEN_vcpu_info_mask only when it should be set according
to

	cmpw $0x0001, XEN_vcpu_info_pending(%eax)

but never to clear it when there are no pending events.

A reproducer for the bug is attached to RHBZ 707552.
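To spell out the behavioural difference, here is a minimal C model of the two
exit paths. This is a sketch, not kernel code: the struct and function names
are illustrative, and only the pending/mask bytes correspond to the fields
addressed by XEN_vcpu_info_pending and XEN_vcpu_info_mask.

	/* Illustrative model only; field comments map to the asm offsets. */
	struct vcpu_info_model {
		unsigned char pending;	/* XEN_vcpu_info_pending */
		unsigned char mask;	/* XEN_vcpu_info_mask: 1 = events masked */
	};

	/*
	 * Old path: sete writes the mask byte unconditionally, so when
	 * nothing is pending it clears the mask and re-enables event
	 * delivery even if the interrupted context had events masked.
	 */
	static void exit_path_old(struct vcpu_info_model *v)
	{
		v->mask = (v->pending == 1);
	}

	/*
	 * New path: the mask is only ever set on this path, never cleared,
	 * so a context that was running with events masked stays masked.
	 */
	static void exit_path_new(struct vcpu_info_model *v)
	{
		if (v->pending == 1)
			v->mask = 1;
	}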
CC: stable@kernel.org
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Acked-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch')
 arch/x86/xen/xen-asm_32.S | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 22a2093b5862..b040b0e518ca 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -113,11 +113,13 @@ xen_iret_start_crit:
 
 	/*
 	 * If there's something pending, mask events again so we can
-	 * jump back into xen_hypervisor_callback
+	 * jump back into xen_hypervisor_callback. Otherwise do not
+	 * touch XEN_vcpu_info_mask.
 	 */
-	sete XEN_vcpu_info_mask(%eax)
+	jne 1f
+	movb $1, XEN_vcpu_info_mask(%eax)
 
-	popl %eax
+1:	popl %eax
 
 	/*
 	 * From this point on the registers are restored and the stack