author	Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>	2007-05-02 13:27:17 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 13:27:17 -0400
commit	8339e9fba33aa3205f541478c413982c0ac5a37f (patch)
tree	1a669dcd3cb63caf811bf54d460883a2c99ecc28 /include/asm-x86_64/apic.h
parent	f2b218dd6199983b120a96bc6531c1b81f4090d8 (diff)
[PATCH] x86-64: safe_apic_wait_icr_idle - x86_64
apic_wait_icr_idle looks like this:

static __inline__ void apic_wait_icr_idle(void)
{
	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

The busy loop in this function would not be problematic if the corresponding status bit in the ICR were always updated, but that does not seem to be the case under certain crash scenarios. Kdump uses an IPI to stop the other CPUs in the event of a crash, but when any of the other CPUs are locked up inside the NMI handler the CPU that sends the IPI will end up looping forever in the ICR check, effectively hard-locking the whole system.

Quoting from Intel's "MultiProcessor Specification" (Version 1.4), B-3:

"A local APIC unit indicates successful dispatch of an IPI by resetting the Delivery Status bit in the Interrupt Command Register (ICR). The operating system polls the delivery status bit after sending an INIT or STARTUP IPI until the command has been dispatched. A period of 20 microseconds should be sufficient for IPI dispatch to complete under normal operating conditions. If the IPI is not successfully dispatched, the operating system can abort the command. Alternatively, the operating system can retry the IPI by writing the lower 32-bit double word of the ICR. This "time-out" mechanism can be implemented through an external interrupt, if interrupts are enabled on the processor, or through execution of an instruction or time-stamp counter spin loop."

Intel's documentation suggests implementing a time-out mechanism, which, incidentally, is already open-coded in some parts of the kernel that tinker with the ICR.

Create an apic_wait_icr_idle replacement that implements the time-out mechanism and that can be used to solve the aforementioned problem.

AK: moved both functions out of line
AK: Added improved loop from Keith Owens

Signed-off-by: Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>
Signed-off-by: Andi Kleen <ak@suse.de>
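The out-of-line replacement itself lands in arch/x86_64/kernel/apic.c, which is not part of the hunk shown on this page. As a rough illustration of the bounded wait the commit message describes, a sketch along these lines could be used; the udelay() step and the iteration count are assumptions for illustration, not values taken from this page:

/*
 * Illustrative sketch only: poll the ICR busy bit with a bounded
 * number of short delays instead of spinning forever.
 * Returns the final APIC_ICR_BUSY state: 0 if dispatch completed,
 * non-zero if the ICR never went idle within the timeout.
 */
unsigned int safe_apic_wait_icr_idle(void)
{
	unsigned int send_status;
	int timeout = 0;

	do {
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		if (!send_status)
			break;
		udelay(100);	/* needs <linux/delay.h>, added by this patch */
	} while (timeout++ < 1000);

	return send_status;
}

Returning the final busy state lets a caller such as the kdump IPI path detect that dispatch never completed and decide whether to retry, warn, or press on, rather than hard-locking the machine.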
Diffstat (limited to 'include/asm-x86_64/apic.h')
-rw-r--r--	include/asm-x86_64/apic.h	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 2f3b013595ab..45e9fca1febc 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -2,6 +2,7 @@
 #define __ASM_APIC_H
 
 #include <linux/pm.h>
+#include <linux/delay.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
 #include <asm/system.h>
@@ -47,11 +48,8 @@ static __inline unsigned int apic_read(unsigned long reg)
 	return *((volatile unsigned int *)(APIC_BASE+reg));
 }
 
-static __inline__ void apic_wait_icr_idle(void)
-{
-	while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
-		cpu_relax();
-}
+extern void apic_wait_icr_idle(void);
+extern unsigned int safe_apic_wait_icr_idle(void);
 
 static inline void ack_APIC_irq(void)
 {
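With the two extern declarations above in place, a caller can bound its wait on the ICR and act on the result instead of spinning on APIC_ICR_BUSY forever. A hypothetical caller sketch, not taken from this patch; the function name is invented, and apic_write() plus the APIC_* constants come from this header and apicdef.h:

/* Hypothetical crash-path helper (illustrative only). */
static void crash_stop_other_cpus(void)
{
	unsigned int icr_busy;

	/* Bounded wait: returns non-zero if the ICR never went idle. */
	icr_busy = safe_apic_wait_icr_idle();
	if (icr_busy)
		printk(KERN_WARNING "APIC ICR still busy, sending NMI anyway\n");

	/* NMI every CPU but this one via the ICR destination shorthand. */
	apic_write(APIC_ICR, APIC_DEST_ALLBUT | APIC_DM_NMI);
}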