author		Ingo Molnar <mingo@elte.hu>	2009-03-06 10:44:14 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-06 10:45:01 -0500
commit		f0ef03985130287c6c84ebe69416cf790e6cc00e (patch)
tree		3ecb04cc4d82e5fc3ae5f1747e6da172ae8cbcb7 /arch/x86/include/asm/ipi.h
parent		16097439703bcd38e9fe5608c12add6dacb825ea (diff)
parent		31bbed527e7039203920c51c9fb48c27aed0820c (diff)
Merge branch 'x86/core' into tracing/textedit
Conflicts:
	arch/x86/Kconfig
	block/blktrace.c
	kernel/irq/handle.c

Semantic conflict:
	kernel/trace/blktrace.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include/asm/ipi.h')
-rw-r--r--	arch/x86/include/asm/ipi.h	| 75
1 file changed, 40 insertions(+), 35 deletions(-)
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index c745a306f7d..0b7228268a6 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_IPI_H
 #define _ASM_X86_IPI_H
 
+#ifdef CONFIG_X86_LOCAL_APIC
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
 		cpu_relax();
 }
 
-static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
-				       unsigned int dest)
+static inline void
+__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
 {
 	/*
 	 * Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
  * This is used to send an IPI with no shorthand notation (the destination is
  * specified in bits 56 to 63 of the ICR).
  */
-static inline void __send_IPI_dest_field(unsigned int mask, int vector,
-					 unsigned int dest)
+static inline void
+ __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
 {
 	unsigned long cfg;
 
@@ -117,41 +119,44 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(const struct cpumask *mask,
-					  int vector)
-{
-	unsigned long flags;
-	unsigned long query_cpu;
+extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
+						int vector);
+extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+						  int vector);
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+						   int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+						     int vector);
 
-	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send
-	 * to an arbitrary mask, so I do a unicast to each CPU instead.
-	 * - mbligh
-	 */
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask) {
-		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
-				      vector, APIC_DEST_PHYSICAL);
-	}
-	local_irq_restore(flags);
-}
+/* Avoid include hell */
+#define NMI_VECTOR 0x02
+
+extern int no_broadcast;
+
+static inline void __default_local_send_IPI_allbutself(int vector)
+{
+	if (no_broadcast || vector == NMI_VECTOR)
+		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+	else
+		__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
+}
 
-static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
-					    int vector)
+static inline void __default_local_send_IPI_all(int vector)
 {
-	unsigned long flags;
-	unsigned int query_cpu;
-	unsigned int this_cpu = smp_processor_id();
-
-	/* See Hack comment above */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask)
-		if (query_cpu != this_cpu)
-			__send_IPI_dest_field(
-				per_cpu(x86_cpu_to_apicid, query_cpu),
-				vector, APIC_DEST_PHYSICAL);
-	local_irq_restore(flags);
+	if (no_broadcast || vector == NMI_VECTOR)
+		apic->send_IPI_mask(cpu_online_mask, vector);
+	else
+		__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
 }
 
+#ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_logical(const struct cpumask *mask,
+					  int vector);
+extern void default_send_IPI_allbutself(int vector);
+extern void default_send_IPI_all(int vector);
+extern void default_send_IPI_self(int vector);
+#endif
+
+#endif
+
 #endif /* _ASM_X86_IPI_H */