Diffstat (limited to 'arch/x86/kernel/ipi.c')
-rw-r--r--  arch/x86/kernel/ipi.c  139
1 file changed, 2 insertions(+), 137 deletions(-)
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 0893fa144581..339f4f3feee5 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -17,148 +17,13 @@
 #include <asm/mmu_context.h>
 #include <asm/apic.h>
 #include <asm/proto.h>
+#include <asm/ipi.h>
 
 #ifdef CONFIG_X86_32
-#include <asm/genapic.h>
-
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR(unsigned int shortcut, int vector)
-{
-        unsigned int icr = shortcut | apic->dest_logical;
-
-        switch (vector) {
-        default:
-                icr |= APIC_DM_FIXED | vector;
-                break;
-        case NMI_VECTOR:
-                icr |= APIC_DM_NMI;
-                break;
-        }
-        return icr;
-}
-
-static inline int __prepare_ICR2(unsigned int mask)
-{
-        return SET_APIC_DEST_FIELD(mask);
-}
-
-void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-        /*
-         * Subtle. In the case of the 'never do double writes' workaround
-         * we have to lock out interrupts to be safe. As we don't care
-         * of the value read we use an atomic rmw access to avoid costly
-         * cli/sti. Otherwise we use an even cheaper single atomic write
-         * to the APIC.
-         */
-        unsigned int cfg;
-
-        /*
-         * Wait for idle.
-         */
-        apic_wait_icr_idle();
-
-        /*
-         * No need to touch the target chip field
-         */
-        cfg = __prepare_ICR(shortcut, vector);
-
-        /*
-         * Send the IPI. The write to APIC_ICR fires this off.
-         */
-        apic_write(APIC_ICR, cfg);
-}
 
 void default_send_IPI_self(int vector)
 {
-        __default_send_IPI_shortcut(APIC_DEST_SELF, vector);
-}
-
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __default_send_IPI_dest_field(unsigned long mask, int vector)
-{
-        unsigned long cfg;
-
-        /*
-         * Wait for idle.
-         */
-        if (unlikely(vector == NMI_VECTOR))
-                safe_apic_wait_icr_idle();
-        else
-                apic_wait_icr_idle();
-
-        /*
-         * prepare target chip field
-         */
-        cfg = __prepare_ICR2(mask);
-        apic_write(APIC_ICR2, cfg);
-
-        /*
-         * program the ICR
-         */
-        cfg = __prepare_ICR(0, vector);
-
-        /*
-         * Send the IPI. The write to APIC_ICR fires this off.
-         */
-        apic_write(APIC_ICR, cfg);
-}
-
-/*
- * This is only used on smaller machines.
- */
-void default_send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
-{
-        unsigned long mask = cpumask_bits(cpumask)[0];
-        unsigned long flags;
-
-        local_irq_save(flags);
-        WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
-        __default_send_IPI_dest_field(mask, vector);
-        local_irq_restore(flags);
-}
-
-void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector)
-{
-        unsigned long flags;
-        unsigned int query_cpu;
-
-        /*
-         * Hack. The clustered APIC addressing mode doesn't allow us to send
-         * to an arbitrary mask, so I do a unicasts to each CPU instead. This
-         * should be modified to do 1 message per cluster ID - mbligh
-         */
-
-        local_irq_save(flags);
-        for_each_cpu(query_cpu, mask)
-                __default_send_IPI_dest_field(apic->cpu_to_logical_apicid(query_cpu), vector);
-        local_irq_restore(flags);
-}
-
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
-{
-        unsigned long flags;
-        unsigned int query_cpu;
-        unsigned int this_cpu = smp_processor_id();
-
-        /* See Hack comment above */
-
-        local_irq_save(flags);
-        for_each_cpu(query_cpu, mask) {
-                if (query_cpu == this_cpu)
-                        continue;
-                __default_send_IPI_dest_field(
-                        apic->cpu_to_logical_apicid(query_cpu), vector);
-        }
-        local_irq_restore(flags);
+        __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
 }
 
 /* must come after the send_IPI functions above for inlining */
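
For context on what the removed helpers did: they all compose a 32-bit ICR value (destination shorthand | destination mode | delivery mode | vector) and write it to the local APIC, and after this change the destination-mode bits are passed explicitly as the third argument of __default_send_IPI_shortcut(). The sketch below is a minimal user-space illustration of that bit layout only, not kernel code: the constants are assumed to mirror asm/apicdef.h, prepare_icr() is a hypothetical stand-in for the removed __prepare_ICR(), and no hardware is touched.

/* icr_sketch.c - illustrative only; constants assumed from asm/apicdef.h */
#include <stdio.h>

#define APIC_DEST_SELF      0x40000u   /* shorthand: send to self   */
#define APIC_DEST_LOGICAL   0x00800u   /* logical destination mode  */
#define APIC_DM_FIXED       0x00000u   /* fixed delivery mode       */
#define APIC_DM_NMI         0x00400u   /* NMI delivery mode         */
#define NMI_VECTOR          0x02u

/*
 * Hypothetical stand-in for the removed __prepare_ICR(): the destination
 * bits ("dest") are an explicit parameter, matching the new three-argument
 * __default_send_IPI_shortcut() call in the diff above.
 */
static unsigned int prepare_icr(unsigned int shortcut, unsigned int vector,
                                unsigned int dest)
{
        unsigned int icr = shortcut | dest;

        if (vector == NMI_VECTOR)
                icr |= APIC_DM_NMI;            /* NMIs carry no vector number */
        else
                icr |= APIC_DM_FIXED | vector;

        return icr;
}

int main(void)
{
        /* Example: a self-IPI with an arbitrary vector 0xfd, logical mode. */
        unsigned int icr = prepare_icr(APIC_DEST_SELF, 0xfd, APIC_DEST_LOGICAL);

        printf("ICR word for self-IPI: 0x%08x\n", icr);
        return 0;
}

Built with a plain C compiler, the example prints 0x000408fd (0x40000 | 0x800 | 0xfd).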