author     Yinghai Lu <yinghai@kernel.org>    2009-01-30 20:29:27 -0500
committer  Ingo Molnar <mingo@elte.hu>        2009-02-05 16:27:56 -0500
commit     c5e954820335ef5aed1662b70aaf5deb9de16735 (patch)
tree       0517328a865ac68646f30981f5a1ca9763af1aac /arch/x86/include/asm/ipi.h
parent     fdbecd9fd14198853bec4cbae8bc7af93f2e3de3 (diff)
x86: move default_ipi_xx back to ipi.c
Impact: cleanup
Only leave the __default_send_IPI_*() etc. inline helpers in the .h file; the default_send_IPI_*() implementations move out to ipi.c.
Beyond the cleanup factor, this saves a bit of code size as well:
   text	   data	    bss	     dec	   hex	filename
7281931	1630144	1463304	10375379	9e50d3	vmlinux.before
7281753	1630144	1463304	10375201	9e5021	vmlinux.after
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include/asm/ipi.h')
-rw-r--r--   arch/x86/include/asm/ipi.h   127
1 file changed, 13 insertions(+), 114 deletions(-)
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index aa79945445b5..5f2efc5d9927 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -119,112 +119,22 @@ static inline void
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void
-default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
-{
-	unsigned long query_cpu;
-	unsigned long flags;
-
-	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send
-	 * to an arbitrary mask, so I do a unicast to each CPU instead.
-	 * - mbligh
-	 */
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask) {
-		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
-				query_cpu), vector, APIC_DEST_PHYSICAL);
-	}
-	local_irq_restore(flags);
-}
-
-static inline void
-default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector)
-{
-	unsigned int this_cpu = smp_processor_id();
-	unsigned int query_cpu;
-	unsigned long flags;
-
-	/* See Hack comment above */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask) {
-		if (query_cpu == this_cpu)
-			continue;
-		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
-				query_cpu), vector, APIC_DEST_PHYSICAL);
-	}
-	local_irq_restore(flags);
-}
-
+extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
+						int vector);
+extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+						  int vector);
 #include <asm/genapic.h>
 
-static inline void
-default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
-{
-	unsigned long flags;
-	unsigned int query_cpu;
-
-	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send
-	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
-	 * should be modified to do 1 message per cluster ID - mbligh
-	 */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask)
-		__default_send_IPI_dest_field(
-			apic->cpu_to_logical_apicid(query_cpu), vector,
-			apic->dest_logical);
-	local_irq_restore(flags);
-}
-
-static inline void
-default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector)
-{
-	unsigned long flags;
-	unsigned int query_cpu;
-	unsigned int this_cpu = smp_processor_id();
-
-	/* See Hack comment above */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask) {
-		if (query_cpu == this_cpu)
-			continue;
-		__default_send_IPI_dest_field(
-			apic->cpu_to_logical_apicid(query_cpu), vector,
-			apic->dest_logical);
-	}
-	local_irq_restore(flags);
-}
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+						   int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+						     int vector);
 
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
 extern int no_broadcast;
 
-#ifndef CONFIG_X86_64
-/*
- * This is only used on smaller machines.
- */
-static inline void default_send_IPI_mask_bitmask_logical(const struct cpumask *cpumask, int vector)
-{
-	unsigned long mask = cpumask_bits(cpumask)[0];
-	unsigned long flags;
-
-	local_irq_save(flags);
-	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
-	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-	local_irq_restore(flags);
-}
-
-static inline void default_send_IPI_mask_logical(const struct cpumask *mask, int vector)
-{
-	default_send_IPI_mask_bitmask_logical(mask, vector);
-}
-#endif
-
 static inline void __default_local_send_IPI_allbutself(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
@@ -242,22 +152,11 @@ static inline void __default_local_send_IPI_all(int vector)
 }
 
 #ifdef CONFIG_X86_32
-static inline void default_send_IPI_allbutself(int vector)
-{
-	/*
-	 * if there are no other CPUs in the system then we get an APIC send
-	 * error if we try to broadcast, thus avoid sending IPIs in this case.
-	 */
-	if (!(num_online_cpus() > 1))
-		return;
-
-	__default_local_send_IPI_allbutself(vector);
-}
-
-static inline void default_send_IPI_all(int vector)
-{
-	__default_local_send_IPI_all(vector);
-}
+extern void default_send_IPI_mask_logical(const struct cpumask *mask,
+					  int vector);
+extern void default_send_IPI_allbutself(int vector);
+extern void default_send_IPI_all(int vector);
+extern void default_send_IPI_self(int vector);
 #endif
 
 #endif
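Note: the ipi.c side of the move is outside this diffstat, which is limited to ipi.h. As a rough sketch only, assuming the removed header body is carried over unchanged into the file named in the subject (presumably arch/x86/kernel/ipi.c) with the static inline qualifier dropped, one of the out-of-line definitions would look like this:

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Sketch: same unicast-per-CPU loop as the removed inline. The
	 * clustered APIC addressing mode doesn't allow sending to an
	 * arbitrary mask, so a separate IPI is sent to each CPU in the mask.
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}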