author     Ingo Molnar <mingo@elte.hu>    2009-03-10 05:16:17 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-03-10 05:17:48 -0400
commit     8293dd6f86e759068ce918aa10ca9c5d6d711cd0
tree       de366d69078bf2b98c6765fa4ec1cc652f3d3173  /arch/x86/kernel
parent     631595fbf4aeac260e664a8a002897e4db6a50dd
parent     467c88fee51e2ae862e9485245687da0730e29aa
Merge branch 'x86/core' into tracing/ftrace
Semantic merge:
kernel/trace/trace_functions_graph.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/amd.c        52
-rw-r--r--  arch/x86/kernel/cpu/intel.c      25
-rw-r--r--  arch/x86/kernel/setup_percpu.c   73
-rw-r--r--  arch/x86/kernel/smpboot.c        78
-rw-r--r--  arch/x86/kernel/tlb_uv.c          2

5 files changed, 129 insertions(+), 101 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 25423a5b80ed..f47df59016c5 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/numa_64.h>
@@ -141,6 +142,55 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
         }
 }
 
+static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+        /* are we being called from identify_secondary_cpu()? */
+        if (c->cpu_index == boot_cpu_id)
+                return;
+
+        /*
+         * Certain Athlons might work (for various values of 'work') in SMP
+         * but they are not certified as MP capable.
+         */
+        /* Athlon 660/661 is valid. */
+        if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+            (c->x86_mask == 1)))
+                goto valid_k7;
+
+        /* Duron 670 is valid */
+        if ((c->x86_model == 7) && (c->x86_mask == 0))
+                goto valid_k7;
+
+        /*
+         * Athlon 662, Duron 671, and Athlon >model 7 have the MP
+         * capability bit. It's worth noting that the A5 stepping (662)
+         * of some Athlon XPs has the MP bit set.
+         * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+         * more.
+         */
+        if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+            ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+             (c->x86_model > 7))
+                if (cpu_has_mp)
+                        goto valid_k7;
+
+        /* If we get here, not a certified SMP capable AMD system. */
+
+        /*
+         * Don't taint if we are running SMP kernel on a single non-MP
+         * approved Athlon
+         */
+        WARN_ONCE(1, "WARNING: This combination of AMD "
+                "processors is not suitable for SMP.\n");
+        if (!test_taint(TAINT_UNSAFE_SMP))
+                add_taint(TAINT_UNSAFE_SMP);
+
+valid_k7:
+        ;
+#endif
+}
+
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 {
         u32 l, h;
@@ -175,6 +225,8 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
         }
 
         set_cpu_cap(c, X86_FEATURE_K7);
+
+        amd_k7_smp_check(c);
 }
 #endif
 
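
The decision table in amd_k7_smp_check() is compact but easy to misread, so here is the same logic as a standalone sketch. This is not kernel code: k7_mp_certified() and has_mp_bit are hypothetical stand-ins for the CPUID plumbing and the cpu_has_mp test.

    #include <stdbool.h>

    /* Mirrors the model/stepping table in amd_k7_smp_check() above;
     * applies to K7-family (x86 == 6) AMD parts only. */
    static bool k7_mp_certified(unsigned int model, unsigned int stepping,
                                bool has_mp_bit)
    {
            /* Athlon 660/661 is certified. */
            if (model == 6 && stepping <= 1)
                    return true;

            /* Duron 670 is certified. */
            if (model == 7 && stepping == 0)
                    return true;

            /* Anything newer counts only if it advertises the MP
             * capability bit (cpu_has_mp in the hunk above). */
            if ((model == 6 && stepping >= 2) ||
                (model == 7 && stepping >= 1) ||
                model > 7)
                    return has_mp_bit;

            return false;
    }

amd_k7_smp_check() warns and taints the kernel with TAINT_UNSAFE_SMP whenever a secondary CPU fails this test.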
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1a89a2b68d15..c1c04bf0df77 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -14,6 +14,7 @@
 #include <asm/uaccess.h>
 #include <asm/ds.h>
 #include <asm/bugs.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/topology.h>
@@ -116,6 +117,28 @@ static void __cpuinit trap_init_f00f_bug(void)
 }
 #endif
 
+static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+        /* are we being called from identify_secondary_cpu()? */
+        if (c->cpu_index == boot_cpu_id)
+                return;
+
+        /*
+         * Mask B, Pentium, but not Pentium MMX
+         */
+        if (c->x86 == 5 &&
+            c->x86_mask >= 1 && c->x86_mask <= 4 &&
+            c->x86_model <= 3) {
+                /*
+                 * Remember we have B step Pentia with bugs
+                 */
+                WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
+                        "with B stepping processors.\n");
+        }
+#endif
+}
+
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 {
         unsigned long lo, hi;
@@ -192,6 +215,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_NUMAQ
         numaq_tsc_disable();
 #endif
+
+        intel_smp_check(c);
 }
 #else
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
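
The Intel counterpart reduces to a single family/model/stepping predicate. A minimal sketch, with is_b_step_pentium() as a hypothetical name rather than a kernel helper:

    #include <stdbool.h>

    /* Matches the condition in intel_smp_check() above: family 5
     * (Pentium) parts at steppings 1-4 with model <= 3, i.e. the
     * buggy B-step Pentia but not the Pentium MMX. */
    static bool is_b_step_pentium(unsigned int family, unsigned int model,
                                  unsigned int stepping)
    {
            return family == 5 && model <= 3 &&
                   stepping >= 1 && stepping <= 4;
    }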
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index c29f301d3885..efa615f2bf43 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -42,6 +42,19 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
 };
 EXPORT_SYMBOL(__per_cpu_offset);
 
+/*
+ * On x86_64 symbols referenced from code should be reachable using
+ * 32bit relocations.  Reserve space for static percpu variables in
+ * modules so that they are always served from the first chunk which
+ * is located at the percpu segment base.  On x86_32, anything can
+ * address anywhere.  No need to reserve space in the first chunk.
+ */
+#ifdef CONFIG_X86_64
+#define PERCPU_FIRST_CHUNK_RESERVE      PERCPU_MODULE_RESERVE
+#else
+#define PERCPU_FIRST_CHUNK_RESERVE      0
+#endif
+
 /**
  * pcpu_need_numa - determine percpu allocation needs to consider NUMA
  *
@@ -141,7 +154,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 {
         static struct vm_struct vm;
         pg_data_t *last;
-        size_t ptrs_size;
+        size_t ptrs_size, dyn_size;
         unsigned int cpu;
         ssize_t ret;
 
@@ -169,12 +182,14 @@ proceed:
          * Currently supports only single page.  Supporting multiple
          * pages won't be too difficult if it ever becomes necessary.
          */
-        pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+        pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+                               PERCPU_DYNAMIC_RESERVE);
         if (pcpur_size > PMD_SIZE) {
                 pr_warning("PERCPU: static data is larger than large page, "
                            "can't use large page\n");
                 return -EINVAL;
         }
+        dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
         /* allocate pointer array and alloc large pages */
         ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
@@ -217,8 +232,9 @@ proceed:
         pr_info("PERCPU: Remapped at %p with large pages, static data "
                 "%zu bytes\n", vm.addr, static_size);
 
-        ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE,
-                                     pcpur_size - static_size, vm.addr, NULL);
+        ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+                                     PERCPU_FIRST_CHUNK_RESERVE,
+                                     PMD_SIZE, dyn_size, vm.addr, NULL);
         goto out_free_ar;
 
 enomem:
@@ -241,24 +257,31 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
  * Embedding allocator
  *
  * The first chunk is sized to just contain the static area plus
- * PERCPU_DYNAMIC_RESERVE and allocated as a contiguous area using
- * bootmem allocator and used as-is without being mapped into vmalloc
- * area.  This enables the first chunk to piggy back on the linear
- * physical PMD mapping and doesn't add any additional pressure to
- * TLB.
+ * module and dynamic reserves, and allocated as a contiguous area
+ * using bootmem allocator and used as-is without being mapped into
+ * vmalloc area.  This enables the first chunk to piggy back on the
+ * linear physical PMD mapping and doesn't add any additional pressure
+ * to TLB.  Note that if the needed size is smaller than the minimum
+ * unit size, the leftover is returned to the bootmem allocator.
  */
 static void *pcpue_ptr __initdata;
+static size_t pcpue_size __initdata;
 static size_t pcpue_unit_size __initdata;
 
 static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
 {
-        return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
-                            + ((size_t)pageno << PAGE_SHIFT));
+        size_t off = (size_t)pageno << PAGE_SHIFT;
+
+        if (off >= pcpue_size)
+                return NULL;
+
+        return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
 }
 
 static ssize_t __init setup_pcpu_embed(size_t static_size)
 {
         unsigned int cpu;
+        size_t dyn_size;
 
         /*
          * If large page isn't supported, there's no benefit in doing
@@ -269,25 +292,32 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
                 return -EINVAL;
 
         /* allocate and copy */
-        pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
-        pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
+        pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+                               PERCPU_DYNAMIC_RESERVE);
+        pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
+        dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
+
         pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
                                        PAGE_SIZE);
         if (!pcpue_ptr)
                 return -ENOMEM;
 
-        for_each_possible_cpu(cpu)
-                memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
-                       static_size);
+        for_each_possible_cpu(cpu) {
+                void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+
+                free_bootmem(__pa(ptr + pcpue_size),
+                             pcpue_unit_size - pcpue_size);
+                memcpy(ptr, __per_cpu_load, static_size);
+        }
 
         /* we're ready, commit */
         pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
-                pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+                pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
 
         return pcpu_setup_first_chunk(pcpue_get_page, static_size,
-                                      pcpue_unit_size,
-                                      pcpue_unit_size - static_size, pcpue_ptr,
-                                      NULL);
+                                      PERCPU_FIRST_CHUNK_RESERVE,
+                                      pcpue_unit_size, dyn_size,
+                                      pcpue_ptr, NULL);
 }
 
 /*
@@ -344,7 +374,8 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
         pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
                 pcpu4k_nr_static_pages, static_size);
 
-        ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
+        ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
+                                     PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL,
                                      pcpu4k_populate_pte);
         goto out_free_ar;
 
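
Taken together, the three call sites above imply that pcpu_setup_first_chunk() now takes (get_page_fn, static_size, reserved_size, unit_size, dyn_size, base_addr, populate_pte_fn), with the 4k path passing -1 for the unit and dynamic sizes, presumably to let the allocator pick defaults. On x86_64 the new reserved region carves out room for module static percpu variables, giving each cpu's unit roughly this layout:

    /*
     * Per-cpu unit layout implied by the call sites above (x86_64;
     * sizes symbolic, not to scale):
     *
     *  |<- static_size ->|<- PERCPU_FIRST_CHUNK_RESERVE ->|<- dyn_size ->|
     *  | kernel's static | static percpu variables of     | dynamic      |
     *  | percpu data     | modules (PERCPU_MODULE_RESERVE)| allocations  |
     *
     * On x86_32, PERCPU_FIRST_CHUNK_RESERVE is 0 and the middle region
     * vanishes: as the comment added at the top of the file notes,
     * 32-bit code can address anything, so module percpu variables do
     * not have to live in the first chunk.
     */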
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 249334f5080a..ef7d10170c30 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -114,10 +114,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 atomic_t init_deasserted;
 
-
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
-
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
 
 /* which logical CPUs are on which nodes */
@@ -271,8 +267,6 @@ static void __cpuinit smp_callin(void)
         cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
-static int __cpuinitdata unsafe_smp;
-
 /*
  * Activate a secondary processor.
  */
@@ -340,76 +334,6 @@ notrace static void __cpuinit start_secondary(void *unused)
         cpu_idle();
 }
 
-static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
-{
-        /*
-         * Mask B, Pentium, but not Pentium MMX
-         */
-        if (c->x86_vendor == X86_VENDOR_INTEL &&
-            c->x86 == 5 &&
-            c->x86_mask >= 1 && c->x86_mask <= 4 &&
-            c->x86_model <= 3)
-                /*
-                 * Remember we have B step Pentia with bugs
-                 */
-                smp_b_stepping = 1;
-
-        /*
-         * Certain Athlons might work (for various values of 'work') in SMP
-         * but they are not certified as MP capable.
-         */
-        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-                if (num_possible_cpus() == 1)
-                        goto valid_k7;
-
-                /* Athlon 660/661 is valid. */
-                if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-                    (c->x86_mask == 1)))
-                        goto valid_k7;
-
-                /* Duron 670 is valid */
-                if ((c->x86_model == 7) && (c->x86_mask == 0))
-                        goto valid_k7;
-
-                /*
-                 * Athlon 662, Duron 671, and Athlon >model 7 have capability
-                 * bit. It's worth noting that the A5 stepping (662) of some
-                 * Athlon XP's have the MP bit set.
-                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
-                 * more.
-                 */
-                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
-                     (c->x86_model > 7))
-                        if (cpu_has_mp)
-                                goto valid_k7;
-
-                /* If we get here, not a certified SMP capable AMD system. */
-                unsafe_smp = 1;
-        }
-
-valid_k7:
-        ;
-}
-
-static void __cpuinit smp_checks(void)
-{
-        if (smp_b_stepping)
-                printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
-                       "with B stepping processors.\n");
-
-        /*
-         * Don't taint if we are running SMP kernel on a single non-MP
-         * approved Athlon
-         */
-        if (unsafe_smp && num_online_cpus() > 1) {
-                printk(KERN_INFO "WARNING: This combination of AMD"
-                       "processors is not suitable for SMP.\n");
-                add_taint(TAINT_UNSAFE_SMP);
-        }
-}
-
 /*
  * The bootstrap kernel entry code has set these up.  Save them for
  * a given CPU
@@ -423,7 +347,6 @@ void __cpuinit smp_store_cpu_info(int id)
         c->cpu_index = id;
         if (id != 0)
                 identify_secondary_cpu(c);
-        smp_apply_quirks(c);
 }
 
 
@@ -1193,7 +1116,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
         pr_debug("Boot done.\n");
 
         impress_friends();
-        smp_checks();
 #ifdef CONFIG_X86_IO_APIC
         setup_ioapic_dest();
 #endif
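
The smpboot.c side is pure removal: the one-shot smp_apply_quirks()/smp_checks() pass is gone, replaced by the per-CPU checks added in cpu/amd.c and cpu/intel.c above. The resulting call path is roughly the following; the intermediate vendor init callees are assumed from context and are not part of this diff:

    smp_store_cpu_info(id)
      -> identify_secondary_cpu(c)                   /* id != 0 */
           -> vendor c_init(c)
                -> init_amd_k7(c)       -> amd_k7_smp_check(c)   /* AMD K7 */
                -> intel_workarounds(c) -> intel_smp_check(c)    /* Intel  */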
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f04549afcfe9..d038b9c45cf8 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -314,8 +314,6 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
         int locals = 0;
         struct bau_desc *bau_desc;
 
-        WARN_ON(!in_atomic());
-
         cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
         uv_cpu = uv_blade_processor_id();