Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r--  arch/x86/kernel/apic/apic.c             33
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c        21
-rw-r--r--  arch/x86/kernel/apic/io_apic.c          88
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c         14
-rw-r--r--  arch/x86/kernel/apic/summit_32.c        22
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c   24
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c      27
7 files changed, 141 insertions(+), 88 deletions(-)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 96a2608252f1..b8d92606f84f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2123,24 +2123,26 @@ void default_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-unsigned int default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int __default_cpu_to_apicid(int cpu, unsigned int *apicid)
 {
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	cpu = cpumask_first(cpumask);
-	if (likely((unsigned)cpu < nr_cpu_ids))
-		return per_cpu(x86_cpu_to_apicid, cpu);
+	if (likely((unsigned int)cpu < nr_cpu_ids)) {
+		*apicid = per_cpu(x86_cpu_to_apicid, cpu);
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
 
-	return BAD_APICID;
+int default_cpu_mask_to_apicid(const struct cpumask *cpumask,
+			       unsigned int *apicid)
+{
+	int cpu = cpumask_first(cpumask);
+	return __default_cpu_to_apicid(cpu, apicid);
 }
 
-unsigned int
-default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			       const struct cpumask *andmask)
+int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+				   const struct cpumask *andmask,
+				   unsigned int *apicid)
 {
 	int cpu;
 
@@ -2148,7 +2150,8 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
 			break;
 	}
-	return per_cpu(x86_cpu_to_apicid, cpu);
+
+	return __default_cpu_to_apicid(cpu, apicid);
 }
 
 /*
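
Note on the new calling convention: the conversion above replaces functions that returned the magic value BAD_APICID on failure with functions that return 0 or -EINVAL and pass the APIC ID back through a pointer argument. A minimal standalone sketch of that out-parameter convention follows (the lookup table and names are invented for the demo; this is not kernel code):

/*
 * Standalone illustration of the error-code-plus-out-parameter convention.
 * demo_cpu_to_apicid and NR_DEMO_CPUS are made up for this sketch.
 */
#include <errno.h>
#include <stdio.h>

#define NR_DEMO_CPUS 4

static const unsigned int demo_cpu_to_apicid[NR_DEMO_CPUS] = { 0, 2, 4, 6 };

static int demo_cpu_to_apicid_lookup(int cpu, unsigned int *apicid)
{
	if ((unsigned int)cpu >= NR_DEMO_CPUS)
		return -EINVAL;	/* caller gets an error code, not BAD_APICID */

	*apicid = demo_cpu_to_apicid[cpu];
	return 0;
}

int main(void)
{
	unsigned int apicid;

	if (demo_cpu_to_apicid_lookup(2, &apicid) == 0)
		printf("cpu 2 -> apicid 0x%x\n", apicid);

	if (demo_cpu_to_apicid_lookup(42, &apicid))
		printf("cpu 42 is out of range, lookup failed\n");

	return 0;
}
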
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 3c42865757e2..515ebb00a9fc 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -525,7 +525,8 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
 	return 1;
 }
 
-static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static int
+es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
 {
 	unsigned int round = 0;
 	int cpu, uninitialized_var(apicid);
@@ -539,31 +540,33 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			WARN(1, "Not a valid mask!");
 
-			return BAD_APICID;
+			return -EINVAL;
 		}
 		apicid = new_apicid;
 		round++;
 	}
-	return apicid;
+	*dest_id = apicid;
+	return 0;
 }
 
-static unsigned int
+static int
 es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
-			      const struct cpumask *andmask)
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
+	*apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
+		return 0;
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = es7000_cpu_mask_to_apicid(cpumask);
+	es7000_cpu_mask_to_apicid(cpumask, apicid);
 
 	free_cpumask_var(cpumask);
 
-	return apicid;
+	return 0;
 }
 
 static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4061a7dee5c9..0deb773404e5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1359,7 +1359,14 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
 	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
 		return;
 
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+	if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
+					 &dest)) {
+		pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
+			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
+		__clear_irq_vector(irq, cfg);
+
+		return;
+	}
 
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1474,6 +1481,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
 					unsigned int pin, int vector)
 {
 	struct IO_APIC_route_entry entry;
+	unsigned int dest;
 
 	if (irq_remapping_enabled)
 		return;
@@ -1484,9 +1492,12 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
 	 * We use logical delivery to get the timer IRQ
 	 * to the first CPU.
 	 */
+	if (unlikely(apic->cpu_mask_to_apicid(apic->target_cpus(), &dest)))
+		dest = BAD_APICID;
+
 	entry.dest_mode = apic->irq_dest_mode;
 	entry.mask = 0;			/* don't mask IRQ for edge */
-	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
+	entry.dest = dest;
 	entry.delivery_mode = apic->irq_delivery_mode;
 	entry.polarity = 0;
 	entry.trigger = 0;
@@ -2245,16 +2256,25 @@ int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 			      unsigned int *dest_id)
 {
 	struct irq_cfg *cfg = data->chip_data;
+	unsigned int irq = data->irq;
+	int err;
 
 	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -1;
+		return -EINVAL;
 
-	if (assign_irq_vector(data->irq, data->chip_data, mask))
-		return -1;
+	err = assign_irq_vector(irq, cfg, mask);
+	if (err)
+		return err;
+
+	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
+	if (err) {
+		if (assign_irq_vector(irq, cfg, data->affinity))
+			pr_err("Failed to recover vector for irq %d\n", irq);
+		return err;
+	}
 
 	cpumask_copy(data->affinity, mask);
 
-	*dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
 	return 0;
 }
 
@@ -3040,7 +3060,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
 	if (err)
 		return err;
 
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+	err = apic->cpu_mask_to_apicid_and(cfg->domain,
+					   apic->target_cpus(), &dest);
+	if (err)
+		return err;
 
 	if (irq_remapped(cfg)) {
 		compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
@@ -3361,6 +3384,8 @@ static struct irq_chip ht_irq_chip = {
 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
 	struct irq_cfg *cfg;
+	struct ht_irq_msg msg;
+	unsigned dest;
 	int err;
 
 	if (disable_apic)
@@ -3368,36 +3393,37 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 
 	cfg = irq_cfg(irq);
 	err = assign_irq_vector(irq, cfg, apic->target_cpus());
-	if (!err) {
-		struct ht_irq_msg msg;
-		unsigned dest;
+	if (err)
+		return err;
 
-		dest = apic->cpu_mask_to_apicid_and(cfg->domain,
-						    apic->target_cpus());
+	err = apic->cpu_mask_to_apicid_and(cfg->domain,
+					   apic->target_cpus(), &dest);
+	if (err)
+		return err;
 
 	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
 	msg.address_lo =
 		HT_IRQ_LOW_BASE |
 		HT_IRQ_LOW_DEST_ID(dest) |
 		HT_IRQ_LOW_VECTOR(cfg->vector) |
 		((apic->irq_dest_mode == 0) ?
 			HT_IRQ_LOW_DM_PHYSICAL :
 			HT_IRQ_LOW_DM_LOGICAL) |
 		HT_IRQ_LOW_RQEOI_EDGE |
 		((apic->irq_delivery_mode != dest_LowestPrio) ?
 			HT_IRQ_LOW_MT_FIXED :
 			HT_IRQ_LOW_MT_ARBITRATED) |
 		HT_IRQ_LOW_IRQ_MASKED;
 
 	write_ht_irq_msg(irq, &msg);
 
 	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
 				      handle_edge_irq, "edge");
 
 	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
-	}
-	return err;
+
+	return 0;
 }
 #endif /* CONFIG_HT_IRQ */
 
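
The __ioapic_set_affinity() change above follows a rollback pattern: perform the reversible step (vector assignment) first, then the lookup that can now fail, and undo the first step if the lookup reports an error. A standalone sketch of that pattern, with made-up helpers (reserve_slot, lookup_dest) standing in for the kernel functions:

/*
 * Standalone sketch (made-up helpers, not kernel APIs) of the rollback
 * pattern: reserve a resource, attempt the fallible lookup, and release
 * the reservation on error.
 */
#include <errno.h>
#include <stdio.h>

static int reserve_slot(int irq)  { return irq >= 0 ? 0 : -EINVAL; }
static void release_slot(int irq) { (void)irq; }

static int lookup_dest(int irq, unsigned int *dest)
{
	if (irq == 13)			/* pretend this irq has no valid target */
		return -EINVAL;
	*dest = 0x10u + (unsigned int)irq;
	return 0;
}

static int set_affinity(int irq, unsigned int *dest_id)
{
	int err = reserve_slot(irq);

	if (err)
		return err;

	err = lookup_dest(irq, dest_id);
	if (err) {
		release_slot(irq);	/* roll back the reservation */
		return err;
	}

	return 0;
}

int main(void)
{
	unsigned int dest;

	if (!set_affinity(4, &dest))
		printf("irq 4 -> dest 0x%x\n", dest);
	if (set_affinity(13, &dest))
		printf("irq 13: lookup failed, reservation rolled back\n");
	return 0;
}
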
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index eb2d466fd81a..2b55514c328b 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -406,16 +406,20 @@ static inline int numaq_check_phys_apicid_present(int phys_apicid)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static int
+numaq_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
 {
-	return 0x0F;
+	*apicid = 0x0F;
+	return 0;
 }
 
-static inline unsigned int
+static int
 numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			     const struct cpumask *andmask)
+			     const struct cpumask *andmask,
+			     unsigned int *apicid)
 {
-	return 0x0F;
+	*apicid = 0x0F;
+	return 0;
 }
 
 /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 35d254c1fec2..5766d84f12d6 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -263,7 +263,8 @@ static int summit_check_phys_apicid_present(int physical_apicid)
 	return 1;
 }
 
-static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static int
+summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
 {
 	unsigned int round = 0;
 	int cpu, apicid = 0;
@@ -276,30 +277,33 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
 
 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			printk("%s: Not a valid mask!\n", __func__);
-			return BAD_APICID;
+			return -EINVAL;
 		}
 		apicid |= new_apicid;
 		round++;
 	}
-	return apicid;
+	*dest_id = apicid;
+	return 0;
 }
 
-static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
-			      const struct cpumask *andmask)
+static int
+summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
+	*apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
+		return 0;
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = summit_cpu_mask_to_apicid(cpumask);
+	summit_cpu_mask_to_apicid(cpumask, apicid);
 
 	free_cpumask_var(cpumask);
 
-	return apicid;
+	return 0;
 }
 
 /*
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 612622c47dfb..5f86f79335f4 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -96,24 +96,26 @@ static void x2apic_send_IPI_all(int vector)
 	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static int
+x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
 {
 	int cpu = cpumask_first(cpumask);
-	u32 dest = 0;
 	int i;
 
-	if (cpu > nr_cpu_ids)
-		return BAD_APICID;
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
 
+	*apicid = 0;
 	for_each_cpu_and(i, cpumask, per_cpu(cpus_in_cluster, cpu))
-		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
+		*apicid |= per_cpu(x86_cpu_to_logical_apicid, i);
 
-	return dest;
+	return 0;
 }
 
-static unsigned int
+static int
 x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
 	u32 dest = 0;
 	u16 cluster;
@@ -128,7 +130,7 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	}
 
 	if (!dest)
-		return BAD_APICID;
+		return -EINVAL;
 
 	for_each_cpu_and(i, cpumask, andmask) {
 		if (!cpumask_test_cpu(i, cpu_online_mask))
@@ -138,7 +140,9 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
 	}
 
-	return dest;
+	*apicid = dest;
+
+	return 0;
 }
 
 static void init_x2apic_ldr(void)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index df89a7d78748..2f3030fef31e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -269,23 +269,31 @@ static void uv_init_apic_ldr(void)
 {
 }
 
-static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int __uv_cpu_to_apicid(int cpu, unsigned int *apicid)
+{
+	if (likely((unsigned int)cpu < nr_cpu_ids)) {
+		*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int
+uv_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
 {
 	/*
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
 	int cpu = cpumask_first(cpumask);
-
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
-	else
-		return BAD_APICID;
+	return __uv_cpu_to_apicid(cpu, apicid);
 }
 
-static unsigned int
+static int
 uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			  const struct cpumask *andmask)
+			  const struct cpumask *andmask,
+			  unsigned int *apicid)
 {
 	int cpu;
 
@@ -297,7 +305,8 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
 			break;
 	}
-	return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+
+	return __uv_cpu_to_apicid(cpu, apicid);
 }
 
 static unsigned int x2apic_get_apic_id(unsigned long x)
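
All of the APIC drivers touched by this patch implement the same pair of callbacks, which is why they change in lockstep with the core helpers and the io_apic callers. A standalone sketch of that callback-table pattern under the new error-returning signature (demo_apic_ops and its members are hypothetical names for illustration, not the kernel's struct apic):

/*
 * Standalone sketch of a callback table whose slot returns 0 or -errno and
 * reports its result through an out parameter. Not kernel code.
 */
#include <errno.h>
#include <stdio.h>

struct demo_apic_ops {
	/* New-style slot: 0 on success, -errno on failure, result via *apicid. */
	int (*mask_to_apicid)(unsigned long mask, unsigned int *apicid);
};

/* One "driver": the lowest set bit of the mask is the destination CPU. */
static int demo_mask_to_apicid(unsigned long mask, unsigned int *apicid)
{
	unsigned int cpu;

	if (!mask)
		return -EINVAL;		/* empty mask: nothing to target */

	for (cpu = 0; !(mask & (1ul << cpu)); cpu++)
		;
	*apicid = cpu;			/* identity map for the demo */
	return 0;
}

static const struct demo_apic_ops demo_ops = {
	.mask_to_apicid = demo_mask_to_apicid,
};

int main(void)
{
	unsigned int dest;

	/* Callers check the return value instead of comparing to BAD_APICID. */
	if (demo_ops.mask_to_apicid(0x6, &dest) == 0)
		printf("mask 0x6 -> apicid %u\n", dest);
	if (demo_ops.mask_to_apicid(0, &dest))
		printf("empty mask rejected with -EINVAL\n");
	return 0;
}
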