Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/apic.c                 |  8
 arch/x86/kernel/cpu/intel_cacheinfo.c  |  4
 arch/x86/kernel/hpet.c                 |  8
 arch/x86/kernel/i8253.c                |  2
 arch/x86/kernel/io_apic.c              | 81
 arch/x86/kernel/irq_32.c               |  2
 arch/x86/kernel/irq_64.c               |  2
 arch/x86/kernel/mfgpt_32.c             |  2
 arch/x86/kernel/setup_percpu.c         | 10
 arch/x86/kernel/smpboot.c              | 17
 arch/x86/kernel/vmiclock_32.c          |  2
 11 files changed, 67 insertions(+), 71 deletions(-)
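
Illustrative note (not part of the patch): the theme running through every hunk below is the conversion from passing cpumask_t by value to passing const struct cpumask *. With NR_CPUS=4096, a cpumask_t is 512 bytes, so every by-value call copied half a kilobyte onto the stack. A minimal userspace sketch of the difference; the struct layout and sizes here are illustrative stand-ins, not the kernel's definitions:

/* Illustrative userspace model of the by-value vs. by-pointer tradeoff.
 * Not kernel code: "struct cpumask" here is a stand-in sized for
 * NR_CPUS = 4096, the configuration that motivated the conversion. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096

struct cpumask {
        unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
};

/* Old style: 512 bytes copied onto the stack at every call. */
static int count_bits_byval(struct cpumask mask)
{
        int n = 0;
        for (size_t i = 0; i < sizeof(mask.bits) / sizeof(mask.bits[0]); i++)
                n += __builtin_popcountl(mask.bits[i]);
        return n;
}

/* New style: an 8-byte pointer, no copy, and the callee cannot
 * modify the caller's mask because of the const qualifier. */
static int count_bits_byptr(const struct cpumask *mask)
{
        int n = 0;
        for (size_t i = 0; i < sizeof(mask->bits) / sizeof(mask->bits[0]); i++)
                n += __builtin_popcountl(mask->bits[i]);
        return n;
}

int main(void)
{
        struct cpumask online;

        memset(&online, 0, sizeof(online));
        online.bits[0] = 0xffUL;        /* CPUs 0-7 online */

        printf("by value:   %d bits set, %zu-byte argument\n",
               count_bits_byval(online), sizeof(online));
        printf("by pointer: %d bits set, %zu-byte argument\n",
               count_bits_byptr(&online), sizeof(&online));
        return 0;
}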
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index b5229affb953..6107b41da9a5 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -142,7 +142,7 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(cpumask_t mask);
+static void lapic_timer_broadcast(const struct cpumask *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -455,10 +455,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(cpumask_t mask)
+static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
-	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+	send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
@@ -471,7 +471,7 @@ static void __cpuinit setup_APIC_timer(void)
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
-	levt->cpumask = cpumask_of_cpu(smp_processor_id());
+	levt->cpumask = cpumask_of(smp_processor_id());
 
 	clockevents_register_device(levt);
 }
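
Note on the cpumask_of() conversion above (and the identical ones in hpet.c, i8253.c and vmiclock_32.c below): cpumask_of_cpu() built a single-CPU cpumask_t and returned it by value, whereas cpumask_of() hands back a const struct cpumask * to shared data, which is why clock_event_device::cpumask can now hold a pointer instead of a full mask. A simplified userspace model of the idea; this is not the kernel's actual cpu_bit_bitmap implementation:

/* Simplified model of cpumask_of(): return a pointer to a per-CPU
 * single-bit mask instead of building a whole mask on the caller's
 * stack. Illustrative only -- the real kernel derives these masks
 * from a shared read-only table (cpu_bit_bitmap). */
#include <stdio.h>

#define NR_CPUS 64

struct cpumask { unsigned long bits[1]; };	/* 64 CPUs fit in one long */

static struct cpumask single_cpu_mask[NR_CPUS];

static const struct cpumask *cpumask_of(int cpu)
{
        /* Fill the per-CPU slot and hand out a const pointer to it. */
        single_cpu_mask[cpu].bits[0] = 1UL << cpu;
        return &single_cpu_mask[cpu];
}

int main(void)
{
        const struct cpumask *m = cpumask_of(3);

        printf("mask for cpu 3: %#lx\n", m->bits[0]);	/* prints 0x8 */
        return 0;
}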
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 68b5d8681cbb..15cf14e9bf26 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -626,8 +626,8 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 		cpumask_t *mask = &this_leaf->shared_cpu_map;
 
 		n = type?
-			cpulist_scnprintf(buf, len-2, *mask):
-			cpumask_scnprintf(buf, len-2, *mask);
+			cpulist_scnprintf(buf, len-2, mask) :
+			cpumask_scnprintf(buf, len-2, mask);
 		buf[n++] = '\n';
 		buf[n] = '\0';
 	}
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 3f0a3edf0a57..b5310ff1259e 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -248,7 +248,7 @@ static void hpet_legacy_clockevent_register(void)
 	 * Start hpet with the boot cpu mask and make it
 	 * global after the IO_APIC has been initialized.
 	 */
-	hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
 	clockevents_register_device(&hpet_clockevent);
 	global_clock_event = &hpet_clockevent;
 	printk(KERN_DEBUG "hpet clockevent registered\n");
@@ -303,7 +303,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
 			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 			hpet_setup_msi_irq(hdev->irq);
 			disable_irq(hdev->irq);
-			irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu));
+			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
 			enable_irq(hdev->irq);
 		}
 		break;
@@ -451,7 +451,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 		return -1;
 
 	disable_irq(dev->irq);
-	irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu));
+	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
 	enable_irq(dev->irq);
 
 	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
@@ -502,7 +502,7 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
 	/* 5 usec minimum reprogramming delta. */
 	evt->min_delta_ns = 5000;
 
-	evt->cpumask = cpumask_of_cpu(hdev->cpu);
+	evt->cpumask = cpumask_of(hdev->cpu);
 	clockevents_register_device(evt);
 }
 
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index c1b5e3ece1f2..10f92fb532f3 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -114,7 +114,7 @@ void __init setup_pit_timer(void)
 	 * Start pit with the boot cpu mask and make it global after the
 	 * IO_APIC has been initialized.
 	 */
-	pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+	pit_clockevent.cpumask = cpumask_of(smp_processor_id());
 	pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
 				     pit_clockevent.shift);
 	pit_clockevent.max_delta_ns =
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 679e7bbbbcd6..6dbf427175ff 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -361,7 +361,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
 
 static int assign_irq_vector(int irq, cpumask_t mask);
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ioapic_affinity_irq(unsigned int irq,
+				    const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
 	unsigned long flags;
@@ -369,15 +370,14 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 	cpumask_t tmp;
 	struct irq_desc *desc;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	cfg = irq_cfg(irq);
-	if (assign_irq_vector(irq, mask))
+	if (assign_irq_vector(irq, *mask))
 		return;
 
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 	/*
 	 * Only the high 8 bits are valid.
@@ -387,7 +387,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 	desc = irq_to_desc(irq);
 	spin_lock_irqsave(&ioapic_lock, flags);
 	__target_IO_APIC_irq(irq, dest, cfg->vector);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 #endif /* CONFIG_SMP */
@@ -2189,7 +2189,7 @@ static void ir_irq_migration(struct work_struct *work)
 			continue;
 		}
 
-		desc->chip->set_affinity(irq, desc->pending_mask);
+		desc->chip->set_affinity(irq, &desc->pending_mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
@@ -2198,18 +2198,19 @@ static void ir_irq_migration(struct work_struct *work)
 /*
  * Migrates the IRQ destination in the process context.
  */
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq(unsigned int irq,
+				       const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (desc->status & IRQ_LEVEL) {
 		desc->status |= IRQ_MOVE_PENDING;
-		desc->pending_mask = mask;
+		cpumask_copy(&desc->pending_mask, mask);
 		migrate_irq_remapped_level(irq);
 		return;
 	}
 
-	migrate_ioapic_irq(irq, mask);
+	migrate_ioapic_irq(irq, *mask);
 }
 #endif
 
@@ -3026,7 +3027,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 }
 
 #ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
@@ -3034,15 +3035,14 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	cpumask_t tmp;
 	struct irq_desc *desc;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	if (assign_irq_vector(irq, *mask))
 		return;
 
 	cfg = irq_cfg(irq);
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	read_msi_msg(irq, &msg);
@@ -3054,7 +3054,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 
 	write_msi_msg(irq, &msg);
 	desc = irq_to_desc(irq);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 #ifdef CONFIG_INTR_REMAP
@@ -3062,7 +3062,8 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
  * Migrate the MSI irq to another cpumask. This migration is
  * done in the process context using interrupt-remapping hardware.
  */
-static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void ir_set_msi_irq_affinity(unsigned int irq,
+				    const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
 	unsigned int dest;
@@ -3070,18 +3071,17 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	struct irte irte;
 	struct irq_desc *desc;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	if (get_irte(irq, &irte))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	if (assign_irq_vector(irq, *mask))
 		return;
 
 	cfg = irq_cfg(irq);
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	irte.vector = cfg->vector;
@@ -3105,7 +3105,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	}
 
 	desc = irq_to_desc(irq);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 #endif
 #endif /* CONFIG_SMP */
@@ -3307,7 +3307,7 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
@@ -3315,15 +3315,14 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	cpumask_t tmp;
 	struct irq_desc *desc;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	if (assign_irq_vector(irq, *mask))
 		return;
 
 	cfg = irq_cfg(irq);
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	dmar_msi_read(irq, &msg);
@@ -3335,7 +3334,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 
 	dmar_msi_write(irq, &msg);
 	desc = irq_to_desc(irq);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
@@ -3368,7 +3367,7 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_HPET_TIMER
 
 #ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
 	struct irq_desc *desc;
@@ -3376,15 +3375,14 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	unsigned int dest;
 	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	if (assign_irq_vector(irq, *mask))
 		return;
 
 	cfg = irq_cfg(irq);
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	hpet_msi_read(irq, &msg);
@@ -3396,7 +3394,7 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 
 	hpet_msi_write(irq, &msg);
 	desc = irq_to_desc(irq);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
@@ -3450,27 +3448,26 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 	write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
 	unsigned int dest;
 	cpumask_t tmp;
 	struct irq_desc *desc;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	if (assign_irq_vector(irq, *mask))
 		return;
 
 	cfg = irq_cfg(irq);
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
 	desc = irq_to_desc(irq);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 #endif
 
@@ -3793,10 +3790,10 @@ void __init setup_ioapic_dest(void)
 
 #ifdef CONFIG_INTR_REMAP
 			if (intr_remapping_enabled)
-				set_ir_ioapic_affinity_irq(irq, mask);
+				set_ir_ioapic_affinity_irq(irq, &mask);
 			else
 #endif
-				set_ioapic_affinity_irq(irq, mask);
+				set_ioapic_affinity_irq(irq, &mask);
 		}
 
 	}
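
Note on the pattern repeated throughout io_apic.c above: each set_affinity handler used to materialize the intersection with cpus_and() into an on-stack temporary just to test it with cpus_empty(). cpumask_intersects() answers the same question directly and can stop at the first common bit. A self-contained sketch of the two forms; the mask width and helper names here are illustrative, not the kernel's:

/* Sketch of the emptiness-test rewrite seen throughout io_apic.c.
 * Plain-C model; mask width and helpers are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define MASK_LONGS 8

struct cpumask { unsigned long bits[MASK_LONGS]; };

/* Old pattern: materialize the full intersection, then test it. */
static bool intersects_via_tmp(const struct cpumask *a, const struct cpumask *b)
{
        struct cpumask tmp;	/* the on-stack temporary the patch removes */
        bool empty = true;

        for (int i = 0; i < MASK_LONGS; i++) {
                tmp.bits[i] = a->bits[i] & b->bits[i];
                if (tmp.bits[i])
                        empty = false;
        }
        return !empty;
}

/* New pattern: short-circuit as soon as any common bit is found. */
static bool cpumask_intersects(const struct cpumask *a, const struct cpumask *b)
{
        for (int i = 0; i < MASK_LONGS; i++)
                if (a->bits[i] & b->bits[i])
                        return true;
        return false;
}

int main(void)
{
        struct cpumask requested = { .bits = { 0xf0 } };
        struct cpumask online    = { .bits = { 0x0f } };

        /* Disjoint masks: both forms report no intersection. */
        printf("old: %d, new: %d\n",
               intersects_via_tmp(&requested, &online),
               cpumask_intersects(&requested, &online));
        return 0;
}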
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a51382672de0..87870a49be4e 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -251,7 +251,7 @@ void fixup_irqs(cpumask_t map)
 			mask = map;
 		}
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+			desc->chip->set_affinity(irq, &mask);
 		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1df869e5bd0b..8cbd069e5b41 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -113,7 +113,7 @@ void fixup_irqs(cpumask_t map)
 			desc->chip->mask(irq);
 
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+			desc->chip->set_affinity(irq, &mask);
 		else if (!(warned++))
 			set_affinity = 0;
 
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 3b599518c322..c12314c9e86f 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -287,7 +287,7 @@ static struct clock_event_device mfgpt_clockevent = {
 	.set_mode = mfgpt_set_mode,
 	.set_next_event = mfgpt_next_event,
 	.rating = 250,
-	.cpumask = CPU_MASK_ALL,
+	.cpumask = cpu_all_mask,
 	.shift = 32
 };
 
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ae0c0d3bb770..8e8b1193add5 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -282,7 +282,7 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
 	else
 		cpu_clear(cpu, *mask);
 
-	cpulist_scnprintf(buf, sizeof(buf), *mask);
+	cpulist_scnprintf(buf, sizeof(buf), mask);
 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
 		enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
 }
@@ -334,25 +334,25 @@ static const cpumask_t cpu_mask_none;
 /*
  * Returns a pointer to the bitmask of CPUs on Node 'node'.
  */
-const cpumask_t *_node_to_cpumask_ptr(int node)
+const cpumask_t *cpumask_of_node(int node)
 {
 	if (node_to_cpumask_map == NULL) {
 		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
+			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
 			node);
 		dump_stack();
 		return (const cpumask_t *)&cpu_online_map;
 	}
 	if (node >= nr_node_ids) {
 		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n",
+			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
 			node, nr_node_ids);
 		dump_stack();
 		return &cpu_mask_none;
 	}
 	return &node_to_cpumask_map[node];
 }
-EXPORT_SYMBOL(_node_to_cpumask_ptr);
+EXPORT_SYMBOL(cpumask_of_node);
 
 /*
  * Returns a bitmask of CPUs on Node 'node'.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f8500c969442..c5392058cd07 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,14 +102,8 @@ EXPORT_SYMBOL(smp_num_siblings);
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-/* bitmap of online cpus */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
@@ -502,7 +496,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 }
 
 /* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
@@ -510,9 +504,14 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return per_cpu(cpu_core_map, cpu);
+		return &per_cpu(cpu_core_map, cpu);
 	else
-		return c->llc_shared_map;
+		return &c->llc_shared_map;
+}
+
+cpumask_t cpu_coregroup_map(int cpu)
+{
+	return *cpu_coregroup_mask(cpu);
 }
 
 static void impress_friends(void)
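
Note on the smpboot.c change above: rather than flip every caller at once, the patch introduces the pointer-returning cpu_coregroup_mask() and keeps the old by-value cpu_coregroup_map() as a thin wrapper around it, so unconverted callers keep compiling during the transition. A userspace sketch of this compatibility-wrapper pattern; the types and data here are illustrative, not the kernel's:

/* Sketch of the compatibility-wrapper pattern used for
 * cpu_coregroup_map(): the new API returns a pointer, the legacy API
 * is kept as a thin wrapper that copies through it. */
#include <stdio.h>

struct cpumask { unsigned long bits[1]; };

/* Two cores, CPUs 0-1 and 2-3 (illustrative topology). */
static struct cpumask core_masks[2] = { { { 0x3 } }, { { 0xc } } };

/* New interface: no copy, callers share the stored mask. */
static const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &core_masks[cpu / 2];
}

/* Legacy interface, preserved during the transition: by-value copy. */
static struct cpumask cpu_coregroup_map(int cpu)
{
        return *cpu_coregroup_mask(cpu);
}

int main(void)
{
        printf("cpu 1 coregroup (by ptr):   %#lx\n",
               cpu_coregroup_mask(1)->bits[0]);
        printf("cpu 2 coregroup (by value): %#lx\n",
               cpu_coregroup_map(2).bits[0]);
        return 0;
}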
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 254ee07f8635..c4c1f9e09402 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void)
 	/* Upper bound is clockevent's use of ulong for cycle deltas. */
 	evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt);
 	evt->min_delta_ns = clockevent_delta2ns(1, evt);
-	evt->cpumask = cpumask_of_cpu(cpu);
+	evt->cpumask = cpumask_of(cpu);
 
 	printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n",
 		evt->name, evt->mult, evt->shift);