author     Ingo Molnar <mingo@elte.hu>  2008-12-16 06:24:38 -0500
committer  Ingo Molnar <mingo@elte.hu>  2008-12-16 06:24:38 -0500
commit     c3895b01e80d120e8d19435f7cb1fa4c60c4e269 (patch)
tree       426f36e999289eeb7a41f6f4ca8ff45bfc2372aa /arch/x86/kernel
parent     3c68b4a8071fb11d905570d9b0e23034adc6c2bb (diff)
parent     968ea6d80e395cf11a51143cfa1b9a14ada676df (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-ingo into cpus4096
Diffstat (limited to 'arch/x86/kernel')

-rw-r--r--  arch/x86/kernel/apic.c                  8
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c   4
-rw-r--r--  arch/x86/kernel/hpet.c                  8
-rw-r--r--  arch/x86/kernel/i8253.c                 2
-rw-r--r--  arch/x86/kernel/io_apic.c              95
-rw-r--r--  arch/x86/kernel/irq_32.c                2
-rw-r--r--  arch/x86/kernel/irq_64.c                2
-rw-r--r--  arch/x86/kernel/mfgpt_32.c              2
-rw-r--r--  arch/x86/kernel/setup_percpu.c          2
-rw-r--r--  arch/x86/kernel/smpboot.c               6
-rw-r--r--  arch/x86/kernel/vmiclock_32.c           2

11 files changed, 63 insertions, 70 deletions
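The hunks below all follow the cpumask conversion pattern of the cpus4096 work this merge targets: clockevent `.cpumask` fields switch from the by-value `cpumask_of_cpu()` to the pointer-returning `cpumask_of()` (or from `CPU_MASK_ALL` to `cpu_all_mask`), and irq-chip `.set_affinity` handlers take a `const struct cpumask *` instead of a `cpumask_t`, so the mask is no longer copied onto the stack as NR_CPUS grows. A minimal sketch of the converted handler shape, assuming kernel context; `my_set_affinity()` and `my_program_destination()` are hypothetical stand-ins, while the cpumask helpers are the ones this diff switches to:

```c
/*
 * Sketch only, not code from this merge: my_set_affinity() and
 * my_program_destination() are hypothetical names. cpumask_intersects(),
 * cpumask_and(), cpumask_copy() and cpu_online_mask are the real
 * <linux/cpumask.h> interfaces used in the hunks below.
 */
static void my_set_affinity(unsigned int irq, const struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg = desc->chip_data;
        cpumask_t tmp;

        /* Ignore a request that names no online CPU. */
        if (!cpumask_intersects(mask, cpu_online_mask))
                return;

        /* Work on the intersection with the vector domain, as io_apic.c does. */
        cpumask_and(&tmp, &cfg->domain, mask);
        my_program_destination(irq, cpu_mask_to_apicid(tmp));

        /* Record the new affinity through the pointer, not by struct assignment. */
        cpumask_copy(&desc->affinity, mask);
}
```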
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 16f94879b525..b2cef49f3085 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -141,7 +141,7 @@ static int lapic_next_event(unsigned long delta,
                             struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
                               struct clock_event_device *evt);
-static void lapic_timer_broadcast(cpumask_t mask);
+static void lapic_timer_broadcast(const struct cpumask *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -453,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(cpumask_t mask)
+static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
-        send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+        send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
@@ -469,7 +469,7 @@ static void __cpuinit setup_APIC_timer(void)
         struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
         memcpy(levt, &lapic_clockevent, sizeof(*levt));
-        levt->cpumask = cpumask_of_cpu(smp_processor_id());
+        levt->cpumask = cpumask_of(smp_processor_id());
 
         clockevents_register_device(levt);
 }
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 3f46afbb1cf1..43ea612d3e9d 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -626,8 +626,8 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
                 cpumask_t *mask = &this_leaf->shared_cpu_map;
 
                 n = type?
-                        cpulist_scnprintf(buf, len-2, *mask):
-                        cpumask_scnprintf(buf, len-2, *mask);
+                        cpulist_scnprintf(buf, len-2, mask) :
+                        cpumask_scnprintf(buf, len-2, mask);
                 buf[n++] = '\n';
                 buf[n] = '\0';
         }
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 067d8de913f6..e76d7e272974 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -246,7 +246,7 @@ static void hpet_legacy_clockevent_register(void)
          * Start hpet with the boot cpu mask and make it
          * global after the IO_APIC has been initialized.
          */
-        hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+        hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
         clockevents_register_device(&hpet_clockevent);
         global_clock_event = &hpet_clockevent;
         printk(KERN_DEBUG "hpet clockevent registered\n");
@@ -301,7 +301,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
                         struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
                         hpet_setup_msi_irq(hdev->irq);
                         disable_irq(hdev->irq);
-                        irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu));
+                        irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
                         enable_irq(hdev->irq);
                 }
                 break;
@@ -449,7 +449,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
                 return -1;
 
         disable_irq(dev->irq);
-        irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu));
+        irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
         enable_irq(dev->irq);
 
         printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
@@ -500,7 +500,7 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
         /* 5 usec minimum reprogramming delta. */
         evt->min_delta_ns = 5000;
 
-        evt->cpumask = cpumask_of_cpu(hdev->cpu);
+        evt->cpumask = cpumask_of(hdev->cpu);
         clockevents_register_device(evt);
 }
 
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index c1b5e3ece1f2..10f92fb532f3 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -114,7 +114,7 @@ void __init setup_pit_timer(void)
          * Start pit with the boot cpu mask and make it global after the
          * IO_APIC has been initialized.
          */
-        pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+        pit_clockevent.cpumask = cpumask_of(smp_processor_id());
         pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
                                      pit_clockevent.shift);
         pit_clockevent.max_delta_ns =
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index a1a2e070f31a..d7f0993b8056 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -398,7 +398,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
 
 static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
 
-static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void set_ioapic_affinity_irq_desc(struct irq_desc *desc,
+                                         const struct cpumask *mask)
 {
         struct irq_cfg *cfg;
         unsigned long flags;
@@ -406,18 +407,17 @@ static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
         cpumask_t tmp;
         unsigned int irq;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        if (!cpumask_intersects(mask, cpu_online_mask))
                 return;
 
         irq = desc->irq;
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
+        if (assign_irq_vector(irq, cfg, *mask))
                 return;
 
-        set_extra_move_desc(desc, mask);
+        set_extra_move_desc(desc, *mask);
 
-        cpus_and(tmp, cfg->domain, mask);
+        cpumask_and(&tmp, &cfg->domain, mask);
         dest = cpu_mask_to_apicid(tmp);
         /*
          * Only the high 8 bits are valid.
@@ -426,11 +426,12 @@ static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
 
         spin_lock_irqsave(&ioapic_lock, flags);
         __target_IO_APIC_irq(irq, dest, cfg);
-        desc->affinity = mask;
+        cpumask_copy(&desc->affinity, mask);
         spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ioapic_affinity_irq(unsigned int irq,
+                                    const struct cpumask *mask)
 {
         struct irq_desc *desc;
 
@@ -2272,7 +2273,7 @@ static void ir_irq_migration(struct work_struct *work)
                         continue;
                 }
 
-                desc->chip->set_affinity(irq, desc->pending_mask);
+                desc->chip->set_affinity(irq, &desc->pending_mask);
                 spin_unlock_irqrestore(&desc->lock, flags);
         }
 }
@@ -2281,18 +2282,20 @@ static void ir_irq_migration(struct work_struct *work)
 /*
  * Migrates the IRQ destination in the process context.
  */
-static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
+                                            const struct cpumask *mask)
 {
         if (desc->status & IRQ_LEVEL) {
                 desc->status |= IRQ_MOVE_PENDING;
-                desc->pending_mask = mask;
+                cpumask_copy(&desc->pending_mask, mask);
                 migrate_irq_remapped_level_desc(desc);
                 return;
         }
 
         migrate_ioapic_irq_desc(desc, mask);
 }
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq(unsigned int irq,
+                                       const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
 
@@ -3146,7 +3149,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 }
 
 #ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
@@ -3154,17 +3157,16 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
         unsigned int dest;
         cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        if (!cpumask_intersects(mask, cpu_online_mask))
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
+        if (assign_irq_vector(irq, cfg, *mask))
                 return;
 
-        set_extra_move_desc(desc, mask);
+        set_extra_move_desc(desc, *mask);
 
-        cpus_and(tmp, cfg->domain, mask);
+        cpumask_and(&tmp, &cfg->domain, mask);
         dest = cpu_mask_to_apicid(tmp);
 
         read_msi_msg_desc(desc, &msg);
@@ -3175,14 +3177,15 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         write_msi_msg_desc(desc, &msg);
-        desc->affinity = mask;
+        cpumask_copy(&desc->affinity, mask);
 }
 #ifdef CONFIG_INTR_REMAP
 /*
  * Migrate the MSI irq to another cpumask. This migration is
  * done in the process context using interrupt-remapping hardware.
  */
-static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void ir_set_msi_irq_affinity(unsigned int irq,
+                                    const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
@@ -3190,20 +3193,19 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
         cpumask_t tmp, cleanup_mask;
         struct irte irte;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        if (!cpumask_intersects(mask, cpu_online_mask))
                 return;
 
         if (get_irte(irq, &irte))
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
+        if (assign_irq_vector(irq, cfg, *mask))
                 return;
 
         set_extra_move_desc(desc, mask);
 
-        cpus_and(tmp, cfg->domain, mask);
+        cpumask_and(&tmp, &cfg->domain, mask);
         dest = cpu_mask_to_apicid(tmp);
 
         irte.vector = cfg->vector;
@@ -3226,7 +3228,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
                 cfg->move_in_progress = 0;
         }
 
-        desc->affinity = mask;
+        cpumask_copy(&desc->affinity, mask);
 }
 
 #endif
@@ -3417,7 +3419,7 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
@@ -3425,17 +3427,16 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
         unsigned int dest;
         cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        if (!cpumask_intersects(mask, cpu_online_mask))
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
+        if (assign_irq_vector(irq, cfg, *mask))
                 return;
 
-        set_extra_move_desc(desc, mask);
+        set_extra_move_desc(desc, *mask);
 
-        cpus_and(tmp, cfg->domain, mask);
+        cpumask_and(&tmp, &cfg->domain, mask);
         dest = cpu_mask_to_apicid(tmp);
 
         dmar_msi_read(irq, &msg);
@@ -3446,7 +3447,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         dmar_msi_write(irq, &msg);
-        desc->affinity = mask;
+        cpumask_copy(&desc->affinity, mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -3480,7 +3481,7 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_HPET_TIMER
 
 #ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
@@ -3488,17 +3489,16 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
         unsigned int dest;
         cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        if (!cpumask_intersects(mask, cpu_online_mask))
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
+        if (assign_irq_vector(irq, cfg, *mask))
                 return;
 
-        set_extra_move_desc(desc, mask);
+        set_extra_move_desc(desc, *mask);
 
-        cpus_and(tmp, cfg->domain, mask);
+        cpumask_and(&tmp, &cfg->domain, mask);
         dest = cpu_mask_to_apicid(tmp);
 
         hpet_msi_read(irq, &msg);
@@ -3509,7 +3509,7 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         hpet_msi_write(irq, &msg);
-        desc->affinity = mask;
+        cpumask_copy(&desc->affinity, mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -3564,28 +3564,27 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
         write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         unsigned int dest;
         cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        if (!cpumask_intersects(mask, cpu_online_mask))
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
+        if (assign_irq_vector(irq, cfg, *mask))
                 return;
 
-        set_extra_move_desc(desc, mask);
+        set_extra_move_desc(desc, *mask);
 
-        cpus_and(tmp, cfg->domain, mask);
+        cpumask_and(&tmp, &cfg->domain, mask);
         dest = cpu_mask_to_apicid(tmp);
 
         target_ht_irq(irq, dest, cfg->vector);
-        desc->affinity = mask;
+        cpumask_copy(&desc->affinity, mask);
 }
 
 #endif
@@ -3928,10 +3927,10 @@ void __init setup_ioapic_dest(void)
 
 #ifdef CONFIG_INTR_REMAP
                 if (intr_remapping_enabled)
-                        set_ir_ioapic_affinity_irq_desc(desc, mask);
+                        set_ir_ioapic_affinity_irq_desc(desc, &mask);
                 else
 #endif
-                        set_ioapic_affinity_irq_desc(desc, mask);
+                        set_ioapic_affinity_irq_desc(desc, &mask);
         }
 
 }
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 119fc9c8ff7f..9cf9cbbf7a02 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -253,7 +253,7 @@ void fixup_irqs(cpumask_t map)
                         mask = map;
                 }
                 if (desc->chip->set_affinity)
-                        desc->chip->set_affinity(irq, mask);
+                        desc->chip->set_affinity(irq, &mask);
                 else if (desc->action && !(warned++))
                         printk("Cannot set affinity for irq %i\n", irq);
         }
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 900009c70591..27f2307b0a34 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -118,7 +118,7 @@ void fixup_irqs(cpumask_t map)
                         desc->chip->mask(irq);
 
                 if (desc->chip->set_affinity)
-                        desc->chip->set_affinity(irq, mask);
+                        desc->chip->set_affinity(irq, &mask);
                 else if (!(warned++))
                         set_affinity = 0;
 
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 3b599518c322..c12314c9e86f 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -287,7 +287,7 @@ static struct clock_event_device mfgpt_clockevent = {
         .set_mode = mfgpt_set_mode,
         .set_next_event = mfgpt_next_event,
         .rating = 250,
-        .cpumask = CPU_MASK_ALL,
+        .cpumask = cpu_all_mask,
         .shift = 32
 };
 
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ae0c0d3bb770..1c2084291f97 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -282,7 +282,7 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
         else
                 cpu_clear(cpu, *mask);
 
-        cpulist_scnprintf(buf, sizeof(buf), *mask);
+        cpulist_scnprintf(buf, sizeof(buf), mask);
         printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                 enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
 }
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0e9f446269f4..9d58134e0231 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,14 +102,8 @@ EXPORT_SYMBOL(smp_num_siblings);
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-/* bitmap of online cpus */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 254ee07f8635..c4c1f9e09402 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void)
         /* Upper bound is clockevent's use of ulong for cycle deltas. */
         evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt);
         evt->min_delta_ns = clockevent_delta2ns(1, evt);
-        evt->cpumask = cpumask_of_cpu(cpu);
+        evt->cpumask = cpumask_of(cpu);
 
         printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n",
                evt->name, evt->mult, evt->shift);
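On the clockevent side the field stays a pointer after this merge; a minimal sketch of how a per-CPU tick device would fill it in, assuming kernel context, with `my_clockevent` and `my_setup_clockevent()` as hypothetical names and `cpumask_of()`, `cpu_all_mask` and `clockevents_register_device()` being the interfaces used in the hunks above:

```c
/* Sketch only: my_clockevent and my_setup_clockevent() are hypothetical. */
static struct clock_event_device my_clockevent = {
        .name   = "my-timer",
        .rating = 250,
        /* A truly global timer would use cpu_all_mask instead,
         * as mfgpt_32.c now does. */
};

static void my_setup_clockevent(int cpu)
{
        /* Point at the shared constant mask for one CPU; no cpumask_t copy. */
        my_clockevent.cpumask = cpumask_of(cpu);
        clockevents_register_device(&my_clockevent);
}
```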