diff options
| author | Yinghai Lu <yinghai@kernel.org> | 2009-04-27 20:57:18 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-04-28 06:21:14 -0400 |
| commit | 9ec4fa271faf2db3b8e1419c998da1ca6b094eb6 (patch) | |
| tree | 2dd817bd41526fc1d1252e50b0b1a0714ae34a7f | |
| parent | e25c2c873f59c57cf1c2b1979cc8fb01958305ef (diff) | |
irq, cpumask: correct CPUMASKS_OFFSTACK typo and fix fallout
CPUMASKS_OFFSTACK is not defined anywhere (it is CPUMASK_OFFSTACK).
It is a typo, and init_alloc_desc_masks() is called before it sets
affinity to all CPUs...
Split init_alloc_desc_masks() into alloc_desc_masks() and init_desc_masks().
Also use CONFIG_CPUMASK_OFFSTACK in alloc_desc_masks().
[ Impact: fix smp_affinity copying/setup when moving irq_desc between CPUs ]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
LKML-Reference: <49F6546E.3040406@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | include/linux/irq.h | 27 | ||||
| -rw-r--r-- | kernel/irq/handle.c | 9 | ||||
| -rw-r--r-- | kernel/irq/numa_migrate.c | 2 |
3 files changed, 25 insertions, 13 deletions
diff --git a/include/linux/irq.h b/include/linux/irq.h index b7cbeed972e4..c4953cf27e5e 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -424,27 +424,25 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | |||
| 424 | 424 | ||
| 425 | #ifdef CONFIG_SMP | 425 | #ifdef CONFIG_SMP |
| 426 | /** | 426 | /** |
| 427 | * init_alloc_desc_masks - allocate cpumasks for irq_desc | 427 | * alloc_desc_masks - allocate cpumasks for irq_desc |
| 428 | * @desc: pointer to irq_desc struct | 428 | * @desc: pointer to irq_desc struct |
| 429 | * @cpu: cpu which will be handling the cpumasks | 429 | * @cpu: cpu which will be handling the cpumasks |
| 430 | * @boot: true if need bootmem | 430 | * @boot: true if need bootmem |
| 431 | * | 431 | * |
| 432 | * Allocates affinity and pending_mask cpumask if required. | 432 | * Allocates affinity and pending_mask cpumask if required. |
| 433 | * Returns true if successful (or not required). | 433 | * Returns true if successful (or not required). |
| 434 | * Side effect: affinity has all bits set, pending_mask has all bits clear. | ||
| 435 | */ | 434 | */ |
| 436 | static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | 435 | static inline bool alloc_desc_masks(struct irq_desc *desc, int cpu, |
| 437 | bool boot) | 436 | bool boot) |
| 438 | { | 437 | { |
| 438 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
| 439 | int node; | 439 | int node; |
| 440 | 440 | ||
| 441 | if (boot) { | 441 | if (boot) { |
| 442 | alloc_bootmem_cpumask_var(&desc->affinity); | 442 | alloc_bootmem_cpumask_var(&desc->affinity); |
| 443 | cpumask_setall(desc->affinity); | ||
| 444 | 443 | ||
| 445 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 444 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 446 | alloc_bootmem_cpumask_var(&desc->pending_mask); | 445 | alloc_bootmem_cpumask_var(&desc->pending_mask); |
| 447 | cpumask_clear(desc->pending_mask); | ||
| 448 | #endif | 446 | #endif |
| 449 | return true; | 447 | return true; |
| 450 | } | 448 | } |
| @@ -453,18 +451,25 @@ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | |||
| 453 | 451 | ||
| 454 | if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node)) | 452 | if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node)) |
| 455 | return false; | 453 | return false; |
| 456 | cpumask_setall(desc->affinity); | ||
| 457 | 454 | ||
| 458 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 455 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 459 | if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) { | 456 | if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) { |
| 460 | free_cpumask_var(desc->affinity); | 457 | free_cpumask_var(desc->affinity); |
| 461 | return false; | 458 | return false; |
| 462 | } | 459 | } |
| 463 | cpumask_clear(desc->pending_mask); | 460 | #endif |
| 464 | #endif | 461 | #endif |
| 465 | return true; | 462 | return true; |
| 466 | } | 463 | } |
| 467 | 464 | ||
| 465 | static inline void init_desc_masks(struct irq_desc *desc) | ||
| 466 | { | ||
| 467 | cpumask_setall(desc->affinity); | ||
| 468 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 469 | cpumask_clear(desc->pending_mask); | ||
| 470 | #endif | ||
| 471 | } | ||
| 472 | |||
| 468 | /** | 473 | /** |
| 469 | * init_copy_desc_masks - copy cpumasks for irq_desc | 474 | * init_copy_desc_masks - copy cpumasks for irq_desc |
| 470 | * @old_desc: pointer to old irq_desc struct | 475 | * @old_desc: pointer to old irq_desc struct |
| @@ -478,7 +483,7 @@ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | |||
| 478 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 483 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, |
| 479 | struct irq_desc *new_desc) | 484 | struct irq_desc *new_desc) |
| 480 | { | 485 | { |
| 481 | #ifdef CONFIG_CPUMASKS_OFFSTACK | 486 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 482 | cpumask_copy(new_desc->affinity, old_desc->affinity); | 487 | cpumask_copy(new_desc->affinity, old_desc->affinity); |
| 483 | 488 | ||
| 484 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 489 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| @@ -499,12 +504,16 @@ static inline void free_desc_masks(struct irq_desc *old_desc, | |||
| 499 | 504 | ||
| 500 | #else /* !CONFIG_SMP */ | 505 | #else /* !CONFIG_SMP */ |
| 501 | 506 | ||
| 502 | static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | 507 | static inline bool alloc_desc_masks(struct irq_desc *desc, int cpu, |
| 503 | bool boot) | 508 | bool boot) |
| 504 | { | 509 | { |
| 505 | return true; | 510 | return true; |
| 506 | } | 511 | } |
| 507 | 512 | ||
| 513 | static inline void init_desc_masks(struct irq_desc *desc) | ||
| 514 | { | ||
| 515 | } | ||
| 516 | |||
| 508 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 517 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, |
| 509 | struct irq_desc *new_desc) | 518 | struct irq_desc *new_desc) |
| 510 | { | 519 | { |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index d82142be8dd2..882c79800107 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -115,10 +115,11 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) | |||
| 115 | printk(KERN_ERR "can not alloc kstat_irqs\n"); | 115 | printk(KERN_ERR "can not alloc kstat_irqs\n"); |
| 116 | BUG_ON(1); | 116 | BUG_ON(1); |
| 117 | } | 117 | } |
| 118 | if (!init_alloc_desc_masks(desc, cpu, false)) { | 118 | if (!alloc_desc_masks(desc, cpu, false)) { |
| 119 | printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); | 119 | printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); |
| 120 | BUG_ON(1); | 120 | BUG_ON(1); |
| 121 | } | 121 | } |
| 122 | init_desc_masks(desc); | ||
| 122 | arch_init_chip_data(desc, cpu); | 123 | arch_init_chip_data(desc, cpu); |
| 123 | } | 124 | } |
| 124 | 125 | ||
| @@ -169,7 +170,8 @@ int __init early_irq_init(void) | |||
| 169 | desc[i].irq = i; | 170 | desc[i].irq = i; |
| 170 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; | 171 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; |
| 171 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 172 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
| 172 | init_alloc_desc_masks(&desc[i], 0, true); | 173 | alloc_desc_masks(&desc[i], 0, true); |
| 174 | init_desc_masks(&desc[i]); | ||
| 173 | irq_desc_ptrs[i] = desc + i; | 175 | irq_desc_ptrs[i] = desc + i; |
| 174 | } | 176 | } |
| 175 | 177 | ||
| @@ -256,7 +258,8 @@ int __init early_irq_init(void) | |||
| 256 | 258 | ||
| 257 | for (i = 0; i < count; i++) { | 259 | for (i = 0; i < count; i++) { |
| 258 | desc[i].irq = i; | 260 | desc[i].irq = i; |
| 259 | init_alloc_desc_masks(&desc[i], 0, true); | 261 | alloc_desc_masks(&desc[i], 0, true); |
| 262 | init_desc_masks(&desc[i]); | ||
| 260 | desc[i].kstat_irqs = kstat_irqs_all[i]; | 263 | desc[i].kstat_irqs = kstat_irqs_all[i]; |
| 261 | } | 264 | } |
| 262 | return arch_early_irq_init(); | 265 | return arch_early_irq_init(); |
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 44bbdcbaf8d2..5760d7251626 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c | |||
| @@ -37,7 +37,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, | |||
| 37 | struct irq_desc *desc, int cpu) | 37 | struct irq_desc *desc, int cpu) |
| 38 | { | 38 | { |
| 39 | memcpy(desc, old_desc, sizeof(struct irq_desc)); | 39 | memcpy(desc, old_desc, sizeof(struct irq_desc)); |
| 40 | if (!init_alloc_desc_masks(desc, cpu, false)) { | 40 | if (!alloc_desc_masks(desc, cpu, false)) { |
| 41 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " | 41 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " |
| 42 | "for migration.\n", irq); | 42 | "for migration.\n", irq); |
| 43 | return false; | 43 | return false; |
