| author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-15 21:07:59 -0400 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-15 21:07:59 -0400 |
| commit | 84c3d4aaec3338201b449034beac41635866bddf (patch) | |
| tree | 3412951682fb2dd4feb8a5532f8efbaf8b345933 /kernel/irq | |
| parent | 43d2548bb2ef7e6d753f91468a746784041e522d (diff) | |
| parent | fafa3a3f16723997f039a0193997464d66dafd8f (diff) | |
Merge commit 'origin/master'
Manual merge of:
arch/powerpc/Kconfig
arch/powerpc/kernel/stacktrace.c
arch/powerpc/mm/slice.c
arch/ppc/kernel/smp.c
Diffstat (limited to 'kernel/irq')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/irq/manage.c | 33 |
| -rw-r--r-- | kernel/irq/proc.c | 59 |

2 files changed, 83 insertions, 9 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 46d6611a33bb..77a51be36010 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,8 @@
 
 #ifdef CONFIG_SMP
 
+cpumask_t irq_default_affinity = CPU_MASK_ALL;
+
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -95,6 +97,27 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 	return 0;
 }
 
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+/*
+ * Generic version of the affinity autoselector.
+ */
+int irq_select_affinity(unsigned int irq)
+{
+	cpumask_t mask;
+
+	if (!irq_can_set_affinity(irq))
+		return 0;
+
+	cpus_and(mask, cpu_online_map, irq_default_affinity);
+
+	irq_desc[irq].affinity = mask;
+	irq_desc[irq].chip->set_affinity(irq, mask);
+
+	set_balance_irq_affinity(irq, mask);
+	return 0;
+}
+#endif
+
 #endif
 
 /**
@@ -354,7 +377,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
-			if (desc->chip && desc->chip->set_type)
+			if (desc->chip->set_type)
 				desc->chip->set_type(irq,
 						new->flags & IRQF_TRIGGER_MASK);
 			else
@@ -364,8 +387,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 				 */
 				printk(KERN_WARNING "No IRQF_TRIGGER set_type "
 					"function for IRQ %d (%s)\n", irq,
-					desc->chip ? desc->chip->name :
-					"unknown");
+					desc->chip->name);
 		} else
 			compat_irq_chip_set_default_handler(desc);
 
@@ -382,6 +404,9 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		} else
 			/* Undo nested disables: */
 			desc->depth = 1;
+
+		/* Set default affinity mask once everything is setup */
+		irq_select_affinity(irq);
 	}
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
@@ -571,8 +596,6 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	action->next = NULL;
 	action->dev_id = dev_id;
 
-	select_smp_affinity(irq);
-
 #ifdef CONFIG_DEBUG_SHIRQ
 	if (irqflags & IRQF_SHARED) {
 		/*
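A reading note on the manage.c hunks above: the generic autoselector is wrapped in `#ifndef CONFIG_AUTO_IRQ_AFFINITY`, which suggests that an architecture selecting that option is expected to supply its own irq_select_affinity(). The sketch below is purely illustrative of what such an override might look like; the round-robin policy, the static rotor, and the assumption that irq_default_affinity is visible to arch code (e.g. via linux/interrupt.h) are invented for this example and are not taken from the merge.

```c
/*
 * Illustrative sketch only -- NOT part of this merge.  A hypothetical
 * architecture-specific irq_select_affinity() that spreads IRQs
 * round-robin over the CPUs that are both online and allowed by
 * irq_default_affinity.
 */
#include <linux/interrupt.h>	/* irq_can_set_affinity(), irq_default_affinity (assumed visible) */
#include <linux/irq.h>		/* irq_desc[] */
#include <linux/cpumask.h>	/* cpumask_t helpers */

#ifdef CONFIG_AUTO_IRQ_AFFINITY
int irq_select_affinity(unsigned int irq)
{
	static unsigned int last_cpu;	/* crude per-boot rotor (assumption) */
	cpumask_t allowed, target;
	unsigned int cpu;

	if (!irq_can_set_affinity(irq))
		return 0;

	/* Only consider CPUs that are online AND in the default mask. */
	cpus_and(allowed, cpu_online_map, irq_default_affinity);
	if (cpus_empty(allowed))
		return -EINVAL;

	/* Pick the next allowed CPU after the one chosen last time. */
	cpu = next_cpu(last_cpu, allowed);
	if (cpu >= NR_CPUS)
		cpu = first_cpu(allowed);
	last_cpu = cpu;

	cpus_clear(target);
	cpu_set(cpu, target);

	irq_desc[irq].affinity = target;
	irq_desc[irq].chip->set_affinity(irq, target);
	return 0;
}
#endif
```

By contrast, the generic version above simply applies the whole intersected mask to the IRQ and hands it to set_balance_irq_affinity(), leaving any per-CPU spreading to later balancing.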
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index c2f2ccb0549a..6c6d35d68ee9 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -44,7 +44,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 				   unsigned long count, void *data)
 {
 	unsigned int irq = (int)(long)data, full_count = count, err;
-	cpumask_t new_value, tmp;
+	cpumask_t new_value;
 
 	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
 	    irq_balancing_disabled(irq))
@@ -62,17 +62,51 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	cpus_and(tmp, new_value, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpus_intersects(new_value, cpu_online_map))
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return select_smp_affinity(irq) ? -EINVAL : full_count;
+		return irq_select_affinity(irq) ? -EINVAL : full_count;
 
 	irq_set_affinity(irq, new_value);
 
 	return full_count;
 }
 
+static int default_affinity_read(char *page, char **start, off_t off,
+				  int count, int *eof, void *data)
+{
+	int len = cpumask_scnprintf(page, count, irq_default_affinity);
+	if (count - len < 2)
+		return -EINVAL;
+	len += sprintf(page + len, "\n");
+	return len;
+}
+
+static int default_affinity_write(struct file *file, const char __user *buffer,
+				   unsigned long count, void *data)
+{
+	unsigned int full_count = count, err;
+	cpumask_t new_value;
+
+	err = cpumask_parse_user(buffer, count, new_value);
+	if (err)
+		return err;
+
+	if (!is_affinity_mask_valid(new_value))
+		return -EINVAL;
+
+	/*
+	 * Do not allow disabling IRQs completely - it's a too easy
+	 * way to make the system unusable accidentally :-) At least
+	 * one online CPU still has to be targeted.
+	 */
+	if (!cpus_intersects(new_value, cpu_online_map))
+		return -EINVAL;
+
+	irq_default_affinity = new_value;
+
+	return full_count;
+}
 #endif
 
 static int irq_spurious_read(char *page, char **start, off_t off,
@@ -171,6 +205,21 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
 	remove_proc_entry(action->dir->name, irq_desc[irq].dir);
 }
 
+void register_default_affinity_proc(void)
+{
+#ifdef CONFIG_SMP
+	struct proc_dir_entry *entry;
+
+	/* create /proc/irq/default_smp_affinity */
+	entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir);
+	if (entry) {
+		entry->data = NULL;
+		entry->read_proc = default_affinity_read;
+		entry->write_proc = default_affinity_write;
+	}
+#endif
+}
+
 void init_irq_proc(void)
 {
 	int i;
@@ -180,6 +229,8 @@ void init_irq_proc(void)
 	if (!root_irq_dir)
 		return;
 
+	register_default_affinity_proc();
+
 	/*
 	 * Create entries for all existing IRQs.
 	 */
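The proc.c hunks add a root-only (mode 0600) /proc/irq/default_smp_affinity entry whose contents are a hex CPU mask, printed by cpumask_scnprintf() and parsed by cpumask_parse_user(). Below is a minimal userspace sketch of how the file could be exercised; the mask value 0x3 (CPUs 0-1) is an arbitrary example, and per default_affinity_write() above the write is rejected unless the mask intersects the online CPUs.

```c
/* Hypothetical userspace test for /proc/irq/default_smp_affinity.
 * Not part of the merge; shown only to illustrate the new interface.
 * Must run as root, since the proc entry is created with mode 0600.
 */
#include <stdio.h>

int main(void)
{
	char mask[256];
	FILE *f = fopen("/proc/irq/default_smp_affinity", "r");

	if (!f) {
		perror("open for read");
		return 1;
	}
	if (fgets(mask, sizeof(mask), f))
		printf("current default IRQ affinity: %s", mask);
	fclose(f);

	/* Restrict the future default affinity to CPUs 0-1 (mask 0x3). */
	f = fopen("/proc/irq/default_smp_affinity", "w");
	if (!f) {
		perror("open for write");
		return 1;
	}
	if (fputs("3\n", f) == EOF || fflush(f) == EOF)
		perror("write");
	fclose(f);
	return 0;
}
```

Newly requested IRQs then pick up this mask through the irq_select_affinity() call added to setup_irq() in manage.c; IRQs that are already set up keep whatever affinity they had.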
