| field | value | date |
|---|---|---|
| author | Rusty Russell <rusty@rustcorp.com.au> | 2008-12-31 18:42:26 -0500 |
| committer | Rusty Russell <rusty@rustcorp.com.au> | 2008-12-31 18:42:26 -0500 |
| commit | d036e67b40f52bdd95392390108defbac7e53837 (patch) | |
| tree | 4a00537671036c955c98891af9f4729332b35c50 | |
| parent | 6b954823c24f04ed026a8517f6bab5abda279db8 (diff) | |
cpumask: convert kernel/irq
Impact: Reduce stack usage, use new cpumask API. ALPHA mod!
The main change is that irq_default_affinity becomes a cpumask_var_t, so
it is now treated as a pointer (this affects alpha).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
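
For readers who have not followed the cpumask rework, the sketch below (not part of the patch; names such as `example_mask` are made up) illustrates what "treat it as a pointer" means in practice: a `cpumask_var_t` may need explicit allocation, and it is passed to the `cpumask_*` helpers without taking its address. The helpers shown are the same ones the patch itself uses.

```c
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical mask, used only for illustration. */
static cpumask_var_t example_mask;

static int __init example_mask_init(void)
{
	/* With CONFIG_CPUMASK_OFFSTACK=n this is a no-op that succeeds;
	 * with it enabled, the mask storage is allocated separately. */
	if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_setall(example_mask);           /* was: mask = CPU_MASK_ALL; */

	if (cpumask_test_cpu(0, example_mask))  /* was: cpu_isset(0, mask)   */
		printk(KERN_INFO "CPU 0 is in the example mask\n");

	return 0;
}
core_initcall(example_mask_init);
```

This is the same pattern the kernel/irq/manage.c hunk below applies to irq_default_affinity.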
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/alpha/kernel/irq.c | 3 |
| -rw-r--r-- | include/linux/interrupt.h | 2 |
| -rw-r--r-- | kernel/irq/manage.c | 11 |
| -rw-r--r-- | kernel/irq/proc.c | 32 |

4 files changed, 33 insertions, 15 deletions
```diff
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index d0f1620007f7..703731accda6 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -50,7 +50,8 @@ int irq_select_affinity(unsigned int irq)
 	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
 		return 1;
 
-	while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity))
+	while (!cpu_possible(cpu) ||
+	       !cpumask_test_cpu(cpu, irq_default_affinity))
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
```
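
Read in isolation, the hunk above is just a line split plus a dereference change. As a reading aid, here is the same round-robin CPU search written out as a standalone helper; this is a paraphrase for illustration only (the name `pick_next_cpu` is invented), not code from the tree.

```c
#include <linux/cpumask.h>
#include <linux/interrupt.h>	/* extern cpumask_var_t irq_default_affinity */
#include <linux/threads.h>	/* NR_CPUS */

/*
 * Advance 'cpu' (wrapping at NR_CPUS) until it names a possible CPU that
 * is also set in the default IRQ affinity mask.  The /proc handler below
 * keeps at least one online CPU in that mask, so the scan finds a hit.
 * Note that irq_default_affinity is now passed as a pointer.
 */
static unsigned int pick_next_cpu(unsigned int cpu)
{
	while (!cpu_possible(cpu) ||
	       !cpumask_test_cpu(cpu, irq_default_affinity))
		cpu = (cpu < (NR_CPUS - 1) ? cpu + 1 : 0);
	return cpu;
}
```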
```diff
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index dfaee6bd265b..91f1ef8e5810 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -109,7 +109,7 @@ extern void enable_irq(unsigned int irq);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 
-extern cpumask_t irq_default_affinity;
+extern cpumask_var_t irq_default_affinity;
 
 extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
```
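
This header change is the whole reason the call sites lose their `&`: `cpumask_var_t` is designed so the same source works whether the mask lives inline or in separately allocated storage. Paraphrased from `include/linux/cpumask.h` (the exact definition may differ between kernel versions), it looks roughly like this:

```c
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Large NR_CPUS: the storage is allocated by alloc_cpumask_var(). */
typedef struct cpumask *cpumask_var_t;
#else
/* Small NR_CPUS: a one-element array, so the variable's name still
 * decays to 'struct cpumask *' at call sites and allocation is a no-op. */
typedef struct cpumask cpumask_var_t[1];
#endif
```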
```diff
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 61c4a9b62165..cd0cd8dcb345 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,15 @@
 #include "internals.h"
 
 #ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
 
-cpumask_t irq_default_affinity = CPU_MASK_ALL;
+static int init_irq_default_affinity(void)
+{
+	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
+	cpumask_setall(irq_default_affinity);
+	return 0;
+}
+core_initcall(init_irq_default_affinity);
 
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -127,7 +134,7 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 		desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity);
+	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
 	desc->chip->set_affinity(irq, &desc->affinity);
 
```
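
One detail worth flagging in the hunk above: the return value of `alloc_cpumask_var()` is ignored in `init_irq_default_affinity()`. That is harmless when `CONFIG_CPUMASK_OFFSTACK` is off (the call always succeeds), but a more defensive variant of the same initcall could propagate the failure. The sketch below is illustrative only, not part of the patch:

```c
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>	/* extern cpumask_var_t irq_default_affinity */

/* Same shape as the patch's init_irq_default_affinity(), but reporting
 * an allocation failure instead of continuing with an unset mask. */
static int __init init_irq_default_affinity_checked(void)
{
	if (!alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL))
		return -ENOMEM;
	cpumask_setall(irq_default_affinity);
	return 0;
}
core_initcall(init_irq_default_affinity_checked);
```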
```diff
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d2c0e5ee53c5..2abd3a7716ed 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	cpumask_t *mask = &desc->affinity;
+	const struct cpumask *mask = &desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
@@ -93,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = {
 
 static int default_affinity_show(struct seq_file *m, void *v)
 {
-	seq_cpumask(m, &irq_default_affinity);
+	seq_cpumask(m, irq_default_affinity);
 	seq_putc(m, '\n');
 	return 0;
 }
@@ -101,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v)
 static ssize_t default_affinity_write(struct file *file,
 		const char __user *buffer, size_t count, loff_t *ppos)
 {
-	cpumask_t new_value;
+	cpumask_var_t new_value;
 	int err;
 
-	err = cpumask_parse_user(buffer, count, &new_value);
+	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
-		return err;
+		goto out;
 
-	if (!is_affinity_mask_valid(new_value))
-		return -EINVAL;
+	if (!is_affinity_mask_valid(new_value)) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!cpus_intersects(new_value, cpu_online_map))
-		return -EINVAL;
+	if (!cpumask_intersects(new_value, cpu_online_mask)) {
+		err = -EINVAL;
+		goto out;
+	}
 
-	irq_default_affinity = new_value;
+	cpumask_copy(irq_default_affinity, new_value);
+	err = count;
 
-	return count;
+out:
+	free_cpumask_var(new_value);
+	return err;
 }
 
 static int default_affinity_open(struct inode *inode, struct file *file)
```
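
The rewritten `default_affinity_write()` backs the `/proc/irq/default_smp_affinity` file, which is registered elsewhere in this same source file. As a usage sketch only (run as root; the mask value is an arbitrary example and error handling is kept minimal), a userspace program could exercise it like this:

```c
/* Userspace sketch: restrict the default IRQ affinity to CPUs 0-3.
 * The file takes a hex CPU bitmask, as parsed by cpumask_parse_user(). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *mask = "f\n";	/* CPUs 0-3 */
	int fd = open("/proc/irq/default_smp_affinity", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, mask, strlen(mask)) < 0)
		perror("write");	/* e.g. EINVAL if no online CPU is in the mask */
	close(fd);
	return 0;
}
```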
