author		Rusty Russell <rusty@rustcorp.com.au>	2008-12-31 18:42:26 -0500
committer	Rusty Russell <rusty@rustcorp.com.au>	2008-12-31 18:42:26 -0500
commit		d036e67b40f52bdd95392390108defbac7e53837 (patch)
tree		4a00537671036c955c98891af9f4729332b35c50 /kernel/irq
parent		6b954823c24f04ed026a8517f6bab5abda279db8 (diff)
cpumask: convert kernel/irq

Impact: Reduce stack usage, use new cpumask API. ALPHA mod!

The main change is that irq_default_affinity becomes a cpumask_var_t,
so treat it as a pointer (this affects alpha).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
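For context, the conversion follows the usual cpumask_var_t idiom: allocate the mask once at init time, fill it, and from then on pass it around as a struct cpumask pointer. The sketch below is illustrative only and not part of the patch; example_mask and example_mask_init() are hypothetical names. With CONFIG_CPUMASK_OFFSTACK=y the cpumask storage is heap-allocated, which is where the stack-usage reduction comes from; without it, cpumask_var_t is a plain one-element array and alloc/free are effectively no-ops.

/* Illustrative sketch only -- hypothetical example_mask, not from the patch. */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>

static cpumask_var_t example_mask;

static int __init example_mask_init(void)
{
	/* With CONFIG_CPUMASK_OFFSTACK=y this allocates heap storage;
	 * otherwise cpumask_var_t is a fixed array and the call succeeds. */
	if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_setall(example_mask);	/* default to all CPUs */
	return 0;
}
core_initcall(example_mask_init);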
Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/manage.c	11
-rw-r--r--	kernel/irq/proc.c	32
2 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 61c4a9b62165..cd0cd8dcb345 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,15 @@
 #include "internals.h"
 
 #ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
 
-cpumask_t irq_default_affinity = CPU_MASK_ALL;
+static int init_irq_default_affinity(void)
+{
+	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
+	cpumask_setall(irq_default_affinity);
+	return 0;
+}
+core_initcall(init_irq_default_affinity);
 
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -127,7 +134,7 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 		desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity);
+	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
 	desc->chip->set_affinity(irq, &desc->affinity);
 
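Because irq_default_affinity is now pointer-typed, callers pass it directly where they previously took its address, while a plain cpumask member such as desc->affinity still needs '&'. A minimal sketch of that calling convention, using a hypothetical caller that is not part of the patch:

/* Hypothetical caller, shown only to illustrate pointer-style cpumask use. */
static void example_pick_affinity(struct irq_desc *desc)
{
	/* irq_default_affinity is already a struct cpumask *, so no '&'. */
	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
}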
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d2c0e5ee53c5..2abd3a7716ed 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	cpumask_t *mask = &desc->affinity;
+	const struct cpumask *mask = &desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
@@ -93,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = {
 
 static int default_affinity_show(struct seq_file *m, void *v)
 {
-	seq_cpumask(m, &irq_default_affinity);
+	seq_cpumask(m, irq_default_affinity);
 	seq_putc(m, '\n');
 	return 0;
 }
@@ -101,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v)
 static ssize_t default_affinity_write(struct file *file,
 		const char __user *buffer, size_t count, loff_t *ppos)
 {
-	cpumask_t new_value;
+	cpumask_var_t new_value;
 	int err;
 
-	err = cpumask_parse_user(buffer, count, &new_value);
+	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
-		return err;
+		goto out;
 
-	if (!is_affinity_mask_valid(new_value))
-		return -EINVAL;
+	if (!is_affinity_mask_valid(new_value)) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!cpus_intersects(new_value, cpu_online_map))
-		return -EINVAL;
+	if (!cpumask_intersects(new_value, cpu_online_mask)) {
+		err = -EINVAL;
+		goto out;
+	}
 
-	irq_default_affinity = new_value;
+	cpumask_copy(irq_default_affinity, new_value);
+	err = count;
 
-	return count;
+out:
+	free_cpumask_var(new_value);
+	return err;
 }
 
 static int default_affinity_open(struct inode *inode, struct file *file)
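The handlers above back the /proc/irq/default_smp_affinity entry registered by kernel/irq/proc.c. A small userspace sketch, assuming that path, which reads the current mask and, when run as root, narrows it; the value "f" (CPUs 0-3) is only an example:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f;

	/* Read the current default IRQ affinity (printed as a hex cpumask). */
	f = fopen("/proc/irq/default_smp_affinity", "r");
	if (!f) {
		perror("open /proc/irq/default_smp_affinity");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("default affinity: %s", buf);
	fclose(f);

	/* Writing needs root. A mask covering no online CPU is rejected
	 * with -EINVAL by default_affinity_write() above. */
	f = fopen("/proc/irq/default_smp_affinity", "w");
	if (f) {
		fputs("f\n", f);	/* example: restrict new IRQs to CPUs 0-3 */
		fclose(f);
	}
	return 0;
}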