author	Max Krasnyansky <maxk@qualcomm.com>	2008-05-29 14:02:52 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-06-05 09:18:30 -0400
commit	18404756765c713a0be4eb1082920c04822ce588 (patch)
tree	ed426f8fe90bff1ffd854074a2e4b370dd6821f8 /kernel/irq
parent	c3b25b32e8bef526cca748e1ba023c6bdd705a99 (diff)
genirq: Expose default irq affinity mask (take 3)
The current IRQ affinity interface does not provide a way to set the
affinity of IRQs that will be allocated/activated in the future.
This patch creates /proc/irq/default_smp_affinity, which lets users set a
default affinity mask for newly allocated IRQs. Changing the default does
not affect the affinity masks of currently active IRQs; those have to be
changed explicitly.
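As a concrete illustration (not part of the original patch), here is a
minimal userspace sketch that reads the current default mask and then
restricts newly allocated IRQs to CPUs 0-3. The proc path and the hex
cpumask format come from the patch below; the program itself and the
example mask value "f" are assumptions for demonstration only:

/* Hypothetical userspace sketch: query and update the default IRQ
 * affinity mask via the proc file added by this patch. Assumes an
 * SMP kernel with this patch applied; writing requires root. */
#include <stdio.h>

#define DEFAULT_AFFINITY "/proc/irq/default_smp_affinity"

int main(void)
{
	char buf[64];
	FILE *f;

	/* Read the current default mask (printed as a hex cpumask). */
	f = fopen(DEFAULT_AFFINITY, "r");
	if (!f) {
		perror(DEFAULT_AFFINITY);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current default: %s", buf);
	fclose(f);

	/* Restrict newly allocated IRQs to CPUs 0-3 ("f" = bits 0-3).
	 * Already-active IRQs keep their masks, per the commit message. */
	f = fopen(DEFAULT_AFFINITY, "w");
	if (!f) {
		perror(DEFAULT_AFFINITY);
		return 1;
	}
	fputs("f\n", f);
	fclose(f);
	return 0;
}

Note that a mask which does not intersect the online CPU map is rejected
with -EINVAL by the new default_affinity_write() handler, per the second
proc.c hunk below.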
Updated based on Paul J's comments and added some more documentation.
Signed-off-by: Max Krasnyansky <maxk@qualcomm.com>
Cc: pj@sgi.com
Cc: a.p.zijlstra@chello.nl
Cc: tglx@linutronix.de
Cc: rdunlap@xenotime.net
Cc: mingo@elte.hu
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/manage.c	28
-rw-r--r--	kernel/irq/proc.c	59
2 files changed, 81 insertions, 6 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 46d6611a33bb..469814e9b9ee 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,8 @@
 
 #ifdef CONFIG_SMP
 
+cpumask_t irq_default_affinity = CPU_MASK_ALL;
+
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -95,6 +97,27 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 	return 0;
 }
 
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+/*
+ * Generic version of the affinity autoselector.
+ */
+int irq_select_affinity(unsigned int irq)
+{
+	cpumask_t mask;
+
+	if (!irq_can_set_affinity(irq))
+		return 0;
+
+	cpus_and(mask, cpu_online_map, irq_default_affinity);
+
+	irq_desc[irq].affinity = mask;
+	irq_desc[irq].chip->set_affinity(irq, mask);
+
+	set_balance_irq_affinity(irq, mask);
+	return 0;
+}
+#endif
+
 #endif
 
 /**
@@ -382,6 +405,9 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		} else
 			/* Undo nested disables: */
 			desc->depth = 1;
+
+		/* Set default affinity mask once everything is setup */
+		irq_select_affinity(irq);
 	}
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
@@ -571,8 +597,6 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	action->next = NULL;
 	action->dev_id = dev_id;
 
-	select_smp_affinity(irq);
-
 #ifdef CONFIG_DEBUG_SHIRQ
 	if (irqflags & IRQF_SHARED) {
 		/*
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index c2f2ccb0549a..6c6d35d68ee9 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -44,7 +44,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 				   unsigned long count, void *data)
 {
 	unsigned int irq = (int)(long)data, full_count = count, err;
-	cpumask_t new_value, tmp;
+	cpumask_t new_value;
 
 	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
 	    irq_balancing_disabled(irq))
@@ -62,17 +62,51 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	cpus_and(tmp, new_value, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpus_intersects(new_value, cpu_online_map))
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return select_smp_affinity(irq) ? -EINVAL : full_count;
+		return irq_select_affinity(irq) ? -EINVAL : full_count;
 
 	irq_set_affinity(irq, new_value);
 
 	return full_count;
 }
 
+static int default_affinity_read(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len = cpumask_scnprintf(page, count, irq_default_affinity);
+	if (count - len < 2)
+		return -EINVAL;
+	len += sprintf(page + len, "\n");
+	return len;
+}
+
+static int default_affinity_write(struct file *file, const char __user *buffer,
+				  unsigned long count, void *data)
+{
+	unsigned int full_count = count, err;
+	cpumask_t new_value;
+
+	err = cpumask_parse_user(buffer, count, new_value);
+	if (err)
+		return err;
+
+	if (!is_affinity_mask_valid(new_value))
+		return -EINVAL;
+
+	/*
+	 * Do not allow disabling IRQs completely - it's a too easy
+	 * way to make the system unusable accidentally :-) At least
+	 * one online CPU still has to be targeted.
+	 */
+	if (!cpus_intersects(new_value, cpu_online_map))
+		return -EINVAL;
+
+	irq_default_affinity = new_value;
+
+	return full_count;
+}
 #endif
 
 static int irq_spurious_read(char *page, char **start, off_t off,
@@ -171,6 +205,21 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
 	remove_proc_entry(action->dir->name, irq_desc[irq].dir);
 }
 
+void register_default_affinity_proc(void)
+{
+#ifdef CONFIG_SMP
+	struct proc_dir_entry *entry;
+
+	/* create /proc/irq/default_smp_affinity */
+	entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir);
+	if (entry) {
+		entry->data = NULL;
+		entry->read_proc = default_affinity_read;
+		entry->write_proc = default_affinity_write;
+	}
+#endif
+}
+
 void init_irq_proc(void)
 {
 	int i;
@@ -180,6 +229,8 @@ void init_irq_proc(void)
 	if (!root_irq_dir)
 		return;
 
+	register_default_affinity_proc();
+
 	/*
 	 * Create entries for all existing IRQs.
 	 */