author		Ingo Molnar <mingo@elte.hu>	2008-10-02 04:21:26 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-02 04:21:26 -0400
commit		d6d5aeb661fc14655c417f3582ae7ec52985d2a8 (patch)
tree		5e168da05cb28d10b5accc74718428cfd5527201 /kernel/irq
parent		7e6e178ab1548c8d894a77593e757acf4510b8ba (diff)
parent		94aca1dac6f6d21f4b07e4864baf7768cabcc6e7 (diff)
Merge commit 'v2.6.27-rc8' into genirq
Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/chip.c	 12
-rw-r--r--	kernel/irq/manage.c	111
-rw-r--r--	kernel/irq/proc.c	 96
3 files changed, 127 insertions, 92 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 240c64d59267..d663338cb4a8 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -28,8 +28,7 @@ void dynamic_irq_init(unsigned int irq)
 	unsigned long flags;
 
 	if (irq >= NR_IRQS) {
-		printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
-		WARN_ON(1);
+		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
 		return;
 	}
 
@@ -62,8 +61,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	unsigned long flags;
 
 	if (irq >= NR_IRQS) {
-		printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
-		WARN_ON(1);
+		WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
 		return;
 	}
 
@@ -71,9 +69,8 @@ void dynamic_irq_cleanup(unsigned int irq)
 	spin_lock_irqsave(&desc->lock, flags);
 	if (desc->action) {
 		spin_unlock_irqrestore(&desc->lock, flags);
-		printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n",
+		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
 			irq);
-		WARN_ON(1);
 		return;
 	}
 	desc->msi_desc = NULL;
@@ -96,8 +93,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
 	unsigned long flags;
 
 	if (irq >= NR_IRQS) {
-		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
-		WARN_ON(1);
+		WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
 		return -EINVAL;
 	}
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9aa3e7b81389..d62f69ba7453 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -177,8 +177,7 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
 {
 	switch (desc->depth) {
 	case 0:
-		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
-		WARN_ON(1);
+		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 		break;
 	case 1: {
 		unsigned int status = desc->status & ~IRQ_DISABLED;
@@ -217,6 +216,17 @@ void enable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(enable_irq);
 
+int set_irq_wake_real(unsigned int irq, unsigned int on)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int ret = -ENXIO;
+
+	if (desc->chip->set_wake)
+		ret = desc->chip->set_wake(irq, on);
+
+	return ret;
+}
+
 /**
  *	set_irq_wake - control irq power management wakeup
  *	@irq:	interrupt to control
@@ -233,30 +243,32 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 {
 	struct irq_desc *desc = irq_desc + irq;
 	unsigned long flags;
-	int ret = -ENXIO;
-	int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;
+	int ret = 0;
 
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
 	spin_lock_irqsave(&desc->lock, flags);
 	if (on) {
-		if (desc->wake_depth++ == 0)
-			desc->status |= IRQ_WAKEUP;
-		else
-			set_wake = NULL;
+		if (desc->wake_depth++ == 0) {
+			ret = set_irq_wake_real(irq, on);
+			if (ret)
+				desc->wake_depth = 0;
+			else
+				desc->status |= IRQ_WAKEUP;
+		}
 	} else {
 		if (desc->wake_depth == 0) {
-			printk(KERN_WARNING "Unbalanced IRQ %d "
-				"wake disable\n", irq);
-			WARN_ON(1);
-		} else if (--desc->wake_depth == 0)
-			desc->status &= ~IRQ_WAKEUP;
-		else
-			set_wake = NULL;
+			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
+		} else if (--desc->wake_depth == 0) {
+			ret = set_irq_wake_real(irq, on);
+			if (ret)
+				desc->wake_depth = 1;
+			else
+				desc->status &= ~IRQ_WAKEUP;
+		}
 	}
-	if (set_wake)
-		ret = desc->chip->set_wake(irq, on);
+
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
@@ -293,6 +305,31 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
 		desc->handle_irq = NULL;
 }
 
+static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
+		unsigned long flags)
+{
+	int ret;
+
+	if (!chip || !chip->set_type) {
+		/*
+		 * IRQF_TRIGGER_* but the PIC does not support multiple
+		 * flow-types?
+		 */
+		pr_warning("No set_type function for IRQ %d (%s)\n", irq,
+				chip ? (chip->name ? : "unknown") : "unknown");
+		return 0;
+	}
+
+	ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+
+	if (ret)
+		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
+				(int)(flags & IRQF_TRIGGER_MASK),
+				irq, chip->set_type);
+
+	return ret;
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -304,6 +341,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
+	int ret;
 
 	if (irq >= NR_IRQS)
 		return -EINVAL;
@@ -361,35 +399,23 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		shared = 1;
 	}
 
-	*p = new;
-
-	/* Exclude IRQ from balancing */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
-
 	if (!shared) {
 		irq_chip_set_defaults(desc->chip);
 
-#if defined(CONFIG_IRQ_PER_CPU)
-		if (new->flags & IRQF_PERCPU)
-			desc->status |= IRQ_PER_CPU;
-#endif
-
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
-			if (desc->chip->set_type)
-				desc->chip->set_type(irq,
-						new->flags & IRQF_TRIGGER_MASK);
-			else
-				/*
-				 * IRQF_TRIGGER_* but the PIC does not support
-				 * multiple flow-types?
-				 */
-				printk(KERN_WARNING "No IRQF_TRIGGER set_type "
-					"function for IRQ %d (%s)\n", irq,
-					desc->chip->name);
+			ret = __irq_set_trigger(desc->chip, irq, new->flags);
+
+			if (ret) {
+				spin_unlock_irqrestore(&desc->lock, flags);
+				return ret;
+			}
 		} else
 			compat_irq_chip_set_default_handler(desc);
+#if defined(CONFIG_IRQ_PER_CPU)
+		if (new->flags & IRQF_PERCPU)
+			desc->status |= IRQ_PER_CPU;
+#endif
 
 		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
 				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
@@ -405,6 +431,13 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		/* Set default affinity mask once everything is setup */
 		irq_select_affinity(irq);
 	}
+
+	*p = new;
+
+	/* Exclude IRQ from balancing */
+	if (new->flags & IRQF_NOBALANCING)
+		desc->status |= IRQ_NO_BALANCING;
+
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 6c6d35d68ee9..a09dd29c2fd7 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -8,6 +8,7 @@
 
 #include <linux/irq.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/interrupt.h>
 
 #include "internals.h"
@@ -16,23 +17,18 @@ static struct proc_dir_entry *root_irq_dir;
 
 #ifdef CONFIG_SMP
 
-static int irq_affinity_read_proc(char *page, char **start, off_t off,
-				  int count, int *eof, void *data)
+static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
-	struct irq_desc *desc = irq_desc + (long)data;
+	struct irq_desc *desc = irq_desc + (long)m->private;
 	cpumask_t *mask = &desc->affinity;
-	int len;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
 		mask = &desc->pending_mask;
 #endif
-	len = cpumask_scnprintf(page, count, *mask);
-
-	if (count - len < 2)
-		return -EINVAL;
-	len += sprintf(page + len, "\n");
-	return len;
+	seq_cpumask(m, mask);
+	seq_putc(m, '\n');
+	return 0;
 }
 
 #ifndef is_affinity_mask_valid
@@ -40,11 +36,12 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off,
 #endif
 
 int no_irq_affinity;
-static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
-				   unsigned long count, void *data)
+static ssize_t irq_affinity_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
 {
-	unsigned int irq = (int)(long)data, full_count = count, err;
+	unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
 	cpumask_t new_value;
+	int err;
 
 	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
 	    irq_balancing_disabled(irq))
@@ -65,28 +62,38 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 	if (!cpus_intersects(new_value, cpu_online_map))
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return irq_select_affinity(irq) ? -EINVAL : full_count;
+		return irq_select_affinity(irq) ? -EINVAL : count;
 
 	irq_set_affinity(irq, new_value);
 
-	return full_count;
+	return count;
 }
 
-static int default_affinity_read(char *page, char **start, off_t off,
-				  int count, int *eof, void *data)
+static int irq_affinity_proc_open(struct inode *inode, struct file *file)
 {
-	int len = cpumask_scnprintf(page, count, irq_default_affinity);
-	if (count - len < 2)
-		return -EINVAL;
-	len += sprintf(page + len, "\n");
-	return len;
+	return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
 }
 
-static int default_affinity_write(struct file *file, const char __user *buffer,
-				  unsigned long count, void *data)
+static const struct file_operations irq_affinity_proc_fops = {
+	.open		= irq_affinity_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= irq_affinity_proc_write,
+};
+
+static int default_affinity_show(struct seq_file *m, void *v)
+{
+	seq_cpumask(m, &irq_default_affinity);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static ssize_t default_affinity_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *ppos)
 {
-	unsigned int full_count = count, err;
 	cpumask_t new_value;
+	int err;
 
 	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
@@ -105,8 +112,21 @@ static int default_affinity_write(struct file *file, const char __user *buffer,
 
 	irq_default_affinity = new_value;
 
-	return full_count;
+	return count;
 }
+
+static int default_affinity_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, default_affinity_show, NULL);
+}
+
+static const struct file_operations default_affinity_proc_fops = {
+	.open		= default_affinity_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= default_affinity_write,
+};
 #endif
 
 static int irq_spurious_read(char *page, char **start, off_t off,
@@ -178,16 +198,9 @@ void register_irq_proc(unsigned int irq)
 	irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
 
 #ifdef CONFIG_SMP
-	{
-		/* create /proc/irq/<irq>/smp_affinity */
-		entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
-
-		if (entry) {
-			entry->data = (void *)(long)irq;
-			entry->read_proc = irq_affinity_read_proc;
-			entry->write_proc = irq_affinity_write_proc;
-		}
-	}
+	/* create /proc/irq/<irq>/smp_affinity */
+	proc_create_data("smp_affinity", 0600, irq_desc[irq].dir,
+			 &irq_affinity_proc_fops, (void *)(long)irq);
 #endif
 
 	entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
@@ -208,15 +221,8 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
 void register_default_affinity_proc(void)
 {
 #ifdef CONFIG_SMP
-	struct proc_dir_entry *entry;
-
-	/* create /proc/irq/default_smp_affinity */
-	entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir);
-	if (entry) {
-		entry->data = NULL;
-		entry->read_proc = default_affinity_read;
-		entry->write_proc = default_affinity_write;
-	}
+	proc_create("irq/default_smp_affinity", 0600, NULL,
+		    &default_affinity_proc_fops);
 #endif
 }
 