Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/chip.c         |  1 +
-rw-r--r--   kernel/irq/handle.c       |  5 -----
-rw-r--r--   kernel/irq/manage.c       | 38 ++++++++++++++++++++++++++++++--------
-rw-r--r--   kernel/irq/numa_migrate.c | 11 +++--------
-rw-r--r--   kernel/irq/proc.c         | 32 +++++++++++++++++++++-----------
5 files changed, 55 insertions(+), 32 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b343deedae91..f63c706d25e1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -125,6 +125,7 @@ int set_irq_type(unsigned int irq, unsigned int type)
 		return -ENODEV;
 	}
 
+	type &= IRQ_TYPE_SENSE_MASK;
 	if (type == IRQ_TYPE_NONE)
 		return 0;
 
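For context, a minimal caller-side sketch (not part of the diff; the IRQ number and board_* name are purely illustrative) of what the new masking means for users of set_irq_type(): only the IRQ_TYPE_SENSE_MASK bits of the type argument are acted upon, so a value carrying stray non-sense bits no longer defeats the IRQ_TYPE_NONE check that follows.

#include <linux/init.h>
#include <linux/irq.h>

/* Illustrative only: GPIO_KEY_IRQ stands in for a real, board-specific
 * interrupt number. */
#define GPIO_KEY_IRQ	42

static void __init board_setup_key_irq(void)
{
	/*
	 * With the hunk above, set_irq_type() masks its argument with
	 * IRQ_TYPE_SENSE_MASK itself, so only the sense bits of the
	 * value passed here are honoured.
	 */
	set_irq_type(GPIO_KEY_IRQ, IRQ_TYPE_EDGE_FALLING);
}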
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f1a23069c20a..6492400cb50d 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -422,11 +422,8 @@ out:
 }
 #endif
 
-
-#ifdef CONFIG_TRACE_IRQFLAGS
 void early_init_irq_lock_class(void)
 {
-#ifndef CONFIG_SPARSE_IRQ
 	struct irq_desc *desc;
 	int i;
 
@@ -436,9 +433,7 @@ void early_init_irq_lock_class(void)
 
 		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	}
-#endif
 }
-#endif
 
 #ifdef CONFIG_SPARSE_IRQ
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 10ad2f87ed9a..cd0cd8dcb345 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,15 @@
 #include "internals.h"
 
 #ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
 
-cpumask_t irq_default_affinity = CPU_MASK_ALL;
+static int init_irq_default_affinity(void)
+{
+	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
+	cpumask_setall(irq_default_affinity);
+	return 0;
+}
+core_initcall(init_irq_default_affinity);
 
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -127,7 +134,7 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 		desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity);
+	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
 	desc->chip->set_affinity(irq, &desc->affinity);
 
@@ -368,16 +375,18 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		return 0;
 	}
 
-	ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+	/* caller masked out all except trigger mode flags */
+	ret = chip->set_type(irq, flags);
 
 	if (ret)
 		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
-				(int)(flags & IRQF_TRIGGER_MASK),
-				irq, chip->set_type);
+				(int)flags, irq, chip->set_type);
 	else {
+		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+			flags |= IRQ_LEVEL;
 		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-		desc->status &= ~IRQ_TYPE_SENSE_MASK;
-		desc->status |= flags & IRQ_TYPE_SENSE_MASK;
+		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+		desc->status |= flags;
 	}
 
 	return ret;
@@ -457,7 +466,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 	/* Setup the type (level, edge polarity) if configured: */
 	if (new->flags & IRQF_TRIGGER_MASK) {
-		ret = __irq_set_trigger(desc, irq, new->flags);
+		ret = __irq_set_trigger(desc, irq,
+				new->flags & IRQF_TRIGGER_MASK);
 
 		if (ret) {
 			spin_unlock_irqrestore(&desc->lock, flags);
@@ -671,6 +681,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	struct irq_desc *desc;
 	int retval;
 
+	/*
+	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
+	 * the _first_ irqaction (sigh).  That can cause oopsing, but
+	 * the behavior is classified as "will not fix" so we need to
+	 * start nudging drivers away from using that idiom.
+	 */
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+			== (IRQF_SHARED|IRQF_DISABLED))
+		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+				"guaranteed on shared IRQs\n",
+				irq, devname);
+
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * Lockdep wants atomic interrupt handlers:
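The request_irq() hunk above only warns; the flag combination is not rejected. A minimal driver-side sketch (the "mydev" names are placeholders, not from this diff) of the pattern that now triggers the new pr_warning():

#include <linux/interrupt.h>

/* Placeholder handler for a hypothetical device. */
static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	/* ... acknowledge and handle the device interrupt ... */
	return IRQ_HANDLED;
}

static int mydev_request_irq(unsigned int irq, void *dev)
{
	/*
	 * IRQF_SHARED + IRQF_DISABLED is the combination the patch
	 * warns about: IRQF_DISABLED is only honoured for the first
	 * handler on a shared line, so it cannot be relied upon here.
	 */
	return request_irq(irq, mydev_interrupt,
			   IRQF_SHARED | IRQF_DISABLED, "mydev", dev);
}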
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 0178e2296990..089c3746358a 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -1,13 +1,8 @@
 /*
- * linux/kernel/irq/handle.c
- *
- * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
- * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
- *
- * This file contains the core interrupt handling code.
- *
- * Detailed information is available in Documentation/DocBook/genericirq
+ * NUMA irq-desc migration code
  *
+ * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
+ * the new "home node" of the IRQ.
  */
 
 #include <linux/irq.h>
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d2c0e5ee53c5..2abd3a7716ed 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	cpumask_t *mask = &desc->affinity;
+	const struct cpumask *mask = &desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
@@ -93,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = {
 
 static int default_affinity_show(struct seq_file *m, void *v)
 {
-	seq_cpumask(m, &irq_default_affinity);
+	seq_cpumask(m, irq_default_affinity);
 	seq_putc(m, '\n');
 	return 0;
 }
@@ -101,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v)
 static ssize_t default_affinity_write(struct file *file,
 		const char __user *buffer, size_t count, loff_t *ppos)
 {
-	cpumask_t new_value;
+	cpumask_var_t new_value;
 	int err;
 
-	err = cpumask_parse_user(buffer, count, &new_value);
+	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
-		return err;
+		goto out;
 
-	if (!is_affinity_mask_valid(new_value))
-		return -EINVAL;
+	if (!is_affinity_mask_valid(new_value)) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!cpus_intersects(new_value, cpu_online_map))
-		return -EINVAL;
+	if (!cpumask_intersects(new_value, cpu_online_mask)) {
+		err = -EINVAL;
+		goto out;
+	}
 
-	irq_default_affinity = new_value;
+	cpumask_copy(irq_default_affinity, new_value);
+	err = count;
 
-	return count;
+out:
+	free_cpumask_var(new_value);
+	return err;
 }
 
 static int default_affinity_open(struct inode *inode, struct file *file)
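The conversions above all follow the same cpumask_var_t discipline: allocate, operate through the cpumask_* accessors, and free on every exit path, so that no full cpumask_t ever lands on the kernel stack. A stand-alone sketch of that pattern (the helper name is made up for illustration; with CONFIG_CPUMASK_OFFSTACK=n the alloc/free calls are effectively no-ops around an on-stack mask):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative helper: restrict a caller-supplied mask to online CPUs
 * without declaring a cpumask_t local variable. */
static int restrict_mask_to_online(struct cpumask *dst,
				   const struct cpumask *src)
{
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, src, cpu_online_mask);
	if (cpumask_empty(tmp)) {
		/* mirror default_affinity_write(): never accept a mask
		 * that targets no online CPU */
		free_cpumask_var(tmp);
		return -EINVAL;
	}

	cpumask_copy(dst, tmp);
	free_cpumask_var(tmp);
	return 0;
}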