author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-19 20:09:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-19 20:09:40 -0400
commit     6e0b7b2c39b91b467270dd0bc383914f99e1fb28 (patch)
tree       bdd28cb3ab5653404220d2bd9089203168ef869f
parent     e4e47eb15b7884963efe7f98231009c5770a2c3d (diff)
parent     4308ad801193f14ff42cb746da37cf07e35f0d08 (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
genirq: Clear CPU mask in affinity_hint when none is provided
genirq: Add CPU mask affinity hint
genirq: Remove IRQF_DISABLED from core code
genirq: Run irq handlers with interrupts disabled
genirq: Introduce request_any_context_irq()
genirq: Expose irq_desc->node in proc/irq
Fixed up trivial conflicts in Documentation/feature-removal-schedule.txt
 Documentation/feature-removal-schedule.txt | 10
 Documentation/filesystems/proc.txt         |  4
 include/linux/interrupt.h                  | 32
 include/linux/irq.h                        |  1
 kernel/irq/handle.c                        |  3
 kernel/irq/manage.c                        | 89
 kernel/irq/proc.c                          | 60
 7 files changed, 165 insertions(+), 34 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index a5e381185de9..d9d3fbcb705d 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -589,3 +589,13 @@ Why: The vtx device nodes have been superseded by vbi device nodes
 	provided by the vtx API, then that functionality should be build
 	around the sliced VBI API instead.
 Who:	Hans Verkuil <hverkuil@xs4all.nl>
+
+----------------------------
+
+What:	IRQF_DISABLED
+When:	2.6.36
+Why:	The flag is a NOOP as we run interrupt handlers with interrupts disabled
+Who:	Thomas Gleixner <tglx@linutronix.de>
+
+----------------------------
+
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 1e359b62c40a..fbce915c9181 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -565,6 +565,10 @@ The default_smp_affinity mask applies to all non-active IRQs, which are the
 IRQs which have not yet been allocated/activated, and hence which lack a
 /proc/irq/[0-9]* directory.
 
+The node file on an SMP system shows the node to which the device using the IRQ
+reports itself as being attached. This hardware locality information does not
+include information about any possible driver locality preference.
+
 prof_cpu_mask specifies which CPUs are to be profiled by the system wide
 profiler. Default value is ffffffff (all cpus).
 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 75f3f00ac1e5..5137db3317f9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -39,7 +39,8 @@
  * These flags used only by the kernel as part of the
  * irq handling routines.
  *
- * IRQF_DISABLED - keep irqs disabled when calling the action handler
+ * IRQF_DISABLED - keep irqs disabled when calling the action handler.
+ *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
  * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
  * IRQF_SHARED - allow sharing the irq among several devices
  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
@@ -77,6 +78,18 @@ enum {
 	IRQTF_AFFINITY,
 };
 
+/**
+ * These values can be returned by request_any_context_irq() and
+ * describe the context the interrupt will be run in.
+ *
+ * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
+ * IRQC_IS_NESTED - interrupt runs in a nested threaded context
+ */
+enum {
+	IRQC_IS_HARDIRQ	= 0,
+	IRQC_IS_NESTED,
+};
+
 typedef irqreturn_t (*irq_handler_t)(int, void *);
 
 /**
@@ -120,6 +133,10 @@ request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
 	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
 }
 
+extern int __must_check
+request_any_context_irq(unsigned int irq, irq_handler_t handler,
+			unsigned long flags, const char *name, void *dev_id);
+
 extern void exit_irq_thread(void);
 #else
 
@@ -141,6 +158,13 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	return request_irq(irq, handler, flags, name, dev);
 }
 
+static inline int __must_check
+request_any_context_irq(unsigned int irq, irq_handler_t handler,
+			unsigned long flags, const char *name, void *dev_id)
+{
+	return request_irq(irq, handler, flags, name, dev_id);
+}
+
 static inline void exit_irq_thread(void) { }
 #endif
 
@@ -209,6 +233,7 @@ extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
+extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -223,6 +248,11 @@ static inline int irq_can_set_affinity(unsigned int irq)
 
 static inline int irq_select_affinity(unsigned int irq) { return 0; }
 
+static inline int irq_set_affinity_hint(unsigned int irq,
+					const struct cpumask *m)
+{
+	return -EINVAL;
+}
 #endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
 
 #ifdef CONFIG_GENERIC_HARDIRQS
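
As a minimal sketch of how a driver might use the new hint API (not part of this commit; "my_dev", "my_irq_handler" and "queue_mask" are hypothetical names), the driver owns the cpumask, publishes it after requesting the IRQ, and clears the hint again before free_irq(), since __free_irq() below warns if a hint is left behind:

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

struct my_dev {
	int		irq;
	cpumask_var_t	queue_mask;	/* driver-owned; must outlive the hint */
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_dev_setup_irq(struct my_dev *dev, int preferred_cpu)
{
	int ret;

	if (!zalloc_cpumask_var(&dev->queue_mask, GFP_KERNEL))
		return -ENOMEM;

	ret = request_irq(dev->irq, my_irq_handler, 0, "my_dev", dev);
	if (ret) {
		free_cpumask_var(dev->queue_mask);
		return ret;
	}

	cpumask_set_cpu(preferred_cpu, dev->queue_mask);
	/* exposed read-only via /proc/irq/<irq>/affinity_hint */
	irq_set_affinity_hint(dev->irq, dev->queue_mask);
	return 0;
}

static void my_dev_teardown_irq(struct my_dev *dev)
{
	/* clear the hint first; __free_irq() warns if it is still set */
	irq_set_affinity_hint(dev->irq, NULL);
	free_irq(dev->irq, dev);
	free_cpumask_var(dev->queue_mask);
}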
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 707ab122e2e6..c03243ad84b4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -195,6 +195,7 @@ struct irq_desc {
 	raw_spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
+	const struct cpumask	*affinity_hint;
 	unsigned int		node;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_var_t		pending_mask;
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 76d5a671bfe1..27e5c6911223 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
@@ -370,9 +370,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | |||
370 | irqreturn_t ret, retval = IRQ_NONE; | 370 | irqreturn_t ret, retval = IRQ_NONE; |
371 | unsigned int status = 0; | 371 | unsigned int status = 0; |
372 | 372 | ||
373 | if (!(action->flags & IRQF_DISABLED)) | ||
374 | local_irq_enable_in_hardirq(); | ||
375 | |||
376 | do { | 373 | do { |
377 | trace_irq_handler_entry(irq, action); | 374 | trace_irq_handler_entry(irq, action); |
378 | ret = action->handler(irq, action->dev_id); | 375 | ret = action->handler(irq, action->dev_id); |
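
The net effect for drivers, shown as an illustrative sketch ("foo_interrupt" and "foo" are made-up names, not from this commit): every primary handler now runs with local interrupts disabled, whether or not IRQF_DISABLED was passed, so the flag can simply be dropped:

#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/kernel.h>

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	/* always true now, with or without IRQF_DISABLED */
	WARN_ON_ONCE(!irqs_disabled());
	return IRQ_HANDLED;
}

static int foo_setup_irq(int irq, void *dev)
{
	/* IRQF_DISABLED no longer needed; the flag is a NOOP */
	return request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", dev);
}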
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 704e488730a5..3164ba7ce151 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -138,6 +138,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	return 0;
 }
 
+int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+
+	if (!desc)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	desc->affinity_hint = m;
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
@@ -757,16 +773,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	if (new->flags & IRQF_ONESHOT)
 		desc->status |= IRQ_ONESHOT;
 
-	/*
-	 * Force MSI interrupts to run with interrupts
-	 * disabled. The multi vector cards can cause stack
-	 * overflows due to nested interrupts when enough of
-	 * them are directed to a core and fire at the same
-	 * time.
-	 */
-	if (desc->msi_desc)
-		new->flags |= IRQF_DISABLED;
-
 	if (!(desc->status & IRQ_NOAUTOEN)) {
 		desc->depth = 0;
 		desc->status &= ~IRQ_DISABLED;
@@ -916,6 +922,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 			desc->chip->disable(irq);
 	}
 
+#ifdef CONFIG_SMP
+	/* make sure affinity_hint is cleaned up */
+	if (WARN_ON_ONCE(desc->affinity_hint))
+		desc->affinity_hint = NULL;
+#endif
+
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
@@ -1027,7 +1039,6 @@ EXPORT_SYMBOL(free_irq);
  *	Flags:
  *
  *	IRQF_SHARED		Interrupt is shared
- *	IRQF_DISABLED	Disable local interrupts while processing
  *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
  *	IRQF_TRIGGER_*		Specify active edge(s) or level
  *
@@ -1041,25 +1052,6 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	int retval;
 
 	/*
-	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
-	 * the _first_ irqaction (sigh).  That can cause oopsing, but
-	 * the behavior is classified as "will not fix" so we need to
-	 * start nudging drivers away from using that idiom.
-	 */
-	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
-					(IRQF_SHARED|IRQF_DISABLED)) {
-		pr_warning(
-		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
-			irq, devname);
-	}
-
-#ifdef CONFIG_LOCKDEP
-	/*
-	 * Lockdep wants atomic interrupt handlers:
-	 */
-	irqflags |= IRQF_DISABLED;
-#endif
-	/*
 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
 	 * otherwise we'll have trouble later trying to figure out
 	 * which interrupt is which (messes up the interrupt freeing
@@ -1120,3 +1112,40 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	return retval;
 }
 EXPORT_SYMBOL(request_threaded_irq);
+
+/**
+ *	request_any_context_irq - allocate an interrupt line
+ *	@irq: Interrupt line to allocate
+ *	@handler: Function to be called when the IRQ occurs.
+ *		  Threaded handler for threaded interrupts.
+ *	@flags: Interrupt type flags
+ *	@name: An ascii name for the claiming device
+ *	@dev_id: A cookie passed back to the handler function
+ *
+ *	This call allocates interrupt resources and enables the
+ *	interrupt line and IRQ handling. It selects either a
+ *	hardirq or threaded handling method depending on the
+ *	context.
+ *
+ *	On failure, it returns a negative value. On success,
+ *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
+ */
+int request_any_context_irq(unsigned int irq, irq_handler_t handler,
+			    unsigned long flags, const char *name, void *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	if (desc->status & IRQ_NESTED_THREAD) {
+		ret = request_threaded_irq(irq, NULL, handler,
+					   flags, name, dev_id);
+		return !ret ? IRQC_IS_NESTED : ret;
+	}
+
+	ret = request_irq(irq, handler, flags, name, dev_id);
+	return !ret ? IRQC_IS_HARDIRQ : ret;
+}
+EXPORT_SYMBOL_GPL(request_any_context_irq);
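
A hypothetical caller sketch, assuming made-up names ("bar_irq_handler", "bar") and an arbitrary trigger flag, of how the return value of request_any_context_irq() would be consumed: negative means failure, otherwise it reports which context the handler will run in:

#include <linux/interrupt.h>
#include <linux/kernel.h>

static irqreturn_t bar_irq_handler(int irq, void *dev_id)
{
	/* may run in hardirq context or in a nested (sleepable) thread */
	return IRQ_HANDLED;
}

static int bar_setup_irq(int irq, void *dev)
{
	int ret;

	ret = request_any_context_irq(irq, bar_irq_handler,
				      IRQF_TRIGGER_LOW, "bar", dev);
	if (ret < 0)
		return ret;			/* request failed */

	if (ret == IRQC_IS_NESTED)
		pr_info("bar: irq %d handled in a nested thread\n", irq);
	else					/* IRQC_IS_HARDIRQ */
		pr_info("bar: irq %d handled in hardirq context\n", irq);

	return 0;
}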
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 7a6eb04ef6b5..09a2ee540bd2 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -32,6 +32,27 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v)
 	return 0;
 }
 
+static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
+{
+	struct irq_desc *desc = irq_to_desc((long)m->private);
+	unsigned long flags;
+	cpumask_var_t mask;
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	if (desc->affinity_hint)
+		cpumask_copy(mask, desc->affinity_hint);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	seq_cpumask(m, mask);
+	seq_putc(m, '\n');
+	free_cpumask_var(mask);
+
+	return 0;
+}
+
 #ifndef is_affinity_mask_valid
 #define is_affinity_mask_valid(val) 1
 #endif
@@ -84,6 +105,11 @@ static int irq_affinity_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
 }
 
+static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
+}
+
 static const struct file_operations irq_affinity_proc_fops = {
 	.open		= irq_affinity_proc_open,
 	.read		= seq_read,
@@ -92,6 +118,13 @@ static const struct file_operations irq_affinity_proc_fops = {
 	.write		= irq_affinity_proc_write,
 };
 
+static const struct file_operations irq_affinity_hint_proc_fops = {
+	.open		= irq_affinity_hint_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static int default_affinity_show(struct seq_file *m, void *v)
 {
 	seq_cpumask(m, irq_default_affinity);
@@ -147,6 +180,26 @@ static const struct file_operations default_affinity_proc_fops = {
 	.release	= single_release,
 	.write		= default_affinity_write,
 };
+
+static int irq_node_proc_show(struct seq_file *m, void *v)
+{
+	struct irq_desc *desc = irq_to_desc((long) m->private);
+
+	seq_printf(m, "%d\n", desc->node);
+	return 0;
+}
+
+static int irq_node_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, irq_node_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations irq_node_proc_fops = {
+	.open		= irq_node_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
 #endif
 
 static int irq_spurious_proc_show(struct seq_file *m, void *v)
@@ -231,6 +284,13 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 	/* create /proc/irq/<irq>/smp_affinity */
 	proc_create_data("smp_affinity", 0600, desc->dir,
			 &irq_affinity_proc_fops, (void *)(long)irq);
+
+	/* create /proc/irq/<irq>/affinity_hint */
+	proc_create_data("affinity_hint", 0400, desc->dir,
+			 &irq_affinity_hint_proc_fops, (void *)(long)irq);
+
+	proc_create_data("node", 0444, desc->dir,
+			 &irq_node_proc_fops, (void *)(long)irq);
 #endif
 
 	proc_create_data("spurious", 0444, desc->dir,
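
From userspace, the two new read-only files can simply be read back; a small sketch (not part of the patch, and IRQ 16 is only an example number):

#include <stdio.h>

static void dump(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(line, sizeof(line), f))
		printf("%-32s %s", path, line);
	fclose(f);
}

int main(void)
{
	dump("/proc/irq/16/affinity_hint");	/* cpumask, all zeroes if no driver set a hint */
	dump("/proc/irq/16/node");		/* node the IRQ's device reports itself attached to */
	return 0;
}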