Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/Kconfig          74
-rw-r--r--  kernel/irq/Makefile          4
-rw-r--r--  kernel/irq/autoprobe.c      57
-rw-r--r--  kernel/irq/chip.c          737
-rw-r--r--  kernel/irq/debug.h          45
-rw-r--r--  kernel/irq/dummychip.c      59
-rw-r--r--  kernel/irq/generic-chip.c  368
-rw-r--r--  kernel/irq/handle.c        559
-rw-r--r--  kernel/irq/internals.h     174
-rw-r--r--  kernel/irq/irqdesc.c       466
-rw-r--r--  kernel/irq/manage.c        690
-rw-r--r--  kernel/irq/migration.c      43
-rw-r--r--  kernel/irq/numa_migrate.c  120
-rw-r--r--  kernel/irq/pm.c             30
-rw-r--r--  kernel/irq/proc.c          169
-rw-r--r--  kernel/irq/resend.c         21
-rw-r--r--  kernel/irq/settings.h      142
-rw-r--r--  kernel/irq/spurious.c      195
18 files changed, 2534 insertions, 1419 deletions
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
new file mode 100644
index 000000000000..d1d051b38e0b
--- /dev/null
+++ b/kernel/irq/Kconfig
@@ -0,0 +1,74 @@
1# Select this to activate the generic irq options below
2config HAVE_GENERIC_HARDIRQS
3 bool
4
5if HAVE_GENERIC_HARDIRQS
6menu "IRQ subsystem"
7#
8# Interrupt subsystem related configuration options
9#
10config GENERIC_HARDIRQS
11 def_bool y
12
13# Options selectable by the architecture code
14
15# Make sparse irq Kconfig switch below available
16config HAVE_SPARSE_IRQ
17 bool
18
19# Enable the generic irq autoprobe mechanism
20config GENERIC_IRQ_PROBE
21 bool
22
23# Use the generic /proc/interrupts implementation
24config GENERIC_IRQ_SHOW
25 bool
26
27# Print level/edge extra information
28config GENERIC_IRQ_SHOW_LEVEL
29 bool
30
31# Support for delayed migration from interrupt context
32config GENERIC_PENDING_IRQ
33 bool
34
35# Alpha specific irq affinity mechanism
36config AUTO_IRQ_AFFINITY
37 bool
38
39# Tasklet based software resend for pending interrupts on enable_irq()
40config HARDIRQS_SW_RESEND
41 bool
42
43# Preflow handler support for fasteoi (sparc64)
44config IRQ_PREFLOW_FASTEOI
45 bool
46
47# Edge style eoi based handler (cell)
48config IRQ_EDGE_EOI_HANDLER
49 bool
50
51# Generic configurable interrupt chip implementation
52config GENERIC_IRQ_CHIP
53 bool
54
55# Support forced irq threading
56config IRQ_FORCED_THREADING
57 bool
58
59config SPARSE_IRQ
60 bool "Support sparse irq numbering"
61 depends on HAVE_SPARSE_IRQ
62 ---help---
63
64 Sparse irq numbering is useful for distro kernels that want
65 to define a high CONFIG_NR_CPUS value but still want to have
66 low kernel memory footprint on smaller machines.
67
68 ( Sparse irqs can also be beneficial on NUMA boxes, as they spread
69 out the interrupt descriptors in a more NUMA-friendly way. )
70
71 If you don't know what to do here, say N.
72
73endmenu
74endif
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 7d047808419d..73290056cfb6 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,7 +1,7 @@
1 1
2obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o 2obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
3obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o
3obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o 4obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
4obj-$(CONFIG_PROC_FS) += proc.o 5obj-$(CONFIG_PROC_FS) += proc.o
5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 6obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
6obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
7obj-$(CONFIG_PM_SLEEP) += pm.o 7obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 2295a31ef110..342d8f44e401 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -17,7 +17,7 @@
17/* 17/*
18 * Autodetection depends on the fact that any interrupt that 18 * Autodetection depends on the fact that any interrupt that
19 * comes in on to an unassigned handler will get stuck with 19 * comes in on to an unassigned handler will get stuck with
20 * "IRQ_WAITING" cleared and the interrupt disabled. 20 * "IRQS_WAITING" cleared and the interrupt disabled.
21 */ 21 */
22static DEFINE_MUTEX(probing_active); 22static DEFINE_MUTEX(probing_active);
23 23
@@ -32,7 +32,6 @@ unsigned long probe_irq_on(void)
32{ 32{
33 struct irq_desc *desc; 33 struct irq_desc *desc;
34 unsigned long mask = 0; 34 unsigned long mask = 0;
35 unsigned int status;
36 int i; 35 int i;
37 36
38 /* 37 /*
@@ -46,20 +45,15 @@ unsigned long probe_irq_on(void)
46 */ 45 */
47 for_each_irq_desc_reverse(i, desc) { 46 for_each_irq_desc_reverse(i, desc) {
48 raw_spin_lock_irq(&desc->lock); 47 raw_spin_lock_irq(&desc->lock);
49 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 48 if (!desc->action && irq_settings_can_probe(desc)) {
50 /*
51 * An old-style architecture might still have
52 * the handle_bad_irq handler there:
53 */
54 compat_irq_chip_set_default_handler(desc);
55
56 /* 49 /*
57 * Some chips need to know about probing in 50 * Some chips need to know about probing in
58 * progress: 51 * progress:
59 */ 52 */
60 if (desc->chip->set_type) 53 if (desc->irq_data.chip->irq_set_type)
61 desc->chip->set_type(i, IRQ_TYPE_PROBE); 54 desc->irq_data.chip->irq_set_type(&desc->irq_data,
62 desc->chip->startup(i); 55 IRQ_TYPE_PROBE);
56 irq_startup(desc);
63 } 57 }
64 raw_spin_unlock_irq(&desc->lock); 58 raw_spin_unlock_irq(&desc->lock);
65 } 59 }
@@ -74,10 +68,10 @@ unsigned long probe_irq_on(void)
74 */ 68 */
75 for_each_irq_desc_reverse(i, desc) { 69 for_each_irq_desc_reverse(i, desc) {
76 raw_spin_lock_irq(&desc->lock); 70 raw_spin_lock_irq(&desc->lock);
77 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 71 if (!desc->action && irq_settings_can_probe(desc)) {
78 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; 72 desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
79 if (desc->chip->startup(i)) 73 if (irq_startup(desc))
80 desc->status |= IRQ_PENDING; 74 desc->istate |= IRQS_PENDING;
81 } 75 }
82 raw_spin_unlock_irq(&desc->lock); 76 raw_spin_unlock_irq(&desc->lock);
83 } 77 }
@@ -92,13 +86,12 @@ unsigned long probe_irq_on(void)
92 */ 86 */
93 for_each_irq_desc(i, desc) { 87 for_each_irq_desc(i, desc) {
94 raw_spin_lock_irq(&desc->lock); 88 raw_spin_lock_irq(&desc->lock);
95 status = desc->status;
96 89
97 if (status & IRQ_AUTODETECT) { 90 if (desc->istate & IRQS_AUTODETECT) {
98 /* It triggered already - consider it spurious. */ 91 /* It triggered already - consider it spurious. */
99 if (!(status & IRQ_WAITING)) { 92 if (!(desc->istate & IRQS_WAITING)) {
100 desc->status = status & ~IRQ_AUTODETECT; 93 desc->istate &= ~IRQS_AUTODETECT;
101 desc->chip->shutdown(i); 94 irq_shutdown(desc);
102 } else 95 } else
103 if (i < 32) 96 if (i < 32)
104 mask |= 1 << i; 97 mask |= 1 << i;
@@ -124,20 +117,18 @@ EXPORT_SYMBOL(probe_irq_on);
124 */ 117 */
125unsigned int probe_irq_mask(unsigned long val) 118unsigned int probe_irq_mask(unsigned long val)
126{ 119{
127 unsigned int status, mask = 0; 120 unsigned int mask = 0;
128 struct irq_desc *desc; 121 struct irq_desc *desc;
129 int i; 122 int i;
130 123
131 for_each_irq_desc(i, desc) { 124 for_each_irq_desc(i, desc) {
132 raw_spin_lock_irq(&desc->lock); 125 raw_spin_lock_irq(&desc->lock);
133 status = desc->status; 126 if (desc->istate & IRQS_AUTODETECT) {
134 127 if (i < 16 && !(desc->istate & IRQS_WAITING))
135 if (status & IRQ_AUTODETECT) {
136 if (i < 16 && !(status & IRQ_WAITING))
137 mask |= 1 << i; 128 mask |= 1 << i;
138 129
139 desc->status = status & ~IRQ_AUTODETECT; 130 desc->istate &= ~IRQS_AUTODETECT;
140 desc->chip->shutdown(i); 131 irq_shutdown(desc);
141 } 132 }
142 raw_spin_unlock_irq(&desc->lock); 133 raw_spin_unlock_irq(&desc->lock);
143 } 134 }
@@ -168,20 +159,18 @@ int probe_irq_off(unsigned long val)
168{ 159{
169 int i, irq_found = 0, nr_of_irqs = 0; 160 int i, irq_found = 0, nr_of_irqs = 0;
170 struct irq_desc *desc; 161 struct irq_desc *desc;
171 unsigned int status;
172 162
173 for_each_irq_desc(i, desc) { 163 for_each_irq_desc(i, desc) {
174 raw_spin_lock_irq(&desc->lock); 164 raw_spin_lock_irq(&desc->lock);
175 status = desc->status;
176 165
177 if (status & IRQ_AUTODETECT) { 166 if (desc->istate & IRQS_AUTODETECT) {
178 if (!(status & IRQ_WAITING)) { 167 if (!(desc->istate & IRQS_WAITING)) {
179 if (!nr_of_irqs) 168 if (!nr_of_irqs)
180 irq_found = i; 169 irq_found = i;
181 nr_of_irqs++; 170 nr_of_irqs++;
182 } 171 }
183 desc->status = status & ~IRQ_AUTODETECT; 172 desc->istate &= ~IRQS_AUTODETECT;
184 desc->chip->shutdown(i); 173 irq_shutdown(desc);
185 } 174 }
186 raw_spin_unlock_irq(&desc->lock); 175 raw_spin_unlock_irq(&desc->lock);
187 } 176 }
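For context, the three-phase probe implemented above is consumed by drivers roughly as follows; a minimal sketch, assuming a my_device_raise_irq() helper that makes the hardware assert its line:

```c
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>

extern void my_device_raise_irq(void);	/* assumed device-specific helper */

static int my_probe_irq_line(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unused, probeable lines */
	my_device_raise_irq();		/* device asserts its interrupt */
	udelay(100);			/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);	/* >0: line found, 0: none, <0: ambiguous */

	return irq > 0 ? irq : -ENODEV;
}
```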
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b7091d5ca2f8..d5a3009da71a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,363 +18,217 @@
18 18
19#include "internals.h" 19#include "internals.h"
20 20
21static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
22{
23 struct irq_desc *desc;
24 unsigned long flags;
25
26 desc = irq_to_desc(irq);
27 if (!desc) {
28 WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
29 return;
30 }
31
32 /* Ensure we don't have left over values from a previous use of this irq */
33 raw_spin_lock_irqsave(&desc->lock, flags);
34 desc->status = IRQ_DISABLED;
35 desc->chip = &no_irq_chip;
36 desc->handle_irq = handle_bad_irq;
37 desc->depth = 1;
38 desc->msi_desc = NULL;
39 desc->handler_data = NULL;
40 if (!keep_chip_data)
41 desc->chip_data = NULL;
42 desc->action = NULL;
43 desc->irq_count = 0;
44 desc->irqs_unhandled = 0;
45#ifdef CONFIG_SMP
46 cpumask_setall(desc->affinity);
47#ifdef CONFIG_GENERIC_PENDING_IRQ
48 cpumask_clear(desc->pending_mask);
49#endif
50#endif
51 raw_spin_unlock_irqrestore(&desc->lock, flags);
52}
53
54/** 21/**
55 * dynamic_irq_init - initialize a dynamically allocated irq 22 * irq_set_chip - set the irq chip for an irq
56 * @irq: irq number to initialize
57 */
58void dynamic_irq_init(unsigned int irq)
59{
60 dynamic_irq_init_x(irq, false);
61}
62
63/**
64 * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
65 * @irq: irq number to initialize
66 *
67 * does not set irq_to_desc(irq)->chip_data to NULL
68 */
69void dynamic_irq_init_keep_chip_data(unsigned int irq)
70{
71 dynamic_irq_init_x(irq, true);
72}
73
74static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
75{
76 struct irq_desc *desc = irq_to_desc(irq);
77 unsigned long flags;
78
79 if (!desc) {
80 WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
81 return;
82 }
83
84 raw_spin_lock_irqsave(&desc->lock, flags);
85 if (desc->action) {
86 raw_spin_unlock_irqrestore(&desc->lock, flags);
87 WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
88 irq);
89 return;
90 }
91 desc->msi_desc = NULL;
92 desc->handler_data = NULL;
93 if (!keep_chip_data)
94 desc->chip_data = NULL;
95 desc->handle_irq = handle_bad_irq;
96 desc->chip = &no_irq_chip;
97 desc->name = NULL;
98 clear_kstat_irqs(desc);
99 raw_spin_unlock_irqrestore(&desc->lock, flags);
100}
101
102/**
103 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
104 * @irq: irq number to initialize
105 */
106void dynamic_irq_cleanup(unsigned int irq)
107{
108 dynamic_irq_cleanup_x(irq, false);
109}
110
111/**
112 * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
113 * @irq: irq number to initialize
114 *
115 * does not set irq_to_desc(irq)->chip_data to NULL
116 */
117void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
118{
119 dynamic_irq_cleanup_x(irq, true);
120}
121
122
123/**
124 * set_irq_chip - set the irq chip for an irq
125 * @irq: irq number 23 * @irq: irq number
126 * @chip: pointer to irq chip description structure 24 * @chip: pointer to irq chip description structure
127 */ 25 */
128int set_irq_chip(unsigned int irq, struct irq_chip *chip) 26int irq_set_chip(unsigned int irq, struct irq_chip *chip)
129{ 27{
130 struct irq_desc *desc = irq_to_desc(irq);
131 unsigned long flags; 28 unsigned long flags;
29 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
132 30
133 if (!desc) { 31 if (!desc)
134 WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
135 return -EINVAL; 32 return -EINVAL;
136 }
137 33
138 if (!chip) 34 if (!chip)
139 chip = &no_irq_chip; 35 chip = &no_irq_chip;
140 36
141 raw_spin_lock_irqsave(&desc->lock, flags); 37 desc->irq_data.chip = chip;
142 irq_chip_set_defaults(chip); 38 irq_put_desc_unlock(desc, flags);
143 desc->chip = chip; 39 /*
144 raw_spin_unlock_irqrestore(&desc->lock, flags); 40 * For !CONFIG_SPARSE_IRQ make the irq show up in
145 41 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
42 * already marked, and this call is harmless.
43 */
44 irq_reserve_irq(irq);
146 return 0; 45 return 0;
147} 46}
148EXPORT_SYMBOL(set_irq_chip); 47EXPORT_SYMBOL(irq_set_chip);
149 48
150/** 49/**
151 * set_irq_type - set the irq trigger type for an irq 50 * irq_set_type - set the irq trigger type for an irq
152 * @irq: irq number 51 * @irq: irq number
153 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 52 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
154 */ 53 */
155int set_irq_type(unsigned int irq, unsigned int type) 54int irq_set_irq_type(unsigned int irq, unsigned int type)
156{ 55{
157 struct irq_desc *desc = irq_to_desc(irq);
158 unsigned long flags; 56 unsigned long flags;
159 int ret = -ENXIO; 57 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
58 int ret = 0;
160 59
161 if (!desc) { 60 if (!desc)
162 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); 61 return -EINVAL;
163 return -ENODEV;
164 }
165 62
166 type &= IRQ_TYPE_SENSE_MASK; 63 type &= IRQ_TYPE_SENSE_MASK;
167 if (type == IRQ_TYPE_NONE) 64 if (type != IRQ_TYPE_NONE)
168 return 0; 65 ret = __irq_set_trigger(desc, irq, type);
169 66 irq_put_desc_busunlock(desc, flags);
170 raw_spin_lock_irqsave(&desc->lock, flags);
171 ret = __irq_set_trigger(desc, irq, type);
172 raw_spin_unlock_irqrestore(&desc->lock, flags);
173 return ret; 67 return ret;
174} 68}
175EXPORT_SYMBOL(set_irq_type); 69EXPORT_SYMBOL(irq_set_irq_type);
176 70
177/** 71/**
178 * set_irq_data - set irq type data for an irq 72 * irq_set_handler_data - set irq handler data for an irq
179 * @irq: Interrupt number 73 * @irq: Interrupt number
180 * @data: Pointer to interrupt specific data 74 * @data: Pointer to interrupt specific data
181 * 75 *
182 * Set the hardware irq controller data for an irq 76 * Set the hardware irq controller data for an irq
183 */ 77 */
184int set_irq_data(unsigned int irq, void *data) 78int irq_set_handler_data(unsigned int irq, void *data)
185{ 79{
186 struct irq_desc *desc = irq_to_desc(irq);
187 unsigned long flags; 80 unsigned long flags;
81 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
188 82
189 if (!desc) { 83 if (!desc)
190 printk(KERN_ERR
191 "Trying to install controller data for IRQ%d\n", irq);
192 return -EINVAL; 84 return -EINVAL;
193 } 85 desc->irq_data.handler_data = data;
194 86 irq_put_desc_unlock(desc, flags);
195 raw_spin_lock_irqsave(&desc->lock, flags);
196 desc->handler_data = data;
197 raw_spin_unlock_irqrestore(&desc->lock, flags);
198 return 0; 87 return 0;
199} 88}
200EXPORT_SYMBOL(set_irq_data); 89EXPORT_SYMBOL(irq_set_handler_data);
201 90
202/** 91/**
203 * set_irq_msi - set MSI descriptor data for an irq 92 * irq_set_msi_desc - set MSI descriptor data for an irq
204 * @irq: Interrupt number 93 * @irq: Interrupt number
205 * @entry: Pointer to MSI descriptor data 94 * @entry: Pointer to MSI descriptor data
206 * 95 *
207 * Set the MSI descriptor entry for an irq 96 * Set the MSI descriptor entry for an irq
208 */ 97 */
209int set_irq_msi(unsigned int irq, struct msi_desc *entry) 98int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
210{ 99{
211 struct irq_desc *desc = irq_to_desc(irq);
212 unsigned long flags; 100 unsigned long flags;
101 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
213 102
214 if (!desc) { 103 if (!desc)
215 printk(KERN_ERR
216 "Trying to install msi data for IRQ%d\n", irq);
217 return -EINVAL; 104 return -EINVAL;
218 } 105 desc->irq_data.msi_desc = entry;
219
220 raw_spin_lock_irqsave(&desc->lock, flags);
221 desc->msi_desc = entry;
222 if (entry) 106 if (entry)
223 entry->irq = irq; 107 entry->irq = irq;
224 raw_spin_unlock_irqrestore(&desc->lock, flags); 108 irq_put_desc_unlock(desc, flags);
225 return 0; 109 return 0;
226} 110}
227 111
228/** 112/**
229 * set_irq_chip_data - set irq chip data for an irq 113 * irq_set_chip_data - set irq chip data for an irq
230 * @irq: Interrupt number 114 * @irq: Interrupt number
231 * @data: Pointer to chip specific data 115 * @data: Pointer to chip specific data
232 * 116 *
233 * Set the hardware irq chip data for an irq 117 * Set the hardware irq chip data for an irq
234 */ 118 */
235int set_irq_chip_data(unsigned int irq, void *data) 119int irq_set_chip_data(unsigned int irq, void *data)
236{ 120{
237 struct irq_desc *desc = irq_to_desc(irq);
238 unsigned long flags; 121 unsigned long flags;
122 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
239 123
240 if (!desc) { 124 if (!desc)
241 printk(KERN_ERR
242 "Trying to install chip data for IRQ%d\n", irq);
243 return -EINVAL;
244 }
245
246 if (!desc->chip) {
247 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
248 return -EINVAL; 125 return -EINVAL;
249 } 126 desc->irq_data.chip_data = data;
250 127 irq_put_desc_unlock(desc, flags);
251 raw_spin_lock_irqsave(&desc->lock, flags);
252 desc->chip_data = data;
253 raw_spin_unlock_irqrestore(&desc->lock, flags);
254
255 return 0; 128 return 0;
256} 129}
257EXPORT_SYMBOL(set_irq_chip_data); 130EXPORT_SYMBOL(irq_set_chip_data);
258 131
259/** 132struct irq_data *irq_get_irq_data(unsigned int irq)
260 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
261 *
262 * @irq: Interrupt number
263 * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag
264 *
265 * The IRQ_NESTED_THREAD flag indicates that on
266 * request_threaded_irq() no separate interrupt thread should be
267 * created for the irq as the handler are called nested in the
268 * context of a demultiplexing interrupt handler thread.
269 */
270void set_irq_nested_thread(unsigned int irq, int nest)
271{ 133{
272 struct irq_desc *desc = irq_to_desc(irq); 134 struct irq_desc *desc = irq_to_desc(irq);
273 unsigned long flags;
274 135
275 if (!desc) 136 return desc ? &desc->irq_data : NULL;
276 return; 137}
138EXPORT_SYMBOL_GPL(irq_get_irq_data);
277 139
278 raw_spin_lock_irqsave(&desc->lock, flags); 140static void irq_state_clr_disabled(struct irq_desc *desc)
279 if (nest) 141{
280 desc->status |= IRQ_NESTED_THREAD; 142 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
281 else
282 desc->status &= ~IRQ_NESTED_THREAD;
283 raw_spin_unlock_irqrestore(&desc->lock, flags);
284} 143}
285EXPORT_SYMBOL_GPL(set_irq_nested_thread);
286 144
287/* 145static void irq_state_set_disabled(struct irq_desc *desc)
288 * default enable function
289 */
290static void default_enable(unsigned int irq)
291{ 146{
292 struct irq_desc *desc = irq_to_desc(irq); 147 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
148}
293 149
294 desc->chip->unmask(irq); 150static void irq_state_clr_masked(struct irq_desc *desc)
295 desc->status &= ~IRQ_MASKED; 151{
152 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
296} 153}
297 154
298/* 155static void irq_state_set_masked(struct irq_desc *desc)
299 * default disable function
300 */
301static void default_disable(unsigned int irq)
302{ 156{
157 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
303} 158}
304 159
305/* 160int irq_startup(struct irq_desc *desc)
306 * default startup function
307 */
308static unsigned int default_startup(unsigned int irq)
309{ 161{
310 struct irq_desc *desc = irq_to_desc(irq); 162 irq_state_clr_disabled(desc);
163 desc->depth = 0;
311 164
312 desc->chip->enable(irq); 165 if (desc->irq_data.chip->irq_startup) {
166 int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
167 irq_state_clr_masked(desc);
168 return ret;
169 }
170
171 irq_enable(desc);
313 return 0; 172 return 0;
314} 173}
315 174
316/* 175void irq_shutdown(struct irq_desc *desc)
317 * default shutdown function
318 */
319static void default_shutdown(unsigned int irq)
320{ 176{
321 struct irq_desc *desc = irq_to_desc(irq); 177 irq_state_set_disabled(desc);
178 desc->depth = 1;
179 if (desc->irq_data.chip->irq_shutdown)
180 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
181 if (desc->irq_data.chip->irq_disable)
182 desc->irq_data.chip->irq_disable(&desc->irq_data);
183 else
184 desc->irq_data.chip->irq_mask(&desc->irq_data);
185 irq_state_set_masked(desc);
186}
322 187
323 desc->chip->mask(irq); 188void irq_enable(struct irq_desc *desc)
324 desc->status |= IRQ_MASKED; 189{
190 irq_state_clr_disabled(desc);
191 if (desc->irq_data.chip->irq_enable)
192 desc->irq_data.chip->irq_enable(&desc->irq_data);
193 else
194 desc->irq_data.chip->irq_unmask(&desc->irq_data);
195 irq_state_clr_masked(desc);
325} 196}
326 197
327/* 198void irq_disable(struct irq_desc *desc)
328 * Fixup enable/disable function pointers
329 */
330void irq_chip_set_defaults(struct irq_chip *chip)
331{ 199{
332 if (!chip->enable) 200 irq_state_set_disabled(desc);
333 chip->enable = default_enable; 201 if (desc->irq_data.chip->irq_disable) {
334 if (!chip->disable) 202 desc->irq_data.chip->irq_disable(&desc->irq_data);
335 chip->disable = default_disable; 203 irq_state_set_masked(desc);
336 if (!chip->startup) 204 }
337 chip->startup = default_startup;
338 /*
339 * We use chip->disable, when the user provided its own. When
340 * we have default_disable set for chip->disable, then we need
341 * to use default_shutdown, otherwise the irq line is not
342 * disabled on free_irq():
343 */
344 if (!chip->shutdown)
345 chip->shutdown = chip->disable != default_disable ?
346 chip->disable : default_shutdown;
347 if (!chip->name)
348 chip->name = chip->typename;
349 if (!chip->end)
350 chip->end = dummy_irq_chip.end;
351} 205}
352 206
353static inline void mask_ack_irq(struct irq_desc *desc, int irq) 207static inline void mask_ack_irq(struct irq_desc *desc)
354{ 208{
355 if (desc->chip->mask_ack) 209 if (desc->irq_data.chip->irq_mask_ack)
356 desc->chip->mask_ack(irq); 210 desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
357 else { 211 else {
358 desc->chip->mask(irq); 212 desc->irq_data.chip->irq_mask(&desc->irq_data);
359 if (desc->chip->ack) 213 if (desc->irq_data.chip->irq_ack)
360 desc->chip->ack(irq); 214 desc->irq_data.chip->irq_ack(&desc->irq_data);
361 } 215 }
362 desc->status |= IRQ_MASKED; 216 irq_state_set_masked(desc);
363} 217}
364 218
365static inline void mask_irq(struct irq_desc *desc, int irq) 219void mask_irq(struct irq_desc *desc)
366{ 220{
367 if (desc->chip->mask) { 221 if (desc->irq_data.chip->irq_mask) {
368 desc->chip->mask(irq); 222 desc->irq_data.chip->irq_mask(&desc->irq_data);
369 desc->status |= IRQ_MASKED; 223 irq_state_set_masked(desc);
370 } 224 }
371} 225}
372 226
373static inline void unmask_irq(struct irq_desc *desc, int irq) 227void unmask_irq(struct irq_desc *desc)
374{ 228{
375 if (desc->chip->unmask) { 229 if (desc->irq_data.chip->irq_unmask) {
376 desc->chip->unmask(irq); 230 desc->irq_data.chip->irq_unmask(&desc->irq_data);
377 desc->status &= ~IRQ_MASKED; 231 irq_state_clr_masked(desc);
378 } 232 }
379} 233}
380 234
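The hunk above converts every chip callback from taking a bare irq number to taking a struct irq_data pointer, and replaces open-coded desc->status manipulation with the irqd_*/irq_state_* accessors. A minimal chip in the new style might look like this; all my_* names and register offsets are illustrative assumptions, and a hwirq space of 0-31 is assumed:

```c
#include <linux/io.h>
#include <linux/irq.h>

#define MY_INTC_MASK_SET	0x04	/* assumed register offsets */
#define MY_INTC_MASK_CLR	0x08

static void my_irq_mask(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);

	writel(1 << d->irq, base + MY_INTC_MASK_SET);
}

static void my_irq_unmask(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);

	writel(1 << d->irq, base + MY_INTC_MASK_CLR);
}

static struct irq_chip my_irq_chip = {
	.name		= "MYINTC",
	.irq_mask	= my_irq_mask,
	.irq_unmask	= my_irq_unmask,
};
```

Note that irq_startup() and irq_shutdown() above fall back to irq_enable() and irq_mask() when a chip leaves irq_startup/irq_shutdown unset, so a mask/unmask pair is enough for a simple controller.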
@@ -399,10 +253,10 @@ void handle_nested_irq(unsigned int irq)
399 kstat_incr_irqs_this_cpu(irq, desc); 253 kstat_incr_irqs_this_cpu(irq, desc);
400 254
401 action = desc->action; 255 action = desc->action;
402 if (unlikely(!action || (desc->status & IRQ_DISABLED))) 256 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
403 goto out_unlock; 257 goto out_unlock;
404 258
405 desc->status |= IRQ_INPROGRESS; 259 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
406 raw_spin_unlock_irq(&desc->lock); 260 raw_spin_unlock_irq(&desc->lock);
407 261
408 action_ret = action->thread_fn(action->irq, action->dev_id); 262 action_ret = action->thread_fn(action->irq, action->dev_id);
@@ -410,13 +264,20 @@ void handle_nested_irq(unsigned int irq)
410 note_interrupt(irq, desc, action_ret); 264 note_interrupt(irq, desc, action_ret);
411 265
412 raw_spin_lock_irq(&desc->lock); 266 raw_spin_lock_irq(&desc->lock);
413 desc->status &= ~IRQ_INPROGRESS; 267 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
414 268
415out_unlock: 269out_unlock:
416 raw_spin_unlock_irq(&desc->lock); 270 raw_spin_unlock_irq(&desc->lock);
417} 271}
418EXPORT_SYMBOL_GPL(handle_nested_irq); 272EXPORT_SYMBOL_GPL(handle_nested_irq);
419 273
274static bool irq_check_poll(struct irq_desc *desc)
275{
276 if (!(desc->istate & IRQS_POLL_INPROGRESS))
277 return false;
278 return irq_wait_for_poll(desc);
279}
280
420/** 281/**
421 * handle_simple_irq - Simple and software-decoded IRQs. 282 * handle_simple_irq - Simple and software-decoded IRQs.
422 * @irq: the interrupt number 283 * @irq: the interrupt number
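handle_nested_irq() is designed to be called from the threaded handler of a demultiplexing parent, such as an I2C GPIO expander whose own handler must sleep. A hedged sketch of that usage; struct my_expander and my_expander_read_status() are assumptions, not kernel APIs:

```c
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitops.h>

struct my_expander {			/* assumed driver-private data */
	unsigned int irq_base;		/* first child Linux irq number */
};

extern unsigned long my_expander_read_status(struct my_expander *chip);

static irqreturn_t my_expander_irq_thread(int irq, void *data)
{
	struct my_expander *chip = data;
	unsigned long pending = my_expander_read_status(chip);
	int bit;

	for_each_set_bit(bit, &pending, 16)
		handle_nested_irq(chip->irq_base + bit);

	return IRQ_HANDLED;
}
```

Each child line would be marked with irq_set_nested_thread(child, 1), so that request_threaded_irq() on it does not spawn a thread of its own.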
@@ -432,32 +293,24 @@ EXPORT_SYMBOL_GPL(handle_nested_irq);
432void 293void
433handle_simple_irq(unsigned int irq, struct irq_desc *desc) 294handle_simple_irq(unsigned int irq, struct irq_desc *desc)
434{ 295{
435 struct irqaction *action;
436 irqreturn_t action_ret;
437
438 raw_spin_lock(&desc->lock); 296 raw_spin_lock(&desc->lock);
439 297
440 if (unlikely(desc->status & IRQ_INPROGRESS)) 298 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
441 goto out_unlock; 299 if (!irq_check_poll(desc))
442 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 300 goto out_unlock;
301
302 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
443 kstat_incr_irqs_this_cpu(irq, desc); 303 kstat_incr_irqs_this_cpu(irq, desc);
444 304
445 action = desc->action; 305 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
446 if (unlikely(!action || (desc->status & IRQ_DISABLED)))
447 goto out_unlock; 306 goto out_unlock;
448 307
449 desc->status |= IRQ_INPROGRESS; 308 handle_irq_event(desc);
450 raw_spin_unlock(&desc->lock);
451 309
452 action_ret = handle_IRQ_event(irq, action);
453 if (!noirqdebug)
454 note_interrupt(irq, desc, action_ret);
455
456 raw_spin_lock(&desc->lock);
457 desc->status &= ~IRQ_INPROGRESS;
458out_unlock: 310out_unlock:
459 raw_spin_unlock(&desc->lock); 311 raw_spin_unlock(&desc->lock);
460} 312}
313EXPORT_SYMBOL_GPL(handle_simple_irq);
461 314
462/** 315/**
463 * handle_level_irq - Level type irq handler 316 * handle_level_irq - Level type irq handler
@@ -472,42 +325,42 @@ out_unlock:
472void 325void
473handle_level_irq(unsigned int irq, struct irq_desc *desc) 326handle_level_irq(unsigned int irq, struct irq_desc *desc)
474{ 327{
475 struct irqaction *action;
476 irqreturn_t action_ret;
477
478 raw_spin_lock(&desc->lock); 328 raw_spin_lock(&desc->lock);
479 mask_ack_irq(desc, irq); 329 mask_ack_irq(desc);
480 330
481 if (unlikely(desc->status & IRQ_INPROGRESS)) 331 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
482 goto out_unlock; 332 if (!irq_check_poll(desc))
483 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 333 goto out_unlock;
334
335 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
484 kstat_incr_irqs_this_cpu(irq, desc); 336 kstat_incr_irqs_this_cpu(irq, desc);
485 337
486 /* 338 /*
487 * If its disabled or no action available 339 * If its disabled or no action available
488 * keep it masked and get out of here 340 * keep it masked and get out of here
489 */ 341 */
490 action = desc->action; 342 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
491 if (unlikely(!action || (desc->status & IRQ_DISABLED)))
492 goto out_unlock; 343 goto out_unlock;
493 344
494 desc->status |= IRQ_INPROGRESS; 345 handle_irq_event(desc);
495 raw_spin_unlock(&desc->lock);
496
497 action_ret = handle_IRQ_event(irq, action);
498 if (!noirqdebug)
499 note_interrupt(irq, desc, action_ret);
500
501 raw_spin_lock(&desc->lock);
502 desc->status &= ~IRQ_INPROGRESS;
503 346
504 if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) 347 if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
505 unmask_irq(desc, irq); 348 unmask_irq(desc);
506out_unlock: 349out_unlock:
507 raw_spin_unlock(&desc->lock); 350 raw_spin_unlock(&desc->lock);
508} 351}
509EXPORT_SYMBOL_GPL(handle_level_irq); 352EXPORT_SYMBOL_GPL(handle_level_irq);
510 353
354#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
355static inline void preflow_handler(struct irq_desc *desc)
356{
357 if (desc->preflow_handler)
358 desc->preflow_handler(&desc->irq_data);
359}
360#else
361static inline void preflow_handler(struct irq_desc *desc) { }
362#endif
363
511/** 364/**
512 * handle_fasteoi_irq - irq handler for transparent controllers 365 * handle_fasteoi_irq - irq handler for transparent controllers
513 * @irq: the interrupt number 366 * @irq: the interrupt number
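Wiring a level-triggered line to this flow handler is then a matter of three calls; a sketch reusing my_irq_chip from the earlier example:

```c
static void __init my_setup_level_irq(unsigned int irq, void __iomem *base)
{
	irq_set_chip_data(irq, base);	/* fetched via irq_data_get_irq_chip_data() */
	irq_set_chip_and_handler(irq, &my_irq_chip, handle_level_irq);
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
}
```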
@@ -521,42 +374,40 @@ EXPORT_SYMBOL_GPL(handle_level_irq);
521void 374void
522handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) 375handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
523{ 376{
524 struct irqaction *action;
525 irqreturn_t action_ret;
526
527 raw_spin_lock(&desc->lock); 377 raw_spin_lock(&desc->lock);
528 378
529 if (unlikely(desc->status & IRQ_INPROGRESS)) 379 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
530 goto out; 380 if (!irq_check_poll(desc))
381 goto out;
531 382
532 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 383 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
533 kstat_incr_irqs_this_cpu(irq, desc); 384 kstat_incr_irqs_this_cpu(irq, desc);
534 385
535 /* 386 /*
536 * If its disabled or no action available 387 * If its disabled or no action available
537 * then mask it and get out of here: 388 * then mask it and get out of here:
538 */ 389 */
539 action = desc->action; 390 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
540 if (unlikely(!action || (desc->status & IRQ_DISABLED))) { 391 desc->istate |= IRQS_PENDING;
541 desc->status |= IRQ_PENDING; 392 mask_irq(desc);
542 mask_irq(desc, irq);
543 goto out; 393 goto out;
544 } 394 }
545 395
546 desc->status |= IRQ_INPROGRESS; 396 if (desc->istate & IRQS_ONESHOT)
547 desc->status &= ~IRQ_PENDING; 397 mask_irq(desc);
548 raw_spin_unlock(&desc->lock);
549
550 action_ret = handle_IRQ_event(irq, action);
551 if (!noirqdebug)
552 note_interrupt(irq, desc, action_ret);
553 398
554 raw_spin_lock(&desc->lock); 399 preflow_handler(desc);
555 desc->status &= ~IRQ_INPROGRESS; 400 handle_irq_event(desc);
556out:
557 desc->chip->eoi(irq);
558 401
402out_eoi:
403 desc->irq_data.chip->irq_eoi(&desc->irq_data);
404out_unlock:
559 raw_spin_unlock(&desc->lock); 405 raw_spin_unlock(&desc->lock);
406 return;
407out:
408 if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
409 goto out_eoi;
410 goto out_unlock;
560} 411}
561 412
562/** 413/**
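handle_fasteoi_irq() only requires an irq_eoi callback; the new IRQCHIP_EOI_IF_HANDLED flag lets the handler skip the EOI when the interrupt was left unhandled, as the out/out_eoi labels above implement. A sketch building on the earlier my_* callbacks; the EOI register offset is an assumption:

```c
#define MY_INTC_EOI	0x0c		/* assumed register offset */

static void my_irq_eoi(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);

	writel(d->irq, base + MY_INTC_EOI);
}

static struct irq_chip my_eoi_chip = {
	.name		= "MYEOI",
	.irq_mask	= my_irq_mask,
	.irq_unmask	= my_irq_unmask,
	.irq_eoi	= my_irq_eoi,
	.flags		= IRQCHIP_EOI_IF_HANDLED,
};
```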
@@ -565,7 +416,7 @@ out:
565 * @desc: the interrupt description structure for this irq 416 * @desc: the interrupt description structure for this irq
566 * 417 *
567 * Interrupt occures on the falling and/or rising edge of a hardware 418 * Interrupt occures on the falling and/or rising edge of a hardware
568 * signal. The occurence is latched into the irq controller hardware 419 * signal. The occurrence is latched into the irq controller hardware
569 * and must be acked in order to be reenabled. After the ack another 420 * and must be acked in order to be reenabled. After the ack another
570 * interrupt can happen on the same source even before the first one 421 * interrupt can happen on the same source even before the first one
571 * is handled by the associated event handler. If this happens it 422 * is handled by the associated event handler. If this happens it
@@ -580,34 +431,28 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
580{ 431{
581 raw_spin_lock(&desc->lock); 432 raw_spin_lock(&desc->lock);
582 433
583 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 434 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
584
585 /* 435 /*
586 * If we're currently running this IRQ, or its disabled, 436 * If we're currently running this IRQ, or its disabled,
587 * we shouldn't process the IRQ. Mark it pending, handle 437 * we shouldn't process the IRQ. Mark it pending, handle
588 * the necessary masking and go out 438 * the necessary masking and go out
589 */ 439 */
590 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || 440 if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
591 !desc->action)) { 441 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
592 desc->status |= (IRQ_PENDING | IRQ_MASKED); 442 if (!irq_check_poll(desc)) {
593 mask_ack_irq(desc, irq); 443 desc->istate |= IRQS_PENDING;
594 goto out_unlock; 444 mask_ack_irq(desc);
445 goto out_unlock;
446 }
595 } 447 }
596 kstat_incr_irqs_this_cpu(irq, desc); 448 kstat_incr_irqs_this_cpu(irq, desc);
597 449
598 /* Start handling the irq */ 450 /* Start handling the irq */
599 if (desc->chip->ack) 451 desc->irq_data.chip->irq_ack(&desc->irq_data);
600 desc->chip->ack(irq);
601
602 /* Mark the IRQ currently in progress.*/
603 desc->status |= IRQ_INPROGRESS;
604 452
605 do { 453 do {
606 struct irqaction *action = desc->action; 454 if (unlikely(!desc->action)) {
607 irqreturn_t action_ret; 455 mask_irq(desc);
608
609 if (unlikely(!action)) {
610 mask_irq(desc, irq);
611 goto out_unlock; 456 goto out_unlock;
612 } 457 }
613 458
@@ -616,26 +461,66 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
616 * one, we could have masked the irq. 461 * one, we could have masked the irq.
617 * Renable it, if it was not disabled in meantime. 462 * Renable it, if it was not disabled in meantime.
618 */ 463 */
619 if (unlikely((desc->status & 464 if (unlikely(desc->istate & IRQS_PENDING)) {
620 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == 465 if (!irqd_irq_disabled(&desc->irq_data) &&
621 (IRQ_PENDING | IRQ_MASKED))) { 466 irqd_irq_masked(&desc->irq_data))
622 unmask_irq(desc, irq); 467 unmask_irq(desc);
623 } 468 }
624 469
625 desc->status &= ~IRQ_PENDING; 470 handle_irq_event(desc);
626 raw_spin_unlock(&desc->lock);
627 action_ret = handle_IRQ_event(irq, action);
628 if (!noirqdebug)
629 note_interrupt(irq, desc, action_ret);
630 raw_spin_lock(&desc->lock);
631 471
632 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); 472 } while ((desc->istate & IRQS_PENDING) &&
473 !irqd_irq_disabled(&desc->irq_data));
633 474
634 desc->status &= ~IRQ_INPROGRESS;
635out_unlock: 475out_unlock:
636 raw_spin_unlock(&desc->lock); 476 raw_spin_unlock(&desc->lock);
637} 477}
638 478
479#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
480/**
481 * handle_edge_eoi_irq - edge eoi type IRQ handler
482 * @irq: the interrupt number
483 * @desc: the interrupt description structure for this irq
484 *
485 * Similar as the above handle_edge_irq, but using eoi and w/o the
486 * mask/unmask logic.
487 */
488void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
489{
490 struct irq_chip *chip = irq_desc_get_chip(desc);
491
492 raw_spin_lock(&desc->lock);
493
494 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
495 /*
496 * If we're currently running this IRQ, or its disabled,
497 * we shouldn't process the IRQ. Mark it pending, handle
498 * the necessary masking and go out
499 */
500 if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
501 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
502 if (!irq_check_poll(desc)) {
503 desc->istate |= IRQS_PENDING;
504 goto out_eoi;
505 }
506 }
507 kstat_incr_irqs_this_cpu(irq, desc);
508
509 do {
510 if (unlikely(!desc->action))
511 goto out_eoi;
512
513 handle_irq_event(desc);
514
515 } while ((desc->istate & IRQS_PENDING) &&
516 !irqd_irq_disabled(&desc->irq_data));
517
518out_eoi:
519 chip->irq_eoi(&desc->irq_data);
520 raw_spin_unlock(&desc->lock);
521}
522#endif
523
639/** 524/**
640 * handle_percpu_irq - Per CPU local irq handler 525 * handle_percpu_irq - Per CPU local irq handler
641 * @irq: the interrupt number 526 * @irq: the interrupt number
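For the edge flow the chip must provide irq_ack, since handle_edge_irq() above invokes it unconditionally before looping on IRQS_PENDING. A sketch, again with assumed my_* helpers and register layout:

```c
#define MY_INTC_ACK	0x10		/* assumed register offset */

static void my_irq_ack(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);

	writel(1 << d->irq, base + MY_INTC_ACK);
}

static struct irq_chip my_edge_chip = {
	.name		= "MYEDGE",
	.irq_ack	= my_irq_ack,
	.irq_mask	= my_irq_mask,
	.irq_unmask	= my_irq_unmask,
};

static void __init my_setup_edge_irq(unsigned int irq)
{
	irq_set_chip_and_handler(irq, &my_edge_chip, handle_edge_irq);
	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
}
```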
@@ -646,115 +531,147 @@ out_unlock:
646void 531void
647handle_percpu_irq(unsigned int irq, struct irq_desc *desc) 532handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
648{ 533{
649 irqreturn_t action_ret; 534 struct irq_chip *chip = irq_desc_get_chip(desc);
650 535
651 kstat_incr_irqs_this_cpu(irq, desc); 536 kstat_incr_irqs_this_cpu(irq, desc);
652 537
653 if (desc->chip->ack) 538 if (chip->irq_ack)
654 desc->chip->ack(irq); 539 chip->irq_ack(&desc->irq_data);
655 540
656 action_ret = handle_IRQ_event(irq, desc->action); 541 handle_irq_event_percpu(desc, desc->action);
657 if (!noirqdebug)
658 note_interrupt(irq, desc, action_ret);
659 542
660 if (desc->chip->eoi) 543 if (chip->irq_eoi)
661 desc->chip->eoi(irq); 544 chip->irq_eoi(&desc->irq_data);
662} 545}
663 546
664void 547void
665__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 548__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
666 const char *name) 549 const char *name)
667{ 550{
668 struct irq_desc *desc = irq_to_desc(irq);
669 unsigned long flags; 551 unsigned long flags;
552 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
670 553
671 if (!desc) { 554 if (!desc)
672 printk(KERN_ERR
673 "Trying to install type control for IRQ%d\n", irq);
674 return; 555 return;
675 }
676 556
677 if (!handle) 557 if (!handle) {
678 handle = handle_bad_irq; 558 handle = handle_bad_irq;
679 else if (desc->chip == &no_irq_chip) { 559 } else {
680 printk(KERN_WARNING "Trying to install %sinterrupt handler " 560 if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
681 "for IRQ%d\n", is_chained ? "chained " : "", irq); 561 goto out;
682 /*
683 * Some ARM implementations install a handler for really dumb
684 * interrupt hardware without setting an irq_chip. This worked
685 * with the ARM no_irq_chip but the check in setup_irq would
686 * prevent us to setup the interrupt at all. Switch it to
687 * dummy_irq_chip for easy transition.
688 */
689 desc->chip = &dummy_irq_chip;
690 } 562 }
691 563
692 chip_bus_lock(irq, desc);
693 raw_spin_lock_irqsave(&desc->lock, flags);
694
695 /* Uninstall? */ 564 /* Uninstall? */
696 if (handle == handle_bad_irq) { 565 if (handle == handle_bad_irq) {
697 if (desc->chip != &no_irq_chip) 566 if (desc->irq_data.chip != &no_irq_chip)
698 mask_ack_irq(desc, irq); 567 mask_ack_irq(desc);
699 desc->status |= IRQ_DISABLED; 568 irq_state_set_disabled(desc);
700 desc->depth = 1; 569 desc->depth = 1;
701 } 570 }
702 desc->handle_irq = handle; 571 desc->handle_irq = handle;
703 desc->name = name; 572 desc->name = name;
704 573
705 if (handle != handle_bad_irq && is_chained) { 574 if (handle != handle_bad_irq && is_chained) {
706 desc->status &= ~IRQ_DISABLED; 575 irq_settings_set_noprobe(desc);
707 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 576 irq_settings_set_norequest(desc);
708 desc->depth = 0; 577 irq_settings_set_nothread(desc);
709 desc->chip->startup(irq); 578 irq_startup(desc);
710 } 579 }
711 raw_spin_unlock_irqrestore(&desc->lock, flags); 580out:
712 chip_bus_sync_unlock(irq, desc); 581 irq_put_desc_busunlock(desc, flags);
713} 582}
714EXPORT_SYMBOL_GPL(__set_irq_handler); 583EXPORT_SYMBOL_GPL(__irq_set_handler);
715 584
716void 585void
717set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, 586irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
718 irq_flow_handler_t handle) 587 irq_flow_handler_t handle, const char *name)
719{ 588{
720 set_irq_chip(irq, chip); 589 irq_set_chip(irq, chip);
721 __set_irq_handler(irq, handle, 0, NULL); 590 __irq_set_handler(irq, handle, 0, name);
722} 591}
723 592
724void 593void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
725set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
726 irq_flow_handler_t handle, const char *name)
727{ 594{
728 set_irq_chip(irq, chip); 595 unsigned long flags;
729 __set_irq_handler(irq, handle, 0, name); 596 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
597
598 if (!desc)
599 return;
600 irq_settings_clr_and_set(desc, clr, set);
601
602 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
603 IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
604 if (irq_settings_has_no_balance_set(desc))
605 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
606 if (irq_settings_is_per_cpu(desc))
607 irqd_set(&desc->irq_data, IRQD_PER_CPU);
608 if (irq_settings_can_move_pcntxt(desc))
609 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
610 if (irq_settings_is_level(desc))
611 irqd_set(&desc->irq_data, IRQD_LEVEL);
612
613 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
614
615 irq_put_desc_unlock(desc, flags);
730} 616}
617EXPORT_SYMBOL_GPL(irq_modify_status);
731 618
732void set_irq_noprobe(unsigned int irq) 619/**
620 * irq_cpu_online - Invoke all irq_cpu_online functions.
621 *
622 * Iterate through all irqs and invoke the chip.irq_cpu_online()
623 * for each.
624 */
625void irq_cpu_online(void)
733{ 626{
734 struct irq_desc *desc = irq_to_desc(irq); 627 struct irq_desc *desc;
628 struct irq_chip *chip;
735 unsigned long flags; 629 unsigned long flags;
630 unsigned int irq;
736 631
737 if (!desc) { 632 for_each_active_irq(irq) {
738 printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq); 633 desc = irq_to_desc(irq);
739 return; 634 if (!desc)
740 } 635 continue;
636
637 raw_spin_lock_irqsave(&desc->lock, flags);
741 638
742 raw_spin_lock_irqsave(&desc->lock, flags); 639 chip = irq_data_get_irq_chip(&desc->irq_data);
743 desc->status |= IRQ_NOPROBE; 640 if (chip && chip->irq_cpu_online &&
744 raw_spin_unlock_irqrestore(&desc->lock, flags); 641 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
642 !irqd_irq_disabled(&desc->irq_data)))
643 chip->irq_cpu_online(&desc->irq_data);
644
645 raw_spin_unlock_irqrestore(&desc->lock, flags);
646 }
745} 647}
746 648
747void set_irq_probe(unsigned int irq) 649/**
650 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
651 *
652 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
653 * for each.
654 */
655void irq_cpu_offline(void)
748{ 656{
749 struct irq_desc *desc = irq_to_desc(irq); 657 struct irq_desc *desc;
658 struct irq_chip *chip;
750 unsigned long flags; 659 unsigned long flags;
660 unsigned int irq;
751 661
752 if (!desc) { 662 for_each_active_irq(irq) {
753 printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq); 663 desc = irq_to_desc(irq);
754 return; 664 if (!desc)
755 } 665 continue;
666
667 raw_spin_lock_irqsave(&desc->lock, flags);
756 668
757 raw_spin_lock_irqsave(&desc->lock, flags); 669 chip = irq_data_get_irq_chip(&desc->irq_data);
758 desc->status &= ~IRQ_NOPROBE; 670 if (chip && chip->irq_cpu_offline &&
759 raw_spin_unlock_irqrestore(&desc->lock, flags); 671 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
672 !irqd_irq_disabled(&desc->irq_data)))
673 chip->irq_cpu_offline(&desc->irq_data);
674
675 raw_spin_unlock_irqrestore(&desc->lock, flags);
676 }
760} 677}
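The is_chained path of __irq_set_handler() above is what irq_set_chained_handler() relies on for demultiplexing parent lines, and irq_modify_status() is the tool for tweaking the per-irq settings it manipulates. A hedged sketch; my_read_pending() and MY_CHILD_IRQ_BASE are assumptions:

```c
#include <linux/irq.h>
#include <linux/bitops.h>

#define MY_CHILD_IRQ_BASE	64	/* assumed child irq range */

extern unsigned long my_read_pending(void);	/* assumed helper */

static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = my_read_pending();
	int bit;

	for_each_set_bit(bit, &pending, 8)
		generic_handle_irq(MY_CHILD_IRQ_BASE + bit);
}

static void __init my_setup_demux(unsigned int parent_irq, unsigned int child)
{
	/* parent: marked NOPROBE/NOREQUEST/NOTHREAD and started up above */
	irq_set_chained_handler(parent_irq, my_demux_handler);
	/* child: requestable by drivers, but excluded from autoprobing */
	irq_modify_status(child, IRQ_NOREQUEST, IRQ_NOPROBE);
}
```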
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
new file mode 100644
index 000000000000..97a8bfadc88a
--- /dev/null
+++ b/kernel/irq/debug.h
@@ -0,0 +1,45 @@
1/*
2 * Debugging printout:
3 */
4
5#include <linux/kallsyms.h>
6
7#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
8#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
9/* FIXME */
10#define PD(f) do { } while (0)
11
12static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
13{
14 printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
15 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
16 printk("->handle_irq(): %p, ", desc->handle_irq);
17 print_symbol("%s\n", (unsigned long)desc->handle_irq);
18 printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
19 print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
20 printk("->action(): %p\n", desc->action);
21 if (desc->action) {
22 printk("->action->handler(): %p, ", desc->action->handler);
23 print_symbol("%s\n", (unsigned long)desc->action->handler);
24 }
25
26 P(IRQ_LEVEL);
27 P(IRQ_PER_CPU);
28 P(IRQ_NOPROBE);
29 P(IRQ_NOREQUEST);
30 P(IRQ_NOTHREAD);
31 P(IRQ_NOAUTOEN);
32
33 PS(IRQS_AUTODETECT);
34 PS(IRQS_REPLAY);
35 PS(IRQS_WAITING);
36 PS(IRQS_PENDING);
37
38 PD(IRQS_INPROGRESS);
39 PD(IRQS_DISABLED);
40 PD(IRQS_MASKED);
41}
42
43#undef P
44#undef PS
45#undef PD
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
new file mode 100644
index 000000000000..b5fcd96c7102
--- /dev/null
+++ b/kernel/irq/dummychip.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
3 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
4 *
5 * This file contains the dummy interrupt chip implementation
6 */
7#include <linux/interrupt.h>
8#include <linux/irq.h>
9
10#include "internals.h"
11
12/*
13 * What should we do if we get a hw irq event on an illegal vector?
14 * Each architecture has to answer this themself.
15 */
16static void ack_bad(struct irq_data *data)
17{
18 struct irq_desc *desc = irq_data_to_desc(data);
19
20 print_irq_desc(data->irq, desc);
21 ack_bad_irq(data->irq);
22}
23
24/*
25 * NOP functions
26 */
27static void noop(struct irq_data *data) { }
28
29static unsigned int noop_ret(struct irq_data *data)
30{
31 return 0;
32}
33
34/*
35 * Generic no controller implementation
36 */
37struct irq_chip no_irq_chip = {
38 .name = "none",
39 .irq_startup = noop_ret,
40 .irq_shutdown = noop,
41 .irq_enable = noop,
42 .irq_disable = noop,
43 .irq_ack = ack_bad,
44};
45
46/*
47 * Generic dummy implementation which can be used for
48 * real dumb interrupt sources
49 */
50struct irq_chip dummy_irq_chip = {
51 .name = "dummy",
52 .irq_startup = noop_ret,
53 .irq_shutdown = noop,
54 .irq_enable = noop,
55 .irq_disable = noop,
56 .irq_ack = noop,
57 .irq_mask = noop,
58 .irq_unmask = noop,
59};
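dummy_irq_chip exists so that "really dumb" sources behind a demultiplexer, which have no controller of their own, still satisfy the chip checks at setup time; a one-line sketch of that use:

```c
static void __init my_setup_dumb_child(unsigned int child_irq)
{
	irq_set_chip_and_handler(child_irq, &dummy_irq_chip, handle_simple_irq);
}
```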
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
new file mode 100644
index 000000000000..3a2cab407b93
--- /dev/null
+++ b/kernel/irq/generic-chip.c
@@ -0,0 +1,368 @@
1/*
2 * Library implementing the most common irq chip callback functions
3 *
4 * Copyright (C) 2011, Thomas Gleixner
5 */
6#include <linux/io.h>
7#include <linux/irq.h>
8#include <linux/slab.h>
9#include <linux/interrupt.h>
10#include <linux/kernel_stat.h>
11#include <linux/syscore_ops.h>
12
13#include "internals.h"
14
15static LIST_HEAD(gc_list);
16static DEFINE_RAW_SPINLOCK(gc_lock);
17
18static inline struct irq_chip_regs *cur_regs(struct irq_data *d)
19{
20 return &container_of(d->chip, struct irq_chip_type, chip)->regs;
21}
22
23/**
24 * irq_gc_noop - NOOP function
25 * @d: irq_data
26 */
27void irq_gc_noop(struct irq_data *d)
28{
29}
30
31/**
32 * irq_gc_mask_disable_reg - Mask chip via disable register
33 * @d: irq_data
34 *
35 * Chip has separate enable/disable registers instead of a single mask
36 * register.
37 */
38void irq_gc_mask_disable_reg(struct irq_data *d)
39{
40 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
41 u32 mask = 1 << (d->irq - gc->irq_base);
42
43 irq_gc_lock(gc);
44 irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable);
45 gc->mask_cache &= ~mask;
46 irq_gc_unlock(gc);
47}
48
49/**
50 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
51 * @d: irq_data
52 *
53 * Chip has a single mask register. Values of this register are cached
54 * and protected by gc->lock
55 */
56void irq_gc_mask_set_bit(struct irq_data *d)
57{
58 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
59 u32 mask = 1 << (d->irq - gc->irq_base);
60
61 irq_gc_lock(gc);
62 gc->mask_cache |= mask;
63 irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
64 irq_gc_unlock(gc);
65}
66
67/**
68 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
69 * @d: irq_data
70 *
71 * Chip has a single mask register. Values of this register are cached
72 * and protected by gc->lock
73 */
74void irq_gc_mask_clr_bit(struct irq_data *d)
75{
76 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
77 u32 mask = 1 << (d->irq - gc->irq_base);
78
79 irq_gc_lock(gc);
80 gc->mask_cache &= ~mask;
81 irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
82 irq_gc_unlock(gc);
83}
84
85/**
86 * irq_gc_unmask_enable_reg - Unmask chip via enable register
87 * @d: irq_data
88 *
89 * Chip has separate enable/disable registers instead of a single mask
90 * register.
91 */
92void irq_gc_unmask_enable_reg(struct irq_data *d)
93{
94 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
95 u32 mask = 1 << (d->irq - gc->irq_base);
96
97 irq_gc_lock(gc);
98 irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable);
99 gc->mask_cache |= mask;
100 irq_gc_unlock(gc);
101}
102
103/**
104 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
105 * @d: irq_data
106 */
107void irq_gc_ack_set_bit(struct irq_data *d)
108{
109 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
110 u32 mask = 1 << (d->irq - gc->irq_base);
111
112 irq_gc_lock(gc);
113 irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
114 irq_gc_unlock(gc);
115}
116
117/**
118 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
119 * @d: irq_data
120 */
121void irq_gc_ack_clr_bit(struct irq_data *d)
122{
123 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
124 u32 mask = ~(1 << (d->irq - gc->irq_base));
125
126 irq_gc_lock(gc);
127 irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
128 irq_gc_unlock(gc);
129}
130
131/**
132 * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
133 * @d: irq_data
134 */
135void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
136{
137 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
138 u32 mask = 1 << (d->irq - gc->irq_base);
139
140 irq_gc_lock(gc);
141 irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask);
142 irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
143 irq_gc_unlock(gc);
144}
145
146/**
147 * irq_gc_eoi - EOI interrupt
148 * @d: irq_data
149 */
150void irq_gc_eoi(struct irq_data *d)
151{
152 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
153 u32 mask = 1 << (d->irq - gc->irq_base);
154
155 irq_gc_lock(gc);
156 irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi);
157 irq_gc_unlock(gc);
158}
159
160/**
161 * irq_gc_set_wake - Set/clr wake bit for an interrupt
162 * @d: irq_data
163 *
164 * For chips where the wake from suspend functionality is not
165 * configured in a separate register and the wakeup active state is
166 * just stored in a bitmask.
167 */
168int irq_gc_set_wake(struct irq_data *d, unsigned int on)
169{
170 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
171 u32 mask = 1 << (d->irq - gc->irq_base);
172
173 if (!(mask & gc->wake_enabled))
174 return -EINVAL;
175
176 irq_gc_lock(gc);
177 if (on)
178 gc->wake_active |= mask;
179 else
180 gc->wake_active &= ~mask;
181 irq_gc_unlock(gc);
182 return 0;
183}
184
185/**
186 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
187 * @name: Name of the irq chip
188 * @num_ct: Number of irq_chip_type instances associated with this
189 * @irq_base: Interrupt base nr for this chip
190 * @reg_base: Register base address (virtual)
191 * @handler: Default flow handler associated with this chip
192 *
193 * Returns an initialized irq_chip_generic structure. The chip defaults
194 * to the primary (index 0) irq_chip_type and @handler
195 */
196struct irq_chip_generic *
197irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
198 void __iomem *reg_base, irq_flow_handler_t handler)
199{
200 struct irq_chip_generic *gc;
201 unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
202
203 gc = kzalloc(sz, GFP_KERNEL);
204 if (gc) {
205 raw_spin_lock_init(&gc->lock);
206 gc->num_ct = num_ct;
207 gc->irq_base = irq_base;
208 gc->reg_base = reg_base;
209 gc->chip_types->chip.name = name;
210 gc->chip_types->handler = handler;
211 }
212 return gc;
213}
214
215/*
216 * Separate lockdep class for interrupt chip which can nest irq_desc
217 * lock.
218 */
219static struct lock_class_key irq_nested_lock_class;
220
221/**
222 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
223 * @gc: Generic irq chip holding all data
224 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
225 * @flags: Flags for initialization
226 * @clr: IRQ_* bits to clear
227 * @set: IRQ_* bits to set
228 *
229 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
230 * initializes all interrupts to the primary irq_chip_type and its
231 * associated handler.
232 */
233void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
234 enum irq_gc_flags flags, unsigned int clr,
235 unsigned int set)
236{
237 struct irq_chip_type *ct = gc->chip_types;
238 unsigned int i;
239
240 raw_spin_lock(&gc_lock);
241 list_add_tail(&gc->list, &gc_list);
242 raw_spin_unlock(&gc_lock);
243
244 /* Init mask cache ? */
245 if (flags & IRQ_GC_INIT_MASK_CACHE)
246 gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask);
247
248 for (i = gc->irq_base; msk; msk >>= 1, i++) {
249 if (!(msk & 0x01))
250 continue;
251
252 if (flags & IRQ_GC_INIT_NESTED_LOCK)
253 irq_set_lockdep_class(i, &irq_nested_lock_class);
254
255 irq_set_chip_and_handler(i, &ct->chip, ct->handler);
256 irq_set_chip_data(i, gc);
257 irq_modify_status(i, clr, set);
258 }
259 gc->irq_cnt = i - gc->irq_base;
260}
261
262/**
263 * irq_setup_alt_chip - Switch to alternative chip
264 * @d: irq_data for this interrupt
265 * @type: Flow type to be initialized
266 *
267 * Only to be called from chip->irq_set_type() callbacks.
268 */
269int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
270{
271 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
272 struct irq_chip_type *ct = gc->chip_types;
273 unsigned int i;
274
275 for (i = 0; i < gc->num_ct; i++, ct++) {
276 if (ct->type & type) {
277 d->chip = &ct->chip;
278 irq_data_to_desc(d)->handle_irq = ct->handler;
279 return 0;
280 }
281 }
282 return -EINVAL;
283}
284
285/**
286 * irq_remove_generic_chip - Remove a chip
287 * @gc: Generic irq chip holding all data
288 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
289 * @clr: IRQ_* bits to clear
290 * @set: IRQ_* bits to set
291 *
292 * Remove up to 32 interrupts starting from gc->irq_base.
293 */
294void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
295 unsigned int clr, unsigned int set)
296{
297 unsigned int i = gc->irq_base;
298
299 raw_spin_lock(&gc_lock);
300 list_del(&gc->list);
301 raw_spin_unlock(&gc_lock);
302
303 for (; msk; msk >>= 1, i++) {
304 if (!(msk & 0x01))
305 continue;
306
307 /* Remove handler first. That will mask the irq line */
308 irq_set_handler(i, NULL);
309 irq_set_chip(i, &no_irq_chip);
310 irq_set_chip_data(i, NULL);
311 irq_modify_status(i, clr, set);
312 }
313}
314
315#ifdef CONFIG_PM
316static int irq_gc_suspend(void)
317{
318 struct irq_chip_generic *gc;
319
320 list_for_each_entry(gc, &gc_list, list) {
321 struct irq_chip_type *ct = gc->chip_types;
322
323 if (ct->chip.irq_suspend)
324 ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base));
325 }
326 return 0;
327}
328
329static void irq_gc_resume(void)
330{
331 struct irq_chip_generic *gc;
332
333 list_for_each_entry(gc, &gc_list, list) {
334 struct irq_chip_type *ct = gc->chip_types;
335
336 if (ct->chip.irq_resume)
337 ct->chip.irq_resume(irq_get_irq_data(gc->irq_base));
338 }
339}
340#else
341#define irq_gc_suspend NULL
342#define irq_gc_resume NULL
343#endif
344
345static void irq_gc_shutdown(void)
346{
347 struct irq_chip_generic *gc;
348
349 list_for_each_entry(gc, &gc_list, list) {
350 struct irq_chip_type *ct = gc->chip_types;
351
352 if (ct->chip.irq_pm_shutdown)
353 ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base));
354 }
355}
356
357static struct syscore_ops irq_gc_syscore_ops = {
358 .suspend = irq_gc_suspend,
359 .resume = irq_gc_resume,
360 .shutdown = irq_gc_shutdown,
361};
362
363static int __init irq_gc_init_ops(void)
364{
365 register_syscore_ops(&irq_gc_syscore_ops);
366 return 0;
367}
368device_initcall(irq_gc_init_ops);
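A typical consumer allocates one generic chip per register block, fills in the irq_gc_* callbacks that match its register semantics, and activates a 32-irq range. A hedged sketch; the register offsets are assumptions:

```c
#include <linux/irq.h>

static void __init my_init_intc(void __iomem *reg_base, unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MYINTC", 1, irq_base, reg_base,
				    handle_level_irq);
	if (!gc)
		return;

	ct = gc->chip_types;
	ct->chip.irq_mask	= irq_gc_mask_set_bit;
	ct->chip.irq_unmask	= irq_gc_mask_clr_bit;
	ct->chip.irq_ack	= irq_gc_ack_set_bit;
	ct->regs.mask		= 0x04;		/* assumed register offsets */
	ct->regs.ack		= 0x08;

	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
}
```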
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 27e5c6911223..470d08c82bbe 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,24 +11,15 @@
11 */ 11 */
12 12
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/random.h> 14#include <linux/random.h>
15#include <linux/sched.h>
18#include <linux/interrupt.h> 16#include <linux/interrupt.h>
19#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
20#include <linux/rculist.h> 18
21#include <linux/hash.h>
22#include <linux/radix-tree.h>
23#include <trace/events/irq.h> 19#include <trace/events/irq.h>
24 20
25#include "internals.h" 21#include "internals.h"
26 22
27/*
28 * lockdep: we want to handle all irq_desc locks as a single lock-class:
29 */
30struct lock_class_key irq_desc_lock_class;
31
32/** 23/**
33 * handle_bad_irq - handle spurious and unhandled irqs 24 * handle_bad_irq - handle spurious and unhandled irqs
34 * @irq: the interrupt number 25 * @irq: the interrupt number
@@ -43,304 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
43 ack_bad_irq(irq); 34 ack_bad_irq(irq);
44} 35}
45 36
46#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
47static void __init init_irq_default_affinity(void)
48{
49 alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
50 cpumask_setall(irq_default_affinity);
51}
52#else
53static void __init init_irq_default_affinity(void)
54{
55}
56#endif
57
58/*
59 * Linux has a controller-independent interrupt architecture.
60 * Every controller has a 'controller-template', that is used
61 * by the main code to do the right thing. Each driver-visible
62 * interrupt source is transparently wired to the appropriate
63 * controller. Thus drivers need not be aware of the
64 * interrupt-controller.
65 *
66 * The code is designed to be easily extended with new/different
67 * interrupt controllers, without having to do assembly magic or
68 * having to touch the generic code.
69 *
70 * Controller mappings for all interrupt sources:
71 */
72int nr_irqs = NR_IRQS;
73EXPORT_SYMBOL_GPL(nr_irqs);
74
75#ifdef CONFIG_SPARSE_IRQ
76
77static struct irq_desc irq_desc_init = {
78 .irq = -1,
79 .status = IRQ_DISABLED,
80 .chip = &no_irq_chip,
81 .handle_irq = handle_bad_irq,
82 .depth = 1,
83 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
84};
85
86void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
87{
88 void *ptr;
89
90 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
91 GFP_ATOMIC, node);
92
93 /*
 94 * don't overwrite if we cannot get a new one;
95 * init_copy_kstat_irqs() could still use old one
96 */
97 if (ptr) {
98 printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
99 desc->kstat_irqs = ptr;
100 }
101}
102
103static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
104{
105 memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
106
107 raw_spin_lock_init(&desc->lock);
108 desc->irq = irq;
109#ifdef CONFIG_SMP
110 desc->node = node;
111#endif
112 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
113 init_kstat_irqs(desc, node, nr_cpu_ids);
114 if (!desc->kstat_irqs) {
115 printk(KERN_ERR "can not alloc kstat_irqs\n");
116 BUG_ON(1);
117 }
118 if (!alloc_desc_masks(desc, node, false)) {
119 printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
120 BUG_ON(1);
121 }
122 init_desc_masks(desc);
123 arch_init_chip_data(desc, node);
124}
125
126/*
127 * Protect the sparse_irqs:
128 */
129DEFINE_RAW_SPINLOCK(sparse_irq_lock);
130
131static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
132
133static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
134{
135 radix_tree_insert(&irq_desc_tree, irq, desc);
136}
137
138struct irq_desc *irq_to_desc(unsigned int irq)
139{
140 return radix_tree_lookup(&irq_desc_tree, irq);
141}
142
143void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
144{
145 void **ptr;
146
147 ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
148 if (ptr)
149 radix_tree_replace_slot(ptr, desc);
150}
151
152static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
153 [0 ... NR_IRQS_LEGACY-1] = {
154 .irq = -1,
155 .status = IRQ_DISABLED,
156 .chip = &no_irq_chip,
157 .handle_irq = handle_bad_irq,
158 .depth = 1,
159 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
160 }
161};
162
163static unsigned int *kstat_irqs_legacy;
164
165int __init early_irq_init(void)
166{
167 struct irq_desc *desc;
168 int legacy_count;
169 int node;
170 int i;
171
172 init_irq_default_affinity();
173
174 /* initialize nr_irqs based on nr_cpu_ids */
175 arch_probe_nr_irqs();
176 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
177
178 desc = irq_desc_legacy;
179 legacy_count = ARRAY_SIZE(irq_desc_legacy);
180 node = first_online_node;
181
182 /* allocate based on nr_cpu_ids */
183 kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
184 sizeof(int), GFP_NOWAIT, node);
185
186 for (i = 0; i < legacy_count; i++) {
187 desc[i].irq = i;
188#ifdef CONFIG_SMP
189 desc[i].node = node;
190#endif
191 desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
192 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
193 alloc_desc_masks(&desc[i], node, true);
194 init_desc_masks(&desc[i]);
195 set_irq_desc(i, &desc[i]);
196 }
197
198 return arch_early_irq_init();
199}
200
201struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
202{
203 struct irq_desc *desc;
204 unsigned long flags;
205
206 if (irq >= nr_irqs) {
207 WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
208 irq, nr_irqs);
209 return NULL;
210 }
211
212 desc = irq_to_desc(irq);
213 if (desc)
214 return desc;
215
216 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
217
218 /* We have to check it to avoid races with another CPU */
219 desc = irq_to_desc(irq);
220 if (desc)
221 goto out_unlock;
222
223 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
224
225 printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
226 if (!desc) {
227 printk(KERN_ERR "can not alloc irq_desc\n");
228 BUG_ON(1);
229 }
230 init_one_irq_desc(irq, desc, node);
231
232 set_irq_desc(irq, desc);
233
234out_unlock:
235 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
236
237 return desc;
238}
239
240#else /* !CONFIG_SPARSE_IRQ */
241
242struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
243 [0 ... NR_IRQS-1] = {
244 .status = IRQ_DISABLED,
245 .chip = &no_irq_chip,
246 .handle_irq = handle_bad_irq,
247 .depth = 1,
248 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
249 }
250};
251
252static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
253int __init early_irq_init(void)
254{
255 struct irq_desc *desc;
256 int count;
257 int i;
258
259 init_irq_default_affinity();
260
261 printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
262
263 desc = irq_desc;
264 count = ARRAY_SIZE(irq_desc);
265
266 for (i = 0; i < count; i++) {
267 desc[i].irq = i;
268 alloc_desc_masks(&desc[i], 0, true);
269 init_desc_masks(&desc[i]);
270 desc[i].kstat_irqs = kstat_irqs_all[i];
271 }
272 return arch_early_irq_init();
273}
274
275struct irq_desc *irq_to_desc(unsigned int irq)
276{
277 return (irq < NR_IRQS) ? irq_desc + irq : NULL;
278}
279
280struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
281{
282 return irq_to_desc(irq);
283}
284#endif /* !CONFIG_SPARSE_IRQ */
285
286void clear_kstat_irqs(struct irq_desc *desc)
287{
288 memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
289}
290
291/*
292 * What should we do if we get a hw irq event on an illegal vector?
 293 * Each architecture has to answer this itself.
294 */
295static void ack_bad(unsigned int irq)
296{
297 struct irq_desc *desc = irq_to_desc(irq);
298
299 print_irq_desc(irq, desc);
300 ack_bad_irq(irq);
301}
302
303/*
304 * NOP functions
305 */
306static void noop(unsigned int irq)
307{
308}
309
310static unsigned int noop_ret(unsigned int irq)
311{
312 return 0;
313}
314
315/*
316 * Generic no controller implementation
317 */
318struct irq_chip no_irq_chip = {
319 .name = "none",
320 .startup = noop_ret,
321 .shutdown = noop,
322 .enable = noop,
323 .disable = noop,
324 .ack = ack_bad,
325 .end = noop,
326};
327
328/*
329 * Generic dummy implementation which can be used for
330 * real dumb interrupt sources
331 */
332struct irq_chip dummy_irq_chip = {
333 .name = "dummy",
334 .startup = noop_ret,
335 .shutdown = noop,
336 .enable = noop,
337 .disable = noop,
338 .ack = noop,
339 .mask = noop,
340 .unmask = noop,
341 .end = noop,
342};
343
344/* 37/*
345 * Special, empty irq handler: 38 * Special, empty irq handler:
346 */ 39 */
@@ -358,31 +51,87 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
358 "but no thread function available.", irq, action->name); 51 "but no thread function available.", irq, action->name);
359} 52}
360 53
361/** 54static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
362 * handle_IRQ_event - irq action chain handler 55{
363 * @irq: the interrupt number 56 /*
364 * @action: the interrupt action chain for this irq 57 * Wake up the handler thread for this action. In case the
365 * 58 * thread crashed and was killed we just pretend that we
366 * Handles the action chain of an irq event 59 * handled the interrupt. The hardirq handler has disabled the
367 */ 60 * device interrupt, so no irq storm is lurking. If the
368irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) 61 * RUNTHREAD bit is already set, nothing to do.
62 */
63 if (test_bit(IRQTF_DIED, &action->thread_flags) ||
64 test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
65 return;
66
67 /*
68 * It's safe to OR the mask lockless here. We have only two
69 * places which write to threads_oneshot: This code and the
70 * irq thread.
71 *
72 * This code is the hard irq context and can never run on two
73 * cpus in parallel. If it ever does we have more serious
74 * problems than this bitmask.
75 *
76 * The irq threads of this irq which clear their "running" bit
77 * in threads_oneshot are serialized via desc->lock against
78 * each other and they are serialized against this code by
79 * IRQS_INPROGRESS.
80 *
81 * Hard irq handler:
82 *
83 * spin_lock(desc->lock);
84 * desc->state |= IRQS_INPROGRESS;
85 * spin_unlock(desc->lock);
86 * set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
87 * desc->threads_oneshot |= mask;
88 * spin_lock(desc->lock);
89 * desc->state &= ~IRQS_INPROGRESS;
90 * spin_unlock(desc->lock);
91 *
92 * irq thread:
93 *
94 * again:
95 * spin_lock(desc->lock);
96 * if (desc->state & IRQS_INPROGRESS) {
97 * spin_unlock(desc->lock);
98 * while(desc->state & IRQS_INPROGRESS)
99 * cpu_relax();
100 * goto again;
101 * }
102 * if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
103 * desc->threads_oneshot &= ~mask;
104 * spin_unlock(desc->lock);
105 *
106 * So either the thread waits for us to clear IRQS_INPROGRESS
107 * or we are waiting in the flow handler for desc->lock to be
108 * released before we reach this point. The thread also checks
109 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
110 * threads_oneshot untouched and runs the thread another time.
111 */
112 desc->threads_oneshot |= action->thread_mask;
113 wake_up_process(action->thread);
114}
115
116irqreturn_t
117handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
369{ 118{
370 irqreturn_t ret, retval = IRQ_NONE; 119 irqreturn_t retval = IRQ_NONE;
371 unsigned int status = 0; 120 unsigned int random = 0, irq = desc->irq_data.irq;
372 121
373 do { 122 do {
123 irqreturn_t res;
124
374 trace_irq_handler_entry(irq, action); 125 trace_irq_handler_entry(irq, action);
375 ret = action->handler(irq, action->dev_id); 126 res = action->handler(irq, action->dev_id);
376 trace_irq_handler_exit(irq, action, ret); 127 trace_irq_handler_exit(irq, action, res);
377 128
 378 switch (ret) { 129 if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pF enabled interrupts\n",
379 case IRQ_WAKE_THREAD: 130 irq, action->handler))
380 /* 131 local_irq_disable();
381 * Set result to handled so the spurious check
382 * does not trigger.
383 */
384 ret = IRQ_HANDLED;
385 132
133 switch (res) {
134 case IRQ_WAKE_THREAD:
386 /* 135 /*
387 * Catch drivers which return WAKE_THREAD but 136 * Catch drivers which return WAKE_THREAD but
388 * did not set up a thread function 137 * did not set up a thread function
@@ -392,165 +141,41 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
392 break; 141 break;
393 } 142 }
394 143
395 /* 144 irq_wake_thread(desc, action);
396 * Wake up the handler thread for this
397 * action. In case the thread crashed and was
398 * killed we just pretend that we handled the
399 * interrupt. The hardirq handler above has
400 * disabled the device interrupt, so no irq
401 * storm is lurking.
402 */
403 if (likely(!test_bit(IRQTF_DIED,
404 &action->thread_flags))) {
405 set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
406 wake_up_process(action->thread);
407 }
408 145
409 /* Fall through to add to randomness */ 146 /* Fall through to add to randomness */
410 case IRQ_HANDLED: 147 case IRQ_HANDLED:
411 status |= action->flags; 148 random |= action->flags;
412 break; 149 break;
413 150
414 default: 151 default:
415 break; 152 break;
416 } 153 }
417 154
418 retval |= ret; 155 retval |= res;
419 action = action->next; 156 action = action->next;
420 } while (action); 157 } while (action);
421 158
422 if (status & IRQF_SAMPLE_RANDOM) 159 if (random & IRQF_SAMPLE_RANDOM)
423 add_interrupt_randomness(irq); 160 add_interrupt_randomness(irq);
424 local_irq_disable();
425 161
162 if (!noirqdebug)
163 note_interrupt(irq, desc, retval);
426 return retval; 164 return retval;
427} 165}
428 166
429#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 167irqreturn_t handle_irq_event(struct irq_desc *desc)
430
431#ifdef CONFIG_ENABLE_WARN_DEPRECATED
432# warning __do_IRQ is deprecated. Please convert to proper flow handlers
433#endif
434
435/**
436 * __do_IRQ - original all in one highlevel IRQ handler
437 * @irq: the interrupt number
438 *
439 * __do_IRQ handles all normal device IRQ's (the special
440 * SMP cross-CPU interrupts have their own specific
441 * handlers).
442 *
443 * This is the original x86 implementation which is used for every
444 * interrupt type.
445 */
446unsigned int __do_IRQ(unsigned int irq)
447{ 168{
448 struct irq_desc *desc = irq_to_desc(irq); 169 struct irqaction *action = desc->action;
449 struct irqaction *action; 170 irqreturn_t ret;
450 unsigned int status;
451
452 kstat_incr_irqs_this_cpu(irq, desc);
453
454 if (CHECK_IRQ_PER_CPU(desc->status)) {
455 irqreturn_t action_ret;
456
457 /*
458 * No locking required for CPU-local interrupts:
459 */
460 if (desc->chip->ack)
461 desc->chip->ack(irq);
462 if (likely(!(desc->status & IRQ_DISABLED))) {
463 action_ret = handle_IRQ_event(irq, desc->action);
464 if (!noirqdebug)
465 note_interrupt(irq, desc, action_ret);
466 }
467 desc->chip->end(irq);
468 return 1;
469 }
470
471 raw_spin_lock(&desc->lock);
472 if (desc->chip->ack)
473 desc->chip->ack(irq);
474 /*
475 * REPLAY is when Linux resends an IRQ that was dropped earlier
476 * WAITING is used by probe to mark irqs that are being tested
477 */
478 status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
479 status |= IRQ_PENDING; /* we _want_ to handle it */
480
481 /*
482 * If the IRQ is disabled for whatever reason, we cannot
483 * use the action we have.
484 */
485 action = NULL;
486 if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
487 action = desc->action;
488 status &= ~IRQ_PENDING; /* we commit to handling */
489 status |= IRQ_INPROGRESS; /* we are handling it */
490 }
491 desc->status = status;
492 171
493 /* 172 desc->istate &= ~IRQS_PENDING;
494 * If there is no IRQ handler or it was disabled, exit early. 173 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
495 * Since we set PENDING, if another processor is handling
496 * a different instance of this same irq, the other processor
497 * will take care of it.
498 */
499 if (unlikely(!action))
500 goto out;
501
502 /*
503 * Edge triggered interrupts need to remember
504 * pending events.
505 * This applies to any hw interrupts that allow a second
506 * instance of the same irq to arrive while we are in do_IRQ
507 * or in the handler. But the code here only handles the _second_
508 * instance of the irq, not the third or fourth. So it is mostly
509 * useful for irq hardware that does not mask cleanly in an
510 * SMP environment.
511 */
512 for (;;) {
513 irqreturn_t action_ret;
514
515 raw_spin_unlock(&desc->lock);
516
517 action_ret = handle_IRQ_event(irq, action);
518 if (!noirqdebug)
519 note_interrupt(irq, desc, action_ret);
520
521 raw_spin_lock(&desc->lock);
522 if (likely(!(desc->status & IRQ_PENDING)))
523 break;
524 desc->status &= ~IRQ_PENDING;
525 }
526 desc->status &= ~IRQ_INPROGRESS;
527
528out:
529 /*
530 * The ->end() handler has to deal with interrupts which got
531 * disabled while the handler was running.
532 */
533 desc->chip->end(irq);
534 raw_spin_unlock(&desc->lock); 174 raw_spin_unlock(&desc->lock);
535 175
536 return 1; 176 ret = handle_irq_event_percpu(desc, action);
537}
538#endif
539
540void early_init_irq_lock_class(void)
541{
542 struct irq_desc *desc;
543 int i;
544
545 for_each_irq_desc(i, desc) {
546 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
547 }
548}
549 177
550unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 178 raw_spin_lock(&desc->lock);
551{ 179 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
552 struct irq_desc *desc = irq_to_desc(irq); 180 return ret;
553 return desc ? desc->kstat_irqs[cpu] : 0;
554} 181}
555EXPORT_SYMBOL(kstat_irqs_cpu);
556
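With handle_IRQ_event() and __do_IRQ() gone, the flow handlers in chip.c reduce to thin wrappers around handle_irq_event(). A simplified level-type handler built on the new helpers would look roughly like this (a sketch of the pattern, not the exact chip.c code; mask_ack_irq() is a chip.c-internal helper):

static void foo_handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!desc->action || irqd_irq_disabled(&desc->irq_data))
		goto out_unlock;

	kstat_incr_irqs_this_cpu(irq, desc);
	/* drops and retakes desc->lock around the action chain */
	handle_irq_event(desc);

	if (!irqd_irq_disabled(&desc->irq_data))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}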
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c63f3bc88f0b..6546431447d7 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -1,95 +1,171 @@
1/* 1/*
2 * IRQ subsystem internal functions and variables: 2 * IRQ subsystem internal functions and variables:
3 *
4 * Do not ever include this file from anything else than
5 * kernel/irq/. Do not even think about using any information outside
 6 * of this file for your non-core code.
3 */ 7 */
8#include <linux/irqdesc.h>
9
10#ifdef CONFIG_SPARSE_IRQ
11# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
12#else
13# define IRQ_BITMAP_BITS NR_IRQS
14#endif
15
16#define istate core_internal_state__do_not_mess_with_it
4 17
5extern int noirqdebug; 18extern int noirqdebug;
6 19
7/* Set default functions for irq_chip structures: */ 20/*
8extern void irq_chip_set_defaults(struct irq_chip *chip); 21 * Bits used by threaded handlers:
22 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
23 * IRQTF_DIED - handler thread died
24 * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
25 * IRQTF_AFFINITY - irq thread is requested to adjust affinity
26 * IRQTF_FORCED_THREAD - irq action is force threaded
27 */
28enum {
29 IRQTF_RUNTHREAD,
30 IRQTF_DIED,
31 IRQTF_WARNED,
32 IRQTF_AFFINITY,
33 IRQTF_FORCED_THREAD,
34};
9 35
10/* Set default handler: */ 36/*
11extern void compat_irq_chip_set_default_handler(struct irq_desc *desc); 37 * Bit masks for desc->state
38 *
39 * IRQS_AUTODETECT - autodetection in progress
40 * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt
41 * detection
42 * IRQS_POLL_INPROGRESS - polling in progress
43 * IRQS_ONESHOT - irq is not unmasked in primary handler
44 * IRQS_REPLAY - irq is replayed
45 * IRQS_WAITING - irq is waiting
46 * IRQS_PENDING - irq is pending and replayed later
47 * IRQS_SUSPENDED - irq is suspended
48 */
49enum {
50 IRQS_AUTODETECT = 0x00000001,
51 IRQS_SPURIOUS_DISABLED = 0x00000002,
52 IRQS_POLL_INPROGRESS = 0x00000008,
53 IRQS_ONESHOT = 0x00000020,
54 IRQS_REPLAY = 0x00000040,
55 IRQS_WAITING = 0x00000080,
56 IRQS_PENDING = 0x00000200,
57 IRQS_SUSPENDED = 0x00000800,
58};
59
60#include "debug.h"
61#include "settings.h"
62
63#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
12 64
13extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 65extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
14 unsigned long flags); 66 unsigned long flags);
15extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); 67extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
16extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); 68extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
17 69
18extern struct lock_class_key irq_desc_lock_class; 70extern int irq_startup(struct irq_desc *desc);
71extern void irq_shutdown(struct irq_desc *desc);
72extern void irq_enable(struct irq_desc *desc);
73extern void irq_disable(struct irq_desc *desc);
74extern void mask_irq(struct irq_desc *desc);
75extern void unmask_irq(struct irq_desc *desc);
76
19extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 77extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
20extern void clear_kstat_irqs(struct irq_desc *desc);
21extern raw_spinlock_t sparse_irq_lock;
22 78
23#ifdef CONFIG_SPARSE_IRQ 79irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
24void replace_irq_desc(unsigned int irq, struct irq_desc *desc); 80irqreturn_t handle_irq_event(struct irq_desc *desc);
25#endif 81
 82/* Resending of interrupts: */
83void check_irq_resend(struct irq_desc *desc, unsigned int irq);
84bool irq_wait_for_poll(struct irq_desc *desc);
26 85
27#ifdef CONFIG_PROC_FS 86#ifdef CONFIG_PROC_FS
28extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); 87extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
88extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
29extern void register_handler_proc(unsigned int irq, struct irqaction *action); 89extern void register_handler_proc(unsigned int irq, struct irqaction *action);
30extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); 90extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
31#else 91#else
32static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } 92static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
93static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
33static inline void register_handler_proc(unsigned int irq, 94static inline void register_handler_proc(unsigned int irq,
34 struct irqaction *action) { } 95 struct irqaction *action) { }
35static inline void unregister_handler_proc(unsigned int irq, 96static inline void unregister_handler_proc(unsigned int irq,
36 struct irqaction *action) { } 97 struct irqaction *action) { }
37#endif 98#endif
38 99
39extern int irq_select_affinity_usr(unsigned int irq); 100extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
40 101
41extern void irq_set_thread_affinity(struct irq_desc *desc); 102extern void irq_set_thread_affinity(struct irq_desc *desc);
42 103
43/* Inline functions for support of irq chips on slow busses */ 104/* Inline functions for support of irq chips on slow busses */
44static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) 105static inline void chip_bus_lock(struct irq_desc *desc)
106{
107 if (unlikely(desc->irq_data.chip->irq_bus_lock))
108 desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
109}
110
111static inline void chip_bus_sync_unlock(struct irq_desc *desc)
112{
113 if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
114 desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
115}
116
117struct irq_desc *
118__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
119void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
120
121static inline struct irq_desc *
122irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
123{
124 return __irq_get_desc_lock(irq, flags, true);
125}
126
127static inline void
128irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
45{ 129{
46 if (unlikely(desc->chip->bus_lock)) 130 __irq_put_desc_unlock(desc, flags, true);
47 desc->chip->bus_lock(irq);
48} 131}
49 132
50static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) 133static inline struct irq_desc *
134irq_get_desc_lock(unsigned int irq, unsigned long *flags)
51{ 135{
52 if (unlikely(desc->chip->bus_sync_unlock)) 136 return __irq_get_desc_lock(irq, flags, false);
53 desc->chip->bus_sync_unlock(irq); 137}
138
139static inline void
140irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
141{
142 __irq_put_desc_unlock(desc, flags, false);
54} 143}
55 144
56/* 145/*
57 * Debugging printout: 146 * Manipulation functions for irq_data.state
58 */ 147 */
148static inline void irqd_set_move_pending(struct irq_data *d)
149{
150 d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
151}
59 152
60#include <linux/kallsyms.h> 153static inline void irqd_clr_move_pending(struct irq_data *d)
61 154{
62#define P(f) if (desc->status & f) printk("%14s set\n", #f) 155 d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
156}
63 157
64static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) 158static inline void irqd_clear(struct irq_data *d, unsigned int mask)
65{ 159{
66 printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", 160 d->state_use_accessors &= ~mask;
67 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
68 printk("->handle_irq(): %p, ", desc->handle_irq);
69 print_symbol("%s\n", (unsigned long)desc->handle_irq);
70 printk("->chip(): %p, ", desc->chip);
71 print_symbol("%s\n", (unsigned long)desc->chip);
72 printk("->action(): %p\n", desc->action);
73 if (desc->action) {
74 printk("->action->handler(): %p, ", desc->action->handler);
75 print_symbol("%s\n", (unsigned long)desc->action->handler);
76 }
77
78 P(IRQ_INPROGRESS);
79 P(IRQ_DISABLED);
80 P(IRQ_PENDING);
81 P(IRQ_REPLAY);
82 P(IRQ_AUTODETECT);
83 P(IRQ_WAITING);
84 P(IRQ_LEVEL);
85 P(IRQ_MASKED);
86#ifdef CONFIG_IRQ_PER_CPU
87 P(IRQ_PER_CPU);
88#endif
89 P(IRQ_NOPROBE);
90 P(IRQ_NOREQUEST);
91 P(IRQ_NOAUTOEN);
92} 161}
93 162
94#undef P 163static inline void irqd_set(struct irq_data *d, unsigned int mask)
164{
165 d->state_use_accessors |= mask;
166}
95 167
168static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
169{
170 return d->state_use_accessors & mask;
171}
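These accessors keep all irq_data.state manipulation inside kernel/irq/. Condensed from handle_irq_event() above and setup_affinity() in manage.c below, the usage pattern is essentially:

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	/* run the action chain with desc->lock dropped */
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET))
		set = desc->irq_data.affinity;	/* keep the user's choice */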
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
new file mode 100644
index 000000000000..4c60a50e66b2
--- /dev/null
+++ b/kernel/irq/irqdesc.c
@@ -0,0 +1,466 @@
1/*
2 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
3 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
4 *
5 * This file contains the interrupt descriptor management code
6 *
7 * Detailed information is available in Documentation/DocBook/genericirq
8 *
9 */
10#include <linux/irq.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/interrupt.h>
14#include <linux/kernel_stat.h>
15#include <linux/radix-tree.h>
16#include <linux/bitmap.h>
17
18#include "internals.h"
19
20/*
21 * lockdep: we want to handle all irq_desc locks as a single lock-class:
22 */
23static struct lock_class_key irq_desc_lock_class;
24
25#if defined(CONFIG_SMP)
26static void __init init_irq_default_affinity(void)
27{
28 alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
29 cpumask_setall(irq_default_affinity);
30}
31#else
32static void __init init_irq_default_affinity(void)
33{
34}
35#endif
36
37#ifdef CONFIG_SMP
38static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
39{
40 if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
41 return -ENOMEM;
42
43#ifdef CONFIG_GENERIC_PENDING_IRQ
44 if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
45 free_cpumask_var(desc->irq_data.affinity);
46 return -ENOMEM;
47 }
48#endif
49 return 0;
50}
51
52static void desc_smp_init(struct irq_desc *desc, int node)
53{
54 desc->irq_data.node = node;
55 cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
56#ifdef CONFIG_GENERIC_PENDING_IRQ
57 cpumask_clear(desc->pending_mask);
58#endif
59}
60
61static inline int desc_node(struct irq_desc *desc)
62{
63 return desc->irq_data.node;
64}
65
66#else
67static inline int
68alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
69static inline void desc_smp_init(struct irq_desc *desc, int node) { }
70static inline int desc_node(struct irq_desc *desc) { return 0; }
71#endif
72
73static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
74{
75 int cpu;
76
77 desc->irq_data.irq = irq;
78 desc->irq_data.chip = &no_irq_chip;
79 desc->irq_data.chip_data = NULL;
80 desc->irq_data.handler_data = NULL;
81 desc->irq_data.msi_desc = NULL;
82 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
83 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
84 desc->handle_irq = handle_bad_irq;
85 desc->depth = 1;
86 desc->irq_count = 0;
87 desc->irqs_unhandled = 0;
88 desc->name = NULL;
89 for_each_possible_cpu(cpu)
90 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
91 desc_smp_init(desc, node);
92}
93
94int nr_irqs = NR_IRQS;
95EXPORT_SYMBOL_GPL(nr_irqs);
96
97static DEFINE_MUTEX(sparse_irq_lock);
98static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
99
100#ifdef CONFIG_SPARSE_IRQ
101
102static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
103
104static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
105{
106 radix_tree_insert(&irq_desc_tree, irq, desc);
107}
108
109struct irq_desc *irq_to_desc(unsigned int irq)
110{
111 return radix_tree_lookup(&irq_desc_tree, irq);
112}
113
114static void delete_irq_desc(unsigned int irq)
115{
116 radix_tree_delete(&irq_desc_tree, irq);
117}
118
119#ifdef CONFIG_SMP
120static void free_masks(struct irq_desc *desc)
121{
122#ifdef CONFIG_GENERIC_PENDING_IRQ
123 free_cpumask_var(desc->pending_mask);
124#endif
125 free_cpumask_var(desc->irq_data.affinity);
126}
127#else
128static inline void free_masks(struct irq_desc *desc) { }
129#endif
130
131static struct irq_desc *alloc_desc(int irq, int node)
132{
133 struct irq_desc *desc;
134 gfp_t gfp = GFP_KERNEL;
135
136 desc = kzalloc_node(sizeof(*desc), gfp, node);
137 if (!desc)
138 return NULL;
139 /* allocate based on nr_cpu_ids */
140 desc->kstat_irqs = alloc_percpu(unsigned int);
141 if (!desc->kstat_irqs)
142 goto err_desc;
143
144 if (alloc_masks(desc, gfp, node))
145 goto err_kstat;
146
147 raw_spin_lock_init(&desc->lock);
148 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
149
150 desc_set_defaults(irq, desc, node);
151
152 return desc;
153
154err_kstat:
155 free_percpu(desc->kstat_irqs);
156err_desc:
157 kfree(desc);
158 return NULL;
159}
160
161static void free_desc(unsigned int irq)
162{
163 struct irq_desc *desc = irq_to_desc(irq);
164
165 unregister_irq_proc(irq, desc);
166
167 mutex_lock(&sparse_irq_lock);
168 delete_irq_desc(irq);
169 mutex_unlock(&sparse_irq_lock);
170
171 free_masks(desc);
172 free_percpu(desc->kstat_irqs);
173 kfree(desc);
174}
175
176static int alloc_descs(unsigned int start, unsigned int cnt, int node)
177{
178 struct irq_desc *desc;
179 int i;
180
181 for (i = 0; i < cnt; i++) {
182 desc = alloc_desc(start + i, node);
183 if (!desc)
184 goto err;
185 mutex_lock(&sparse_irq_lock);
186 irq_insert_desc(start + i, desc);
187 mutex_unlock(&sparse_irq_lock);
188 }
189 return start;
190
191err:
192 for (i--; i >= 0; i--)
193 free_desc(start + i);
194
195 mutex_lock(&sparse_irq_lock);
196 bitmap_clear(allocated_irqs, start, cnt);
197 mutex_unlock(&sparse_irq_lock);
198 return -ENOMEM;
199}
200
201static int irq_expand_nr_irqs(unsigned int nr)
202{
203 if (nr > IRQ_BITMAP_BITS)
204 return -ENOMEM;
205 nr_irqs = nr;
206 return 0;
207}
208
209int __init early_irq_init(void)
210{
211 int i, initcnt, node = first_online_node;
212 struct irq_desc *desc;
213
214 init_irq_default_affinity();
215
216 /* Let arch update nr_irqs and return the nr of preallocated irqs */
217 initcnt = arch_probe_nr_irqs();
218 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
219
220 if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
221 nr_irqs = IRQ_BITMAP_BITS;
222
223 if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
224 initcnt = IRQ_BITMAP_BITS;
225
226 if (initcnt > nr_irqs)
227 nr_irqs = initcnt;
228
229 for (i = 0; i < initcnt; i++) {
230 desc = alloc_desc(i, node);
231 set_bit(i, allocated_irqs);
232 irq_insert_desc(i, desc);
233 }
234 return arch_early_irq_init();
235}
236
237#else /* !CONFIG_SPARSE_IRQ */
238
239struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
240 [0 ... NR_IRQS-1] = {
241 .handle_irq = handle_bad_irq,
242 .depth = 1,
243 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
244 }
245};
246
247int __init early_irq_init(void)
248{
249 int count, i, node = first_online_node;
250 struct irq_desc *desc;
251
252 init_irq_default_affinity();
253
254 printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
255
256 desc = irq_desc;
257 count = ARRAY_SIZE(irq_desc);
258
259 for (i = 0; i < count; i++) {
260 desc[i].kstat_irqs = alloc_percpu(unsigned int);
261 alloc_masks(&desc[i], GFP_KERNEL, node);
262 raw_spin_lock_init(&desc[i].lock);
263 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
264 desc_set_defaults(i, &desc[i], node);
265 }
266 return arch_early_irq_init();
267}
268
269struct irq_desc *irq_to_desc(unsigned int irq)
270{
271 return (irq < NR_IRQS) ? irq_desc + irq : NULL;
272}
273
274static void free_desc(unsigned int irq)
275{
276 dynamic_irq_cleanup(irq);
277}
278
279static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
280{
281 return start;
282}
283
284static int irq_expand_nr_irqs(unsigned int nr)
285{
286 return -ENOMEM;
287}
288
289#endif /* !CONFIG_SPARSE_IRQ */
290
291/**
292 * generic_handle_irq - Invoke the handler for a particular irq
293 * @irq: The irq number to handle
294 *
295 */
296int generic_handle_irq(unsigned int irq)
297{
298 struct irq_desc *desc = irq_to_desc(irq);
299
300 if (!desc)
301 return -EINVAL;
302 generic_handle_irq_desc(irq, desc);
303 return 0;
304}
305EXPORT_SYMBOL_GPL(generic_handle_irq);
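generic_handle_irq() is what cascaded/demultiplexing drivers call after decoding their own status register. A sketch of such a demux handler (the driver structure, pending register, and irq_base are hypothetical):

static irqreturn_t foo_demux_handler(int irq, void *data)
{
	struct foo_chip *chip = data;
	unsigned long pending = readl(chip->base + FOO_PENDING);
	int bit;

	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(chip->irq_base + bit);

	return pending ? IRQ_HANDLED : IRQ_NONE;
}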
306
307/* Dynamic interrupt handling */
308
309/**
310 * irq_free_descs - free irq descriptors
311 * @from: Start of descriptor range
312 * @cnt: Number of consecutive irqs to free
313 */
314void irq_free_descs(unsigned int from, unsigned int cnt)
315{
316 int i;
317
318 if (from >= nr_irqs || (from + cnt) > nr_irqs)
319 return;
320
321 for (i = 0; i < cnt; i++)
322 free_desc(from + i);
323
324 mutex_lock(&sparse_irq_lock);
325 bitmap_clear(allocated_irqs, from, cnt);
326 mutex_unlock(&sparse_irq_lock);
327}
328EXPORT_SYMBOL_GPL(irq_free_descs);
329
330/**
331 * irq_alloc_descs - allocate and initialize a range of irq descriptors
332 * @irq: Allocate for specific irq number if irq >= 0
333 * @from: Start the search from this irq number
334 * @cnt: Number of consecutive irqs to allocate.
335 * @node: Preferred node on which the irq descriptor should be allocated
336 *
337 * Returns the first irq number or error code
338 */
339int __ref
340irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
341{
342 int start, ret;
343
344 if (!cnt)
345 return -EINVAL;
346
347 if (irq >= 0) {
348 if (from > irq)
349 return -EINVAL;
350 from = irq;
351 }
352
353 mutex_lock(&sparse_irq_lock);
354
355 start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
356 from, cnt, 0);
357 ret = -EEXIST;
 358 if (irq >= 0 && start != irq)
359 goto err;
360
361 if (start + cnt > nr_irqs) {
362 ret = irq_expand_nr_irqs(start + cnt);
363 if (ret)
364 goto err;
365 }
366
367 bitmap_set(allocated_irqs, start, cnt);
368 mutex_unlock(&sparse_irq_lock);
369 return alloc_descs(start, cnt, node);
370
371err:
372 mutex_unlock(&sparse_irq_lock);
373 return ret;
374}
375EXPORT_SYMBOL_GPL(irq_alloc_descs);
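Allocation is normally paired with irq_free_descs() on teardown. A sketch of a driver grabbing four dynamically numbered descriptors near its device (dev is the driver's struct device; names hypothetical):

	int base = irq_alloc_descs(-1, 0, 4, dev_to_node(dev));

	if (base < 0)
		return base;
	/* irq_set_chip_and_handler() etc. for base .. base + 3 */

	/* and on teardown: */
	irq_free_descs(base, 4);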
376
377/**
378 * irq_reserve_irqs - mark irqs allocated
379 * @from: mark from irq number
380 * @cnt: number of irqs to mark
381 *
382 * Returns 0 on success or an appropriate error code
383 */
384int irq_reserve_irqs(unsigned int from, unsigned int cnt)
385{
386 unsigned int start;
387 int ret = 0;
388
389 if (!cnt || (from + cnt) > nr_irqs)
390 return -EINVAL;
391
392 mutex_lock(&sparse_irq_lock);
393 start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
394 if (start == from)
395 bitmap_set(allocated_irqs, start, cnt);
396 else
397 ret = -EEXIST;
398 mutex_unlock(&sparse_irq_lock);
399 return ret;
400}
401
402/**
403 * irq_get_next_irq - get next allocated irq number
404 * @offset: where to start the search
405 *
406 * Returns next irq number after offset or nr_irqs if none is found.
407 */
408unsigned int irq_get_next_irq(unsigned int offset)
409{
410 return find_next_bit(allocated_irqs, nr_irqs, offset);
411}
412
413struct irq_desc *
414__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
415{
416 struct irq_desc *desc = irq_to_desc(irq);
417
418 if (desc) {
419 if (bus)
420 chip_bus_lock(desc);
421 raw_spin_lock_irqsave(&desc->lock, *flags);
422 }
423 return desc;
424}
425
426void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
427{
428 raw_spin_unlock_irqrestore(&desc->lock, flags);
429 if (bus)
430 chip_bus_sync_unlock(desc);
431}
432
433/**
434 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
435 * @irq: irq number to initialize
436 */
437void dynamic_irq_cleanup(unsigned int irq)
438{
439 struct irq_desc *desc = irq_to_desc(irq);
440 unsigned long flags;
441
442 raw_spin_lock_irqsave(&desc->lock, flags);
443 desc_set_defaults(irq, desc, desc_node(desc));
444 raw_spin_unlock_irqrestore(&desc->lock, flags);
445}
446
447unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
448{
449 struct irq_desc *desc = irq_to_desc(irq);
450
451 return desc && desc->kstat_irqs ?
452 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
453}
454
455unsigned int kstat_irqs(unsigned int irq)
456{
457 struct irq_desc *desc = irq_to_desc(irq);
458 int cpu;
459 int sum = 0;
460
461 if (!desc || !desc->kstat_irqs)
462 return 0;
463 for_each_possible_cpu(cpu)
464 sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
465 return sum;
466}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c3003e9d91a3..0a7840aeb0fb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,17 @@
17 17
18#include "internals.h" 18#include "internals.h"
19 19
20#ifdef CONFIG_IRQ_FORCED_THREADING
21__read_mostly bool force_irqthreads;
22
23static int __init setup_forced_irqthreads(char *arg)
24{
25 force_irqthreads = true;
26 return 0;
27}
28early_param("threadirqs", setup_forced_irqthreads);
29#endif
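With CONFIG_IRQ_FORCED_THREADING selected by the architecture, forced threading stays off unless the user boots with the new parameter, appended to the kernel command line, e.g.:

	root=/dev/sda1 ro quiet threadirqs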
30
20/** 31/**
21 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 32 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
22 * @irq: interrupt number to wait for 33 * @irq: interrupt number to wait for
@@ -30,7 +41,7 @@
30void synchronize_irq(unsigned int irq) 41void synchronize_irq(unsigned int irq)
31{ 42{
32 struct irq_desc *desc = irq_to_desc(irq); 43 struct irq_desc *desc = irq_to_desc(irq);
33 unsigned int status; 44 bool inprogress;
34 45
35 if (!desc) 46 if (!desc)
36 return; 47 return;
@@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq)
42 * Wait until we're out of the critical section. This might 53 * Wait until we're out of the critical section. This might
43 * give the wrong answer due to the lack of memory barriers. 54 * give the wrong answer due to the lack of memory barriers.
44 */ 55 */
45 while (desc->status & IRQ_INPROGRESS) 56 while (irqd_irq_inprogress(&desc->irq_data))
46 cpu_relax(); 57 cpu_relax();
47 58
48 /* Ok, that indicated we're done: double-check carefully. */ 59 /* Ok, that indicated we're done: double-check carefully. */
49 raw_spin_lock_irqsave(&desc->lock, flags); 60 raw_spin_lock_irqsave(&desc->lock, flags);
50 status = desc->status; 61 inprogress = irqd_irq_inprogress(&desc->irq_data);
51 raw_spin_unlock_irqrestore(&desc->lock, flags); 62 raw_spin_unlock_irqrestore(&desc->lock, flags);
52 63
53 /* Oops, that failed? */ 64 /* Oops, that failed? */
54 } while (status & IRQ_INPROGRESS); 65 } while (inprogress);
55 66
56 /* 67 /*
57 * We made sure that no hardirq handler is running. Now verify 68 * We made sure that no hardirq handler is running. Now verify
@@ -73,8 +84,8 @@ int irq_can_set_affinity(unsigned int irq)
73{ 84{
74 struct irq_desc *desc = irq_to_desc(irq); 85 struct irq_desc *desc = irq_to_desc(irq);
75 86
76 if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || 87 if (!desc || !irqd_can_balance(&desc->irq_data) ||
77 !desc->chip->set_affinity) 88 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
78 return 0; 89 return 0;
79 90
80 return 1; 91 return 1;
@@ -100,66 +111,180 @@ void irq_set_thread_affinity(struct irq_desc *desc)
100 } 111 }
101} 112}
102 113
114#ifdef CONFIG_GENERIC_PENDING_IRQ
115static inline bool irq_can_move_pcntxt(struct irq_data *data)
116{
117 return irqd_can_move_in_process_context(data);
118}
119static inline bool irq_move_pending(struct irq_data *data)
120{
121 return irqd_is_setaffinity_pending(data);
122}
123static inline void
124irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
125{
126 cpumask_copy(desc->pending_mask, mask);
127}
128static inline void
129irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
130{
131 cpumask_copy(mask, desc->pending_mask);
132}
133#else
134static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
135static inline bool irq_move_pending(struct irq_data *data) { return false; }
136static inline void
137irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
138static inline void
139irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
140#endif
141
142int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
143{
144 struct irq_chip *chip = irq_data_get_irq_chip(data);
145 struct irq_desc *desc = irq_data_to_desc(data);
146 int ret = 0;
147
148 if (!chip || !chip->irq_set_affinity)
149 return -EINVAL;
150
151 if (irq_can_move_pcntxt(data)) {
152 ret = chip->irq_set_affinity(data, mask, false);
153 switch (ret) {
154 case IRQ_SET_MASK_OK:
155 cpumask_copy(data->affinity, mask);
156 case IRQ_SET_MASK_OK_NOCOPY:
157 irq_set_thread_affinity(desc);
158 ret = 0;
159 }
160 } else {
161 irqd_set_move_pending(data);
162 irq_copy_pending(desc, mask);
163 }
164
165 if (desc->affinity_notify) {
166 kref_get(&desc->affinity_notify->kref);
167 schedule_work(&desc->affinity_notify->work);
168 }
169 irqd_set(data, IRQD_AFFINITY_SET);
170
171 return ret;
172}
173
103/** 174/**
104 * irq_set_affinity - Set the irq affinity of a given irq 175 * irq_set_affinity - Set the irq affinity of a given irq
105 * @irq: Interrupt to set affinity 176 * @irq: Interrupt to set affinity
106 * @cpumask: cpumask 177 * @mask: cpumask
107 * 178 *
108 */ 179 */
109int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 180int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
110{ 181{
111 struct irq_desc *desc = irq_to_desc(irq); 182 struct irq_desc *desc = irq_to_desc(irq);
112 unsigned long flags; 183 unsigned long flags;
184 int ret;
113 185
114 if (!desc->chip->set_affinity) 186 if (!desc)
115 return -EINVAL; 187 return -EINVAL;
116 188
117 raw_spin_lock_irqsave(&desc->lock, flags); 189 raw_spin_lock_irqsave(&desc->lock, flags);
118 190 ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
119#ifdef CONFIG_GENERIC_PENDING_IRQ
120 if (desc->status & IRQ_MOVE_PCNTXT) {
121 if (!desc->chip->set_affinity(irq, cpumask)) {
122 cpumask_copy(desc->affinity, cpumask);
123 irq_set_thread_affinity(desc);
124 }
125 }
126 else {
127 desc->status |= IRQ_MOVE_PENDING;
128 cpumask_copy(desc->pending_mask, cpumask);
129 }
130#else
131 if (!desc->chip->set_affinity(irq, cpumask)) {
132 cpumask_copy(desc->affinity, cpumask);
133 irq_set_thread_affinity(desc);
134 }
135#endif
136 desc->status |= IRQ_AFFINITY_SET;
137 raw_spin_unlock_irqrestore(&desc->lock, flags); 191 raw_spin_unlock_irqrestore(&desc->lock, flags);
138 return 0; 192 return ret;
139} 193}
140 194
141int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) 195int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
142{ 196{
197 unsigned long flags;
198 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
199
200 if (!desc)
201 return -EINVAL;
202 desc->affinity_hint = m;
203 irq_put_desc_unlock(desc, flags);
204 return 0;
205}
206EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
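A driver would typically publish the hint right after requesting the vector and clear it again before freeing it, so /proc/irq/<n>/affinity_hint stays consistent; a sketch (handler and queue structure hypothetical):

	err = request_irq(irq, foo_queue_handler, 0, "foo-queue", q);
	if (!err)
		irq_set_affinity_hint(irq, cpumask_of(q->cpu));

	/* on teardown, before free_irq(): */
	irq_set_affinity_hint(irq, NULL);
	free_irq(irq, q);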
207
208static void irq_affinity_notify(struct work_struct *work)
209{
210 struct irq_affinity_notify *notify =
211 container_of(work, struct irq_affinity_notify, work);
212 struct irq_desc *desc = irq_to_desc(notify->irq);
213 cpumask_var_t cpumask;
214 unsigned long flags;
215
216 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
217 goto out;
218
219 raw_spin_lock_irqsave(&desc->lock, flags);
220 if (irq_move_pending(&desc->irq_data))
221 irq_get_pending(cpumask, desc);
222 else
223 cpumask_copy(cpumask, desc->irq_data.affinity);
224 raw_spin_unlock_irqrestore(&desc->lock, flags);
225
226 notify->notify(notify, cpumask);
227
228 free_cpumask_var(cpumask);
229out:
230 kref_put(&notify->kref, notify->release);
231}
232
233/**
234 * irq_set_affinity_notifier - control notification of IRQ affinity changes
235 * @irq: Interrupt for which to enable/disable notification
236 * @notify: Context for notification, or %NULL to disable
237 * notification. Function pointers must be initialised;
238 * the other fields will be initialised by this function.
239 *
240 * Must be called in process context. Notification may only be enabled
241 * after the IRQ is allocated and must be disabled before the IRQ is
242 * freed using free_irq().
243 */
244int
245irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
246{
143 struct irq_desc *desc = irq_to_desc(irq); 247 struct irq_desc *desc = irq_to_desc(irq);
248 struct irq_affinity_notify *old_notify;
144 unsigned long flags; 249 unsigned long flags;
145 250
251 /* The release function is promised process context */
252 might_sleep();
253
146 if (!desc) 254 if (!desc)
147 return -EINVAL; 255 return -EINVAL;
148 256
257 /* Complete initialisation of *notify */
258 if (notify) {
259 notify->irq = irq;
260 kref_init(&notify->kref);
261 INIT_WORK(&notify->work, irq_affinity_notify);
262 }
263
149 raw_spin_lock_irqsave(&desc->lock, flags); 264 raw_spin_lock_irqsave(&desc->lock, flags);
150 desc->affinity_hint = m; 265 old_notify = desc->affinity_notify;
266 desc->affinity_notify = notify;
151 raw_spin_unlock_irqrestore(&desc->lock, flags); 267 raw_spin_unlock_irqrestore(&desc->lock, flags);
152 268
269 if (old_notify)
270 kref_put(&old_notify->kref, old_notify->release);
271
153 return 0; 272 return 0;
154} 273}
155EXPORT_SYMBOL_GPL(irq_set_affinity_hint); 274EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
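The caller supplies only the two callbacks; the irq number, kref and work item are filled in here. A sketch of a consumer (allocation and callbacks hypothetical):

static void foo_affinity_notify(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	/* re-target per-cpu resources to the new mask */
}

static void foo_affinity_release(struct kref *ref)
{
	kfree(container_of(ref, struct irq_affinity_notify, kref));
}

	notify = kzalloc(sizeof(*notify), GFP_KERNEL);
	if (notify) {
		notify->notify = foo_affinity_notify;
		notify->release = foo_affinity_release;
		irq_set_affinity_notifier(irq, notify);
	}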
156 275
157#ifndef CONFIG_AUTO_IRQ_AFFINITY 276#ifndef CONFIG_AUTO_IRQ_AFFINITY
158/* 277/*
159 * Generic version of the affinity autoselector. 278 * Generic version of the affinity autoselector.
160 */ 279 */
161static int setup_affinity(unsigned int irq, struct irq_desc *desc) 280static int
281setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
162{ 282{
283 struct irq_chip *chip = irq_desc_get_chip(desc);
284 struct cpumask *set = irq_default_affinity;
285 int ret;
286
287 /* Excludes PER_CPU and NO_BALANCE interrupts */
163 if (!irq_can_set_affinity(irq)) 288 if (!irq_can_set_affinity(irq))
164 return 0; 289 return 0;
165 290
@@ -167,22 +292,27 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
167 * Preserve an userspace affinity setup, but make sure that 292 * Preserve an userspace affinity setup, but make sure that
168 * one of the targets is online. 293 * one of the targets is online.
169 */ 294 */
170 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 295 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
171 if (cpumask_any_and(desc->affinity, cpu_online_mask) 296 if (cpumask_intersects(desc->irq_data.affinity,
172 < nr_cpu_ids) 297 cpu_online_mask))
173 goto set_affinity; 298 set = desc->irq_data.affinity;
174 else 299 else
175 desc->status &= ~IRQ_AFFINITY_SET; 300 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
176 } 301 }
177 302
178 cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); 303 cpumask_and(mask, cpu_online_mask, set);
179set_affinity: 304 ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
180 desc->chip->set_affinity(irq, desc->affinity); 305 switch (ret) {
181 306 case IRQ_SET_MASK_OK:
307 cpumask_copy(desc->irq_data.affinity, mask);
308 case IRQ_SET_MASK_OK_NOCOPY:
309 irq_set_thread_affinity(desc);
310 }
182 return 0; 311 return 0;
183} 312}
184#else 313#else
185static inline int setup_affinity(unsigned int irq, struct irq_desc *d) 314static inline int
315setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
186{ 316{
187 return irq_select_affinity(irq); 317 return irq_select_affinity(irq);
188} 318}
@@ -191,23 +321,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
191/* 321/*
192 * Called when affinity is set via /proc/irq 322 * Called when affinity is set via /proc/irq
193 */ 323 */
194int irq_select_affinity_usr(unsigned int irq) 324int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
195{ 325{
196 struct irq_desc *desc = irq_to_desc(irq); 326 struct irq_desc *desc = irq_to_desc(irq);
197 unsigned long flags; 327 unsigned long flags;
198 int ret; 328 int ret;
199 329
200 raw_spin_lock_irqsave(&desc->lock, flags); 330 raw_spin_lock_irqsave(&desc->lock, flags);
201 ret = setup_affinity(irq, desc); 331 ret = setup_affinity(irq, desc, mask);
202 if (!ret)
203 irq_set_thread_affinity(desc);
204 raw_spin_unlock_irqrestore(&desc->lock, flags); 332 raw_spin_unlock_irqrestore(&desc->lock, flags);
205
206 return ret; 333 return ret;
207} 334}
208 335
209#else 336#else
210static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) 337static inline int
338setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
211{ 339{
212 return 0; 340 return 0;
213} 341}
@@ -218,13 +346,23 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
218 if (suspend) { 346 if (suspend) {
219 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) 347 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
220 return; 348 return;
221 desc->status |= IRQ_SUSPENDED; 349 desc->istate |= IRQS_SUSPENDED;
222 } 350 }
223 351
224 if (!desc->depth++) { 352 if (!desc->depth++)
225 desc->status |= IRQ_DISABLED; 353 irq_disable(desc);
226 desc->chip->disable(irq); 354}
227 } 355
356static int __disable_irq_nosync(unsigned int irq)
357{
358 unsigned long flags;
359 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
360
361 if (!desc)
362 return -EINVAL;
363 __disable_irq(desc, irq, false);
364 irq_put_desc_busunlock(desc, flags);
365 return 0;
228} 366}
229 367
230/** 368/**
@@ -240,17 +378,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
240 */ 378 */
241void disable_irq_nosync(unsigned int irq) 379void disable_irq_nosync(unsigned int irq)
242{ 380{
243 struct irq_desc *desc = irq_to_desc(irq); 381 __disable_irq_nosync(irq);
244 unsigned long flags;
245
246 if (!desc)
247 return;
248
249 chip_bus_lock(irq, desc);
250 raw_spin_lock_irqsave(&desc->lock, flags);
251 __disable_irq(desc, irq, false);
252 raw_spin_unlock_irqrestore(&desc->lock, flags);
253 chip_bus_sync_unlock(irq, desc);
254} 382}
255EXPORT_SYMBOL(disable_irq_nosync); 383EXPORT_SYMBOL(disable_irq_nosync);
256 384
@@ -268,21 +396,24 @@ EXPORT_SYMBOL(disable_irq_nosync);
268 */ 396 */
269void disable_irq(unsigned int irq) 397void disable_irq(unsigned int irq)
270{ 398{
271 struct irq_desc *desc = irq_to_desc(irq); 399 if (!__disable_irq_nosync(irq))
272
273 if (!desc)
274 return;
275
276 disable_irq_nosync(irq);
277 if (desc->action)
278 synchronize_irq(irq); 400 synchronize_irq(irq);
279} 401}
280EXPORT_SYMBOL(disable_irq); 402EXPORT_SYMBOL(disable_irq);
281 403
282void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) 404void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
283{ 405{
284 if (resume) 406 if (resume) {
285 desc->status &= ~IRQ_SUSPENDED; 407 if (!(desc->istate & IRQS_SUSPENDED)) {
408 if (!desc->action)
409 return;
410 if (!(desc->action->flags & IRQF_FORCE_RESUME))
411 return;
412 /* Pretend that it got disabled ! */
413 desc->depth++;
414 }
415 desc->istate &= ~IRQS_SUSPENDED;
416 }
286 417
287 switch (desc->depth) { 418 switch (desc->depth) {
288 case 0: 419 case 0:
@@ -290,12 +421,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
290 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 421 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
291 break; 422 break;
292 case 1: { 423 case 1: {
293 unsigned int status = desc->status & ~IRQ_DISABLED; 424 if (desc->istate & IRQS_SUSPENDED)
294
295 if (desc->status & IRQ_SUSPENDED)
296 goto err_out; 425 goto err_out;
297 /* Prevent probing on this irq: */ 426 /* Prevent probing on this irq: */
298 desc->status = status | IRQ_NOPROBE; 427 irq_settings_set_noprobe(desc);
428 irq_enable(desc);
299 check_irq_resend(desc, irq); 429 check_irq_resend(desc, irq);
300 /* fall-through */ 430 /* fall-through */
301 } 431 }
@@ -313,21 +443,22 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
313 * IRQ line is re-enabled. 443 * IRQ line is re-enabled.
314 * 444 *
315 * This function may be called from IRQ context only when 445 * This function may be called from IRQ context only when
 316 * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 446 * desc->irq_data.chip->irq_bus_lock and desc->irq_data.chip->irq_bus_sync_unlock are NULL !
317 */ 447 */
318void enable_irq(unsigned int irq) 448void enable_irq(unsigned int irq)
319{ 449{
320 struct irq_desc *desc = irq_to_desc(irq);
321 unsigned long flags; 450 unsigned long flags;
451 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
322 452
323 if (!desc) 453 if (!desc)
324 return; 454 return;
455 if (WARN(!desc->irq_data.chip,
456 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
457 goto out;
325 458
326 chip_bus_lock(irq, desc);
327 raw_spin_lock_irqsave(&desc->lock, flags);
328 __enable_irq(desc, irq, false); 459 __enable_irq(desc, irq, false);
329 raw_spin_unlock_irqrestore(&desc->lock, flags); 460out:
330 chip_bus_sync_unlock(irq, desc); 461 irq_put_desc_busunlock(desc, flags);
331} 462}
332EXPORT_SYMBOL(enable_irq); 463EXPORT_SYMBOL(enable_irq);
333 464
@@ -336,14 +467,14 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
336 struct irq_desc *desc = irq_to_desc(irq); 467 struct irq_desc *desc = irq_to_desc(irq);
337 int ret = -ENXIO; 468 int ret = -ENXIO;
338 469
339 if (desc->chip->set_wake) 470 if (desc->irq_data.chip->irq_set_wake)
340 ret = desc->chip->set_wake(irq, on); 471 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
341 472
342 return ret; 473 return ret;
343} 474}
344 475
345/** 476/**
346 * set_irq_wake - control irq power management wakeup 477 * irq_set_irq_wake - control irq power management wakeup
347 * @irq: interrupt to control 478 * @irq: interrupt to control
348 * @on: enable/disable power management wakeup 479 * @on: enable/disable power management wakeup
349 * 480 *
@@ -354,23 +485,25 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
354 * Wakeup mode lets this IRQ wake the system from sleep 485 * Wakeup mode lets this IRQ wake the system from sleep
355 * states like "suspend to RAM". 486 * states like "suspend to RAM".
356 */ 487 */
357int set_irq_wake(unsigned int irq, unsigned int on) 488int irq_set_irq_wake(unsigned int irq, unsigned int on)
358{ 489{
359 struct irq_desc *desc = irq_to_desc(irq);
360 unsigned long flags; 490 unsigned long flags;
491 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
361 int ret = 0; 492 int ret = 0;
362 493
494 if (!desc)
495 return -EINVAL;
496
363 /* wakeup-capable irqs can be shared between drivers that 497 /* wakeup-capable irqs can be shared between drivers that
364 * don't need to have the same sleep mode behaviors. 498 * don't need to have the same sleep mode behaviors.
365 */ 499 */
366 raw_spin_lock_irqsave(&desc->lock, flags);
367 if (on) { 500 if (on) {
368 if (desc->wake_depth++ == 0) { 501 if (desc->wake_depth++ == 0) {
369 ret = set_irq_wake_real(irq, on); 502 ret = set_irq_wake_real(irq, on);
370 if (ret) 503 if (ret)
371 desc->wake_depth = 0; 504 desc->wake_depth = 0;
372 else 505 else
373 desc->status |= IRQ_WAKEUP; 506 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
374 } 507 }
375 } else { 508 } else {
376 if (desc->wake_depth == 0) { 509 if (desc->wake_depth == 0) {
@@ -380,14 +513,13 @@ int set_irq_wake(unsigned int irq, unsigned int on)
380 if (ret) 513 if (ret)
381 desc->wake_depth = 1; 514 desc->wake_depth = 1;
382 else 515 else
383 desc->status &= ~IRQ_WAKEUP; 516 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
384 } 517 }
385 } 518 }
386 519 irq_put_desc_busunlock(desc, flags);
387 raw_spin_unlock_irqrestore(&desc->lock, flags);
388 return ret; 520 return ret;
389} 521}
390EXPORT_SYMBOL(set_irq_wake); 522EXPORT_SYMBOL(irq_set_irq_wake);
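A wakeup-capable driver usually toggles this from its suspend path, gated on device_may_wakeup(); a sketch (driver private data hypothetical):

static int foo_suspend(struct device *dev)
{
	struct foo_priv *p = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(p->irq, 1);
	return 0;
}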
391 523
392/* 524/*
393 * Internal function that tells the architecture code whether a 525 * Internal function that tells the architecture code whether a
@@ -396,45 +528,29 @@ EXPORT_SYMBOL(set_irq_wake);
396 */ 528 */
397int can_request_irq(unsigned int irq, unsigned long irqflags) 529int can_request_irq(unsigned int irq, unsigned long irqflags)
398{ 530{
399 struct irq_desc *desc = irq_to_desc(irq);
400 struct irqaction *action;
401 unsigned long flags; 531 unsigned long flags;
532 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
533 int canrequest = 0;
402 534
403 if (!desc) 535 if (!desc)
404 return 0; 536 return 0;
405 537
406 if (desc->status & IRQ_NOREQUEST) 538 if (irq_settings_can_request(desc)) {
407 return 0; 539 if (!desc->action ||
408 540 (irqflags & desc->action->flags & IRQF_SHARED))
409 raw_spin_lock_irqsave(&desc->lock, flags); 541 canrequest = 1;
410 action = desc->action; 542 }
411 if (action) 543 irq_put_desc_unlock(desc, flags);
412 if (irqflags & action->flags & IRQF_SHARED) 544 return canrequest;
413 action = NULL;
414
415 raw_spin_unlock_irqrestore(&desc->lock, flags);
416
417 return !action;
418}
419
420void compat_irq_chip_set_default_handler(struct irq_desc *desc)
421{
422 /*
423 * If the architecture still has not overriden
424 * the flow handler then zap the default. This
425 * should catch incorrect flow-type setting.
426 */
427 if (desc->handle_irq == &handle_bad_irq)
428 desc->handle_irq = NULL;
429} 545}
430 546
431int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 547int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
432 unsigned long flags) 548 unsigned long flags)
433{ 549{
434 int ret; 550 struct irq_chip *chip = desc->irq_data.chip;
435 struct irq_chip *chip = desc->chip; 551 int ret, unmask = 0;
436 552
437 if (!chip || !chip->set_type) { 553 if (!chip || !chip->irq_set_type) {
438 /* 554 /*
439 * IRQF_TRIGGER_* but the PIC does not support multiple 555 * IRQF_TRIGGER_* but the PIC does not support multiple
440 * flow-types? 556 * flow-types?
@@ -444,23 +560,41 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
444 return 0; 560 return 0;
445 } 561 }
446 562
447 /* caller masked out all except trigger mode flags */ 563 flags &= IRQ_TYPE_SENSE_MASK;
448 ret = chip->set_type(irq, flags); 564
449 565 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
450 if (ret) 566 if (!irqd_irq_masked(&desc->irq_data))
451 pr_err("setting trigger mode %d for irq %u failed (%pF)\n", 567 mask_irq(desc);
452 (int)flags, irq, chip->set_type); 568 if (!irqd_irq_disabled(&desc->irq_data))
453 else { 569 unmask = 1;
454 if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
455 flags |= IRQ_LEVEL;
456 /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
457 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
458 desc->status |= flags;
459
460 if (chip != desc->chip)
461 irq_chip_set_defaults(desc->chip);
462 } 570 }
463 571
572 /* caller masked out all except trigger mode flags */
573 ret = chip->irq_set_type(&desc->irq_data, flags);
574
575 switch (ret) {
576 case IRQ_SET_MASK_OK:
577 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
578 irqd_set(&desc->irq_data, flags);
579
580 case IRQ_SET_MASK_OK_NOCOPY:
581 flags = irqd_get_trigger_type(&desc->irq_data);
582 irq_settings_set_trigger_mask(desc, flags);
583 irqd_clear(&desc->irq_data, IRQD_LEVEL);
584 irq_settings_clr_level(desc);
585 if (flags & IRQ_TYPE_LEVEL_MASK) {
586 irq_settings_set_level(desc);
587 irqd_set(&desc->irq_data, IRQD_LEVEL);
588 }
589
590 ret = 0;
591 break;
592 default:
593 pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
594 flags, irq, chip->irq_set_type);
595 }
596 if (unmask)
597 unmask_irq(desc);
464 return ret; 598 return ret;
465} 599}
466 600
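The switch above handles the two success codes an irq_set_type() callback may return: IRQ_SET_MASK_OK asks the core to copy the new trigger bits into irq_data, while IRQ_SET_MASK_OK_NOCOPY means the callback updated them itself. A sketch of such a callback for a hypothetical chip (all foo_* names and register values are invented):

#include <linux/io.h>
#include <linux/irq.h>

#define FOO_EDGE_RISING 0x1     /* invented register values */
#define FOO_LEVEL_HIGH  0x2

static int foo_irq_set_type(struct irq_data *d, unsigned int type)
{
        void __iomem *reg = d->chip_data;       /* chip's trigger register */

        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_RISING:
                writel(FOO_EDGE_RISING, reg);
                return IRQ_SET_MASK_OK;
        case IRQ_TYPE_LEVEL_HIGH:
                writel(FOO_LEVEL_HIGH, reg);
                return IRQ_SET_MASK_OK;
        default:
                return -EINVAL; /* anything else is unsupported */
        }
}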
@@ -504,10 +638,13 @@ static int irq_wait_for_interrupt(struct irqaction *action)
504 * handler finished. unmask if the interrupt has not been disabled and 638 * handler finished. unmask if the interrupt has not been disabled and
505 * is marked MASKED. 639 * is marked MASKED.
506 */ 640 */
507static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) 641static void irq_finalize_oneshot(struct irq_desc *desc,
642 struct irqaction *action, bool force)
508{ 643{
644 if (!(desc->istate & IRQS_ONESHOT))
645 return;
509again: 646again:
510 chip_bus_lock(irq, desc); 647 chip_bus_lock(desc);
511 raw_spin_lock_irq(&desc->lock); 648 raw_spin_lock_irq(&desc->lock);
512 649
513 /* 650 /*
@@ -517,26 +654,42 @@ again:
517 * The thread is faster done than the hard interrupt handler 654 * The thread is faster done than the hard interrupt handler
518 * on the other CPU. If we unmask the irq line then the 655 * on the other CPU. If we unmask the irq line then the
519 * interrupt can come in again and masks the line, leaves due 656 * interrupt can come in again and masks the line, leaves due
520 * to IRQ_INPROGRESS and the irq line is masked forever. 657 * to IRQS_INPROGRESS and the irq line is masked forever.
658 *
659 * This also serializes the state of shared oneshot handlers
660 * versus "desc->threads_oneshot |= action->thread_mask;" in
661 * irq_wake_thread(). See the comment there which explains the
662 * serialization.
521 */ 663 */
522 if (unlikely(desc->status & IRQ_INPROGRESS)) { 664 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
523 raw_spin_unlock_irq(&desc->lock); 665 raw_spin_unlock_irq(&desc->lock);
524 chip_bus_sync_unlock(irq, desc); 666 chip_bus_sync_unlock(desc);
525 cpu_relax(); 667 cpu_relax();
526 goto again; 668 goto again;
527 } 669 }
528 670
529 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { 671 /*
530 desc->status &= ~IRQ_MASKED; 672 * Now check again, whether the thread should run. Otherwise
531 desc->chip->unmask(irq); 673 * we would clear the threads_oneshot bit of this thread which
532 } 674 * was just set.
675 */
676 if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
677 goto out_unlock;
678
679 desc->threads_oneshot &= ~action->thread_mask;
680
681 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
682 irqd_irq_masked(&desc->irq_data))
683 unmask_irq(desc);
684
685out_unlock:
533 raw_spin_unlock_irq(&desc->lock); 686 raw_spin_unlock_irq(&desc->lock);
534 chip_bus_sync_unlock(irq, desc); 687 chip_bus_sync_unlock(desc);
535} 688}
536 689
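The unmask above only happens when the last oneshot thread clears its bit in threads_oneshot. A standalone model of that bookkeeping (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned long threads_oneshot = 0;
        unsigned long mask_a = 1UL << 0;        /* bit owned by thread A */
        unsigned long mask_b = 1UL << 1;        /* bit owned by thread B */

        threads_oneshot |= mask_a | mask_b;     /* both threads woken */

        threads_oneshot &= ~mask_a;             /* A finishes first */
        printf("unmask after A? %s\n", threads_oneshot ? "no" : "yes");

        threads_oneshot &= ~mask_b;             /* B finishes last */
        printf("unmask after B? %s\n", threads_oneshot ? "no" : "yes");
        return 0;
}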
537#ifdef CONFIG_SMP 690#ifdef CONFIG_SMP
538/* 691/*
539 * Check whether we need to change the affinity of the interrupt thread. 692
540 */ 693 */
541static void 694static void
542irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) 695irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
@@ -556,7 +709,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
556 } 709 }
557 710
558 raw_spin_lock_irq(&desc->lock); 711 raw_spin_lock_irq(&desc->lock);
559 cpumask_copy(mask, desc->affinity); 712 cpumask_copy(mask, desc->irq_data.affinity);
560 raw_spin_unlock_irq(&desc->lock); 713 raw_spin_unlock_irq(&desc->lock);
561 714
562 set_cpus_allowed_ptr(current, mask); 715 set_cpus_allowed_ptr(current, mask);
@@ -568,14 +721,57 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
568#endif 721#endif
569 722
570/* 723/*
724 * Interrupts which are not explicitly requested as threaded
725 * interrupts rely on the implicit bh/preempt disable of the hard irq
726 * context. So we need to disable bh here to avoid deadlocks and other
727 * side effects.
728 */
729static irqreturn_t
730irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
731{
732 irqreturn_t ret;
733
734 local_bh_disable();
735 ret = action->thread_fn(action->irq, action->dev_id);
736 irq_finalize_oneshot(desc, action, false);
737 local_bh_enable();
738 return ret;
739}
740
741/*
742 * Interrupts explicitly requested as threaded interrupts want to be
743 * preemptible - many of them need to sleep and wait for slow buses to
744 * complete.
745 */
746static irqreturn_t irq_thread_fn(struct irq_desc *desc,
747 struct irqaction *action)
748{
749 irqreturn_t ret;
750
751 ret = action->thread_fn(action->irq, action->dev_id);
752 irq_finalize_oneshot(desc, action, false);
753 return ret;
754}
755
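Both helpers end up calling a driver's thread_fn. A hedged sketch of how a driver gets here (device names invented; request_threaded_irq() is the real API): the primary handler defers to the thread by returning IRQ_WAKE_THREAD, and IRQF_ONESHOT keeps the line masked until irq_finalize_oneshot() unmasks it.

#include <linux/interrupt.h>

static irqreturn_t my_quick_check(int irq, void *dev_id)
{
        /* hard irq context: just ack/quiesce and defer the real work */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t my_slow_work(int irq, void *dev_id)
{
        /* thread context: may sleep, e.g. talk to an i2c/spi device */
        return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *dev)
{
        return request_threaded_irq(irq, my_quick_check, my_slow_work,
                                    IRQF_ONESHOT, "my-dev", dev);
}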
756/*
571 * Interrupt handler thread 757 * Interrupt handler thread
572 */ 758 */
573static int irq_thread(void *data) 759static int irq_thread(void *data)
574{ 760{
575 struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; 761 static const struct sched_param param = {
762 .sched_priority = MAX_USER_RT_PRIO/2,
763 };
576 struct irqaction *action = data; 764 struct irqaction *action = data;
577 struct irq_desc *desc = irq_to_desc(action->irq); 765 struct irq_desc *desc = irq_to_desc(action->irq);
578 int wake, oneshot = desc->status & IRQ_ONESHOT; 766 irqreturn_t (*handler_fn)(struct irq_desc *desc,
767 struct irqaction *action);
768 int wake;
769
770 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
771 &action->thread_flags))
772 handler_fn = irq_forced_thread_fn;
773 else
774 handler_fn = irq_thread_fn;
579 775
580 sched_setscheduler(current, SCHED_FIFO, &param); 776 sched_setscheduler(current, SCHED_FIFO, &param);
581 current->irqaction = action; 777 current->irqaction = action;
@@ -587,23 +783,23 @@ static int irq_thread(void *data)
587 atomic_inc(&desc->threads_active); 783 atomic_inc(&desc->threads_active);
588 784
589 raw_spin_lock_irq(&desc->lock); 785 raw_spin_lock_irq(&desc->lock);
590 if (unlikely(desc->status & IRQ_DISABLED)) { 786 if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
591 /* 787 /*
592 * CHECKME: We might need a dedicated 788 * CHECKME: We might need a dedicated
593 * IRQ_THREAD_PENDING flag here, which 789 * IRQ_THREAD_PENDING flag here, which
594 * retriggers the thread in check_irq_resend() 790 * retriggers the thread in check_irq_resend()
595 * but AFAICT IRQ_PENDING should be fine as it 791 * but AFAICT IRQS_PENDING should be fine as it
596 * retriggers the interrupt itself --- tglx 792 * retriggers the interrupt itself --- tglx
597 */ 793 */
598 desc->status |= IRQ_PENDING; 794 desc->istate |= IRQS_PENDING;
599 raw_spin_unlock_irq(&desc->lock); 795 raw_spin_unlock_irq(&desc->lock);
600 } else { 796 } else {
601 raw_spin_unlock_irq(&desc->lock); 797 irqreturn_t action_ret;
602
603 action->thread_fn(action->irq, action->dev_id);
604 798
605 if (oneshot) 799 raw_spin_unlock_irq(&desc->lock);
606 irq_finalize_oneshot(action->irq, desc); 800 action_ret = handler_fn(desc, action);
801 if (!noirqdebug)
802 note_interrupt(action->irq, desc, action_ret);
607 } 803 }
608 804
609 wake = atomic_dec_and_test(&desc->threads_active); 805 wake = atomic_dec_and_test(&desc->threads_active);
@@ -612,6 +808,9 @@ static int irq_thread(void *data)
612 wake_up(&desc->wait_for_threads); 808 wake_up(&desc->wait_for_threads);
613 } 809 }
614 810
811 /* Prevent a stale desc->threads_oneshot */
812 irq_finalize_oneshot(desc, action, true);
813
615 /* 814 /*
616 * Clear irqaction. Otherwise exit_irq_thread() would make 815 * Clear irqaction. Otherwise exit_irq_thread() would make
617 * fuzz about an active irq thread going into nirvana. 816 * fuzz about an active irq thread going into nirvana.
@@ -626,6 +825,7 @@ static int irq_thread(void *data)
626void exit_irq_thread(void) 825void exit_irq_thread(void)
627{ 826{
628 struct task_struct *tsk = current; 827 struct task_struct *tsk = current;
828 struct irq_desc *desc;
629 829
630 if (!tsk->irqaction) 830 if (!tsk->irqaction)
631 return; 831 return;
@@ -634,6 +834,14 @@ void exit_irq_thread(void)
634 "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", 834 "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
635 tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); 835 tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
636 836
837 desc = irq_to_desc(tsk->irqaction->irq);
838
839 /*
840 * Prevent a stale desc->threads_oneshot. Must be called
841 * before setting the IRQTF_DIED flag.
842 */
843 irq_finalize_oneshot(desc, tsk->irqaction, true);
844
637 /* 845 /*
638 * Set the THREAD DIED flag to prevent further wakeups of the 846 * Set the THREAD DIED flag to prevent further wakeups of the
639 * soon to be gone threaded handler. 847 * soon to be gone threaded handler.
@@ -641,6 +849,22 @@ void exit_irq_thread(void)
641 set_bit(IRQTF_DIED, &tsk->irqaction->flags); 849 set_bit(IRQTF_DIED, &tsk->irqaction->flags);
642} 850}
643 851
852static void irq_setup_forced_threading(struct irqaction *new)
853{
854 if (!force_irqthreads)
855 return;
856 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
857 return;
858
859 new->flags |= IRQF_ONESHOT;
860
861 if (!new->thread_fn) {
862 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
863 new->thread_fn = new->handler;
864 new->handler = irq_default_primary_handler;
865 }
866}
867
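Forced threading is enabled by the threadirqs boot option, which sets force_irqthreads. Handlers that must stay in hard irq context opt out with IRQF_NO_THREAD, making irq_setup_forced_threading() return early. A minimal sketch (handler and name invented):

#include <linux/interrupt.h>

static irqreturn_t my_timer_tick(int irq, void *dev_id)
{
        return IRQ_HANDLED;     /* must run in hard irq context */
}

/* Even when booted with "threadirqs", this handler stays unthreaded. */
static int my_setup(unsigned int irq)
{
        return request_irq(irq, my_timer_tick, IRQF_NO_THREAD,
                           "my-timer", NULL);
}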
644/* 868/*
645 * Internal function to register an irqaction - typically used to 869 * Internal function to register an irqaction - typically used to
646 * allocate special interrupts that are part of the architecture. 870 * allocate special interrupts that are part of the architecture.
@@ -650,14 +874,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
650{ 874{
651 struct irqaction *old, **old_ptr; 875 struct irqaction *old, **old_ptr;
652 const char *old_name = NULL; 876 const char *old_name = NULL;
653 unsigned long flags; 877 unsigned long flags, thread_mask = 0;
654 int nested, shared = 0; 878 int ret, nested, shared = 0;
655 int ret; 879 cpumask_var_t mask;
656 880
657 if (!desc) 881 if (!desc)
658 return -EINVAL; 882 return -EINVAL;
659 883
660 if (desc->chip == &no_irq_chip) 884 if (desc->irq_data.chip == &no_irq_chip)
661 return -ENOSYS; 885 return -ENOSYS;
662 /* 886 /*
663 * Some drivers like serial.c use request_irq() heavily, 887 * Some drivers like serial.c use request_irq() heavily,
@@ -676,15 +900,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
676 rand_initialize_irq(irq); 900 rand_initialize_irq(irq);
677 } 901 }
678 902
679 /* Oneshot interrupts are not allowed with shared */
680 if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
681 return -EINVAL;
682
683 /* 903 /*
684 * Check whether the interrupt nests into another interrupt 904 * Check whether the interrupt nests into another interrupt
685 * thread. 905 * thread.
686 */ 906 */
687 nested = desc->status & IRQ_NESTED_THREAD; 907 nested = irq_settings_is_nested_thread(desc);
688 if (nested) { 908 if (nested) {
689 if (!new->thread_fn) 909 if (!new->thread_fn)
690 return -EINVAL; 910 return -EINVAL;
@@ -694,6 +914,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
694 * dummy function which warns when called. 914 * dummy function which warns when called.
695 */ 915 */
696 new->handler = irq_nested_primary_handler; 916 new->handler = irq_nested_primary_handler;
917 } else {
918 if (irq_settings_can_thread(desc))
919 irq_setup_forced_threading(new);
697 } 920 }
698 921
699 /* 922 /*
@@ -717,6 +940,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
717 new->thread = t; 940 new->thread = t;
718 } 941 }
719 942
943 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
944 ret = -ENOMEM;
945 goto out_thread;
946 }
947
720 /* 948 /*
721 * The following block of code has to be executed atomically 949 * The following block of code has to be executed atomically
722 */ 950 */
@@ -728,32 +956,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
728 * Can't share interrupts unless both agree to and are 956 * Can't share interrupts unless both agree to and are
729 * the same type (level, edge, polarity). So both flag 957 * the same type (level, edge, polarity). So both flag
730 * fields must have IRQF_SHARED set and the bits which 958 * fields must have IRQF_SHARED set and the bits which
731 * set the trigger type must match. 959 * set the trigger type must match. Also all must
960 * agree on ONESHOT.
732 */ 961 */
733 if (!((old->flags & new->flags) & IRQF_SHARED) || 962 if (!((old->flags & new->flags) & IRQF_SHARED) ||
734 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { 963 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
964 ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
735 old_name = old->name; 965 old_name = old->name;
736 goto mismatch; 966 goto mismatch;
737 } 967 }
738 968
739#if defined(CONFIG_IRQ_PER_CPU)
740 /* All handlers must agree on per-cpuness */ 969 /* All handlers must agree on per-cpuness */
741 if ((old->flags & IRQF_PERCPU) != 970 if ((old->flags & IRQF_PERCPU) !=
742 (new->flags & IRQF_PERCPU)) 971 (new->flags & IRQF_PERCPU))
743 goto mismatch; 972 goto mismatch;
744#endif
745 973
746 /* add new interrupt at end of irq queue */ 974 /* add new interrupt at end of irq queue */
747 do { 975 do {
976 thread_mask |= old->thread_mask;
748 old_ptr = &old->next; 977 old_ptr = &old->next;
749 old = *old_ptr; 978 old = *old_ptr;
750 } while (old); 979 } while (old);
751 shared = 1; 980 shared = 1;
752 } 981 }
753 982
754 if (!shared) { 983 /*
755 irq_chip_set_defaults(desc->chip); 984 * Setup the thread mask for this irqaction. Unlikely to have
985 * 32 or 64 irqs sharing one line, but who knows.
986 */
987 if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
988 ret = -EBUSY;
989 goto out_mask;
990 }
991 new->thread_mask = 1 << ffz(thread_mask);
756 992
993 if (!shared) {
757 init_waitqueue_head(&desc->wait_for_threads); 994 init_waitqueue_head(&desc->wait_for_threads);
758 995
759 /* Setup the type (level, edge polarity) if configured: */ 996 /* Setup the type (level, edge polarity) if configured: */
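The thread_mask allocation above hands each oneshot action the first zero bit of the accumulated mask, capping the sharers of one line at BITS_PER_LONG. A standalone model (plain C; ffz(x) behaves like __builtin_ctzl(~x) for x != ~0UL):

#include <stdio.h>

int main(void)
{
        unsigned long thread_mask = 0xbUL;      /* bits 0, 1 and 3 taken */
        unsigned long new_mask;

        if (thread_mask == ~0UL)
                return 1;       /* line is full: __setup_irq returns -EBUSY */

        new_mask = 1UL << __builtin_ctzl(~thread_mask);
        printf("new thread_mask: %#lx\n", new_mask);    /* 0x4: bit 2 */
        return 0;
}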
@@ -762,42 +999,44 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
762 new->flags & IRQF_TRIGGER_MASK); 999 new->flags & IRQF_TRIGGER_MASK);
763 1000
764 if (ret) 1001 if (ret)
765 goto out_thread; 1002 goto out_mask;
766 } else 1003 }
767 compat_irq_chip_set_default_handler(desc); 1004
768#if defined(CONFIG_IRQ_PER_CPU) 1005 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
769 if (new->flags & IRQF_PERCPU) 1006 IRQS_ONESHOT | IRQS_WAITING);
770 desc->status |= IRQ_PER_CPU; 1007 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
771#endif
772 1008
773 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT | 1009 if (new->flags & IRQF_PERCPU) {
774 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); 1010 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1011 irq_settings_set_per_cpu(desc);
1012 }
775 1013
776 if (new->flags & IRQF_ONESHOT) 1014 if (new->flags & IRQF_ONESHOT)
777 desc->status |= IRQ_ONESHOT; 1015 desc->istate |= IRQS_ONESHOT;
778 1016
779 if (!(desc->status & IRQ_NOAUTOEN)) { 1017 if (irq_settings_can_autoenable(desc))
780 desc->depth = 0; 1018 irq_startup(desc);
781 desc->status &= ~IRQ_DISABLED; 1019 else
782 desc->chip->startup(irq);
783 } else
784 /* Undo nested disables: */ 1020 /* Undo nested disables: */
785 desc->depth = 1; 1021 desc->depth = 1;
786 1022
787 /* Exclude IRQ from balancing if requested */ 1023 /* Exclude IRQ from balancing if requested */
788 if (new->flags & IRQF_NOBALANCING) 1024 if (new->flags & IRQF_NOBALANCING) {
789 desc->status |= IRQ_NO_BALANCING; 1025 irq_settings_set_no_balancing(desc);
1026 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1027 }
790 1028
791 /* Set default affinity mask once everything is setup */ 1029 /* Set default affinity mask once everything is setup */
792 setup_affinity(irq, desc); 1030 setup_affinity(irq, desc, mask);
793 1031
794 } else if ((new->flags & IRQF_TRIGGER_MASK) 1032 } else if (new->flags & IRQF_TRIGGER_MASK) {
795 && (new->flags & IRQF_TRIGGER_MASK) 1033 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
796 != (desc->status & IRQ_TYPE_SENSE_MASK)) { 1034 unsigned int omsk = irq_settings_get_trigger_mask(desc);
797 /* hope the handler works with the actual trigger mode... */ 1035
798 pr_warning("IRQ %d uses trigger mode %d; requested %d\n", 1036 if (nmsk != omsk)
799 irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), 1037 /* hope the handler works with current trigger mode */
800 (int)(new->flags & IRQF_TRIGGER_MASK)); 1038 pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
1039 irq, omsk, nmsk);
801 } 1040 }
802 1041
803 new->irq = irq; 1042 new->irq = irq;
@@ -811,8 +1050,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
811 * Check whether we disabled the irq via the spurious handler 1050 * Check whether we disabled the irq via the spurious handler
812 * before. Reenable it and give it another chance. 1051 * before. Reenable it and give it another chance.
813 */ 1052 */
814 if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { 1053 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
815 desc->status &= ~IRQ_SPURIOUS_DISABLED; 1054 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
816 __enable_irq(desc, irq, false); 1055 __enable_irq(desc, irq, false);
817 } 1056 }
818 1057
@@ -828,6 +1067,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
828 register_irq_proc(irq, desc); 1067 register_irq_proc(irq, desc);
829 new->dir = NULL; 1068 new->dir = NULL;
830 register_handler_proc(irq, new); 1069 register_handler_proc(irq, new);
1070 free_cpumask_var(mask);
831 1071
832 return 0; 1072 return 0;
833 1073
@@ -842,8 +1082,11 @@ mismatch:
842#endif 1082#endif
843 ret = -EBUSY; 1083 ret = -EBUSY;
844 1084
845out_thread: 1085out_mask:
846 raw_spin_unlock_irqrestore(&desc->lock, flags); 1086 raw_spin_unlock_irqrestore(&desc->lock, flags);
1087 free_cpumask_var(mask);
1088
1089out_thread:
847 if (new->thread) { 1090 if (new->thread) {
848 struct task_struct *t = new->thread; 1091 struct task_struct *t = new->thread;
849 1092
@@ -864,9 +1107,14 @@ out_thread:
864 */ 1107 */
865int setup_irq(unsigned int irq, struct irqaction *act) 1108int setup_irq(unsigned int irq, struct irqaction *act)
866{ 1109{
1110 int retval;
867 struct irq_desc *desc = irq_to_desc(irq); 1111 struct irq_desc *desc = irq_to_desc(irq);
868 1112
869 return __setup_irq(irq, desc, act); 1113 chip_bus_lock(desc);
1114 retval = __setup_irq(irq, desc, act);
1115 chip_bus_sync_unlock(desc);
1116
1117 return retval;
870} 1118}
871EXPORT_SYMBOL_GPL(setup_irq); 1119EXPORT_SYMBOL_GPL(setup_irq);
872 1120
@@ -912,18 +1160,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
912 1160
913 /* Currently used only by UML, might disappear one day: */ 1161 /* Currently used only by UML, might disappear one day: */
914#ifdef CONFIG_IRQ_RELEASE_METHOD 1162#ifdef CONFIG_IRQ_RELEASE_METHOD
915 if (desc->chip->release) 1163 if (desc->irq_data.chip->release)
916 desc->chip->release(irq, dev_id); 1164 desc->irq_data.chip->release(irq, dev_id);
917#endif 1165#endif
918 1166
919 /* If this was the last handler, shut down the IRQ line: */ 1167 /* If this was the last handler, shut down the IRQ line: */
920 if (!desc->action) { 1168 if (!desc->action)
921 desc->status |= IRQ_DISABLED; 1169 irq_shutdown(desc);
922 if (desc->chip->shutdown)
923 desc->chip->shutdown(irq);
924 else
925 desc->chip->disable(irq);
926 }
927 1170
928#ifdef CONFIG_SMP 1171#ifdef CONFIG_SMP
929 /* make sure affinity_hint is cleaned up */ 1172 /* make sure affinity_hint is cleaned up */
@@ -997,9 +1240,14 @@ void free_irq(unsigned int irq, void *dev_id)
997 if (!desc) 1240 if (!desc)
998 return; 1241 return;
999 1242
1000 chip_bus_lock(irq, desc); 1243#ifdef CONFIG_SMP
1244 if (WARN_ON(desc->affinity_notify))
1245 desc->affinity_notify = NULL;
1246#endif
1247
1248 chip_bus_lock(desc);
1001 kfree(__free_irq(irq, dev_id)); 1249 kfree(__free_irq(irq, dev_id));
1002 chip_bus_sync_unlock(irq, desc); 1250 chip_bus_sync_unlock(desc);
1003} 1251}
1004EXPORT_SYMBOL(free_irq); 1252EXPORT_SYMBOL(free_irq);
1005 1253
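The new WARN_ON means a driver that registered an affinity notifier via irq_set_affinity_notifier() (added elsewhere in this series) has to drop it before releasing the line. A hedged teardown sketch:

#include <linux/interrupt.h>

static void my_teardown(unsigned int irq, void *dev)
{
        irq_set_affinity_notifier(irq, NULL);   /* unhook our notifier first */
        free_irq(irq, dev);                     /* now the WARN_ON stays quiet */
}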
@@ -1067,7 +1315,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1067 if (!desc) 1315 if (!desc)
1068 return -EINVAL; 1316 return -EINVAL;
1069 1317
1070 if (desc->status & IRQ_NOREQUEST) 1318 if (!irq_settings_can_request(desc))
1071 return -EINVAL; 1319 return -EINVAL;
1072 1320
1073 if (!handler) { 1321 if (!handler) {
@@ -1086,14 +1334,14 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1086 action->name = devname; 1334 action->name = devname;
1087 action->dev_id = dev_id; 1335 action->dev_id = dev_id;
1088 1336
1089 chip_bus_lock(irq, desc); 1337 chip_bus_lock(desc);
1090 retval = __setup_irq(irq, desc, action); 1338 retval = __setup_irq(irq, desc, action);
1091 chip_bus_sync_unlock(irq, desc); 1339 chip_bus_sync_unlock(desc);
1092 1340
1093 if (retval) 1341 if (retval)
1094 kfree(action); 1342 kfree(action);
1095 1343
1096#ifdef CONFIG_DEBUG_SHIRQ 1344#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1097 if (!retval && (irqflags & IRQF_SHARED)) { 1345 if (!retval && (irqflags & IRQF_SHARED)) {
1098 /* 1346 /*
1099 * It's a shared IRQ -- the driver ought to be prepared for it 1347 * It's a shared IRQ -- the driver ought to be prepared for it
@@ -1142,7 +1390,7 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1142 if (!desc) 1390 if (!desc)
1143 return -EINVAL; 1391 return -EINVAL;
1144 1392
1145 if (desc->status & IRQ_NESTED_THREAD) { 1393 if (irq_settings_is_nested_thread(desc)) {
1146 ret = request_threaded_irq(irq, NULL, handler, 1394 ret = request_threaded_irq(irq, NULL, handler,
1147 flags, name, dev_id); 1395 flags, name, dev_id);
1148 return !ret ? IRQC_IS_NESTED : ret; 1396 return !ret ? IRQC_IS_NESTED : ret;
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 241962280836..47420908fba0 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,27 +4,28 @@
4 4
5#include "internals.h" 5#include "internals.h"
6 6
7void move_masked_irq(int irq) 7void irq_move_masked_irq(struct irq_data *idata)
8{ 8{
9 struct irq_desc *desc = irq_to_desc(irq); 9 struct irq_desc *desc = irq_data_to_desc(idata);
10 struct irq_chip *chip = idata->chip;
10 11
11 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 12 if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
12 return; 13 return;
13 14
14 /* 15 /*
15 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. 16 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
16 */ 17 */
17 if (CHECK_IRQ_PER_CPU(desc->status)) { 18 if (!irqd_can_balance(&desc->irq_data)) {
18 WARN_ON(1); 19 WARN_ON(1);
19 return; 20 return;
20 } 21 }
21 22
22 desc->status &= ~IRQ_MOVE_PENDING; 23 irqd_clr_move_pending(&desc->irq_data);
23 24
24 if (unlikely(cpumask_empty(desc->pending_mask))) 25 if (unlikely(cpumask_empty(desc->pending_mask)))
25 return; 26 return;
26 27
27 if (!desc->chip->set_affinity) 28 if (!chip->irq_set_affinity)
28 return; 29 return;
29 30
30 assert_raw_spin_locked(&desc->lock); 31 assert_raw_spin_locked(&desc->lock);
@@ -34,7 +35,7 @@ void move_masked_irq(int irq)
34 * do the disable, re-program, enable sequence. 35 * do the disable, re-program, enable sequence.
35 * This is *not* particularly important for level triggered 36 * This is *not* particularly important for level triggered
36 * but in a edge trigger case, we might be setting rte 37 * but in a edge trigger case, we might be setting rte
37 * when an active trigger is comming in. This could 38 * when an active trigger is coming in. This could
38 * cause some ioapics to mal-function. 39 * cause some ioapics to mal-function.
39 * Being paranoid i guess! 40 * Being paranoid i guess!
40 * 41 *
@@ -43,26 +44,34 @@ void move_masked_irq(int irq)
43 */ 44 */
44 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) 45 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
45 < nr_cpu_ids)) 46 < nr_cpu_ids))
46 if (!desc->chip->set_affinity(irq, desc->pending_mask)) { 47 if (!chip->irq_set_affinity(&desc->irq_data,
47 cpumask_copy(desc->affinity, desc->pending_mask); 48 desc->pending_mask, false)) {
49 cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
48 irq_set_thread_affinity(desc); 50 irq_set_thread_affinity(desc);
49 } 51 }
50 52
51 cpumask_clear(desc->pending_mask); 53 cpumask_clear(desc->pending_mask);
52} 54}
53 55
54void move_native_irq(int irq) 56void irq_move_irq(struct irq_data *idata)
55{ 57{
56 struct irq_desc *desc = irq_to_desc(irq); 58 bool masked;
57 59
58 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 60 if (likely(!irqd_is_setaffinity_pending(idata)))
59 return; 61 return;
60 62
61 if (unlikely(desc->status & IRQ_DISABLED)) 63 if (unlikely(irqd_irq_disabled(idata)))
62 return; 64 return;
63 65
64 desc->chip->mask(irq); 66 /*
65 move_masked_irq(irq); 67 * Be careful vs. already masked interrupts. If this is a
66 desc->chip->unmask(irq); 68 * threaded interrupt with ONESHOT set, we can end up with an
69 * interrupt storm.
70 */
71 masked = irqd_irq_masked(idata);
72 if (!masked)
73 idata->chip->irq_mask(idata);
74 irq_move_masked_irq(idata);
75 if (!masked)
76 idata->chip->irq_unmask(idata);
67} 77}
68
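The mask/unmask bracket in irq_move_irq() is what prevents the interrupt storm mentioned in the comment: a line that is already masked (e.g. a ONESHOT threaded irq) stays masked. A sketch of the typical call site, an ack hook in architecture code (foo_* names invented):

#include <linux/irq.h>

static void foo_hw_ack(unsigned int irq)
{
        /* hypothetical: write the controller's ack register for @irq */
}

static void foo_irq_ack(struct irq_data *data)
{
        foo_hw_ack(data->irq);
        irq_move_irq(data);     /* no-op unless an affinity change is pending */
}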
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
deleted file mode 100644
index 65d3845665ac..000000000000
--- a/kernel/irq/numa_migrate.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * NUMA irq-desc migration code
3 *
4 * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
5 * the new "home node" of the IRQ.
6 */
7
8#include <linux/irq.h>
9#include <linux/slab.h>
10#include <linux/module.h>
11#include <linux/random.h>
12#include <linux/interrupt.h>
13#include <linux/kernel_stat.h>
14
15#include "internals.h"
16
17static void init_copy_kstat_irqs(struct irq_desc *old_desc,
18 struct irq_desc *desc,
19 int node, int nr)
20{
21 init_kstat_irqs(desc, node, nr);
22
23 if (desc->kstat_irqs != old_desc->kstat_irqs)
24 memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
25 nr * sizeof(*desc->kstat_irqs));
26}
27
28static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
29{
30 if (old_desc->kstat_irqs == desc->kstat_irqs)
31 return;
32
33 kfree(old_desc->kstat_irqs);
34 old_desc->kstat_irqs = NULL;
35}
36
37static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
38 struct irq_desc *desc, int node)
39{
40 memcpy(desc, old_desc, sizeof(struct irq_desc));
41 if (!alloc_desc_masks(desc, node, false)) {
42 printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
43 "for migration.\n", irq);
44 return false;
45 }
46 raw_spin_lock_init(&desc->lock);
47 desc->node = node;
48 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
49 init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
50 init_copy_desc_masks(old_desc, desc);
51 arch_init_copy_chip_data(old_desc, desc, node);
52 return true;
53}
54
55static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
56{
57 free_kstat_irqs(old_desc, desc);
58 free_desc_masks(old_desc, desc);
59 arch_free_chip_data(old_desc, desc);
60}
61
62static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
63 int node)
64{
65 struct irq_desc *desc;
66 unsigned int irq;
67 unsigned long flags;
68
69 irq = old_desc->irq;
70
71 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
72
73 /* We have to check it to avoid races with another CPU */
74 desc = irq_to_desc(irq);
75
76 if (desc && old_desc != desc)
77 goto out_unlock;
78
79 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
80 if (!desc) {
81 printk(KERN_ERR "irq %d: can not get new irq_desc "
82 "for migration.\n", irq);
83 /* still use old one */
84 desc = old_desc;
85 goto out_unlock;
86 }
87 if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) {
88 /* still use old one */
89 kfree(desc);
90 desc = old_desc;
91 goto out_unlock;
92 }
93
94 replace_irq_desc(irq, desc);
95 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
96
97 /* free the old one */
98 free_one_irq_desc(old_desc, desc);
99 kfree(old_desc);
100
101 return desc;
102
103out_unlock:
104 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
105
106 return desc;
107}
108
109struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
110{
111 /* descs that are static or whose target node is -1 are not moved */
112 if (desc->irq < NR_IRQS_LEGACY || node == -1)
113 return desc;
114
115 if (desc->node != node)
116 desc = __real_move_irq_desc(desc, node);
117
118 return desc;
119}
120
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 0d4005d85b03..f76fc00c9877 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -18,7 +18,7 @@
18 * During system-wide suspend or hibernation device drivers need to be prevented 18 * During system-wide suspend or hibernation device drivers need to be prevented
19 * from receiving interrupts and this function is provided for this purpose. 19 * from receiving interrupts and this function is provided for this purpose.
20 * It marks all interrupt lines in use, except for the timer ones, as disabled 20 * It marks all interrupt lines in use, except for the timer ones, as disabled
21 * and sets the IRQ_SUSPENDED flag for each of them. 21 * and sets the IRQS_SUSPENDED flag for each of them.
22 */ 22 */
23void suspend_device_irqs(void) 23void suspend_device_irqs(void)
24{ 24{
@@ -34,7 +34,7 @@ void suspend_device_irqs(void)
34 } 34 }
35 35
36 for_each_irq_desc(irq, desc) 36 for_each_irq_desc(irq, desc)
37 if (desc->status & IRQ_SUSPENDED) 37 if (desc->istate & IRQS_SUSPENDED)
38 synchronize_irq(irq); 38 synchronize_irq(irq);
39} 39}
40EXPORT_SYMBOL_GPL(suspend_device_irqs); 40EXPORT_SYMBOL_GPL(suspend_device_irqs);
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(suspend_device_irqs);
43 * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs() 43 * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
44 * 44 *
45 * Enable all interrupt lines previously disabled by suspend_device_irqs() that 45 * Enable all interrupt lines previously disabled by suspend_device_irqs() that
46 * have the IRQ_SUSPENDED flag set. 46 * have the IRQS_SUSPENDED flag set.
47 */ 47 */
48void resume_device_irqs(void) 48void resume_device_irqs(void)
49{ 49{
@@ -53,9 +53,6 @@ void resume_device_irqs(void)
53 for_each_irq_desc(irq, desc) { 53 for_each_irq_desc(irq, desc) {
54 unsigned long flags; 54 unsigned long flags;
55 55
56 if (!(desc->status & IRQ_SUSPENDED))
57 continue;
58
59 raw_spin_lock_irqsave(&desc->lock, flags); 56 raw_spin_lock_irqsave(&desc->lock, flags);
60 __enable_irq(desc, irq, true); 57 __enable_irq(desc, irq, true);
61 raw_spin_unlock_irqrestore(&desc->lock, flags); 58 raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -71,9 +68,24 @@ int check_wakeup_irqs(void)
71 struct irq_desc *desc; 68 struct irq_desc *desc;
72 int irq; 69 int irq;
73 70
74 for_each_irq_desc(irq, desc) 71 for_each_irq_desc(irq, desc) {
75 if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING)) 72 if (irqd_is_wakeup_set(&desc->irq_data)) {
76 return -EBUSY; 73 if (desc->istate & IRQS_PENDING)
74 return -EBUSY;
75 continue;
76 }
77 /*
78 * Check the non wakeup interrupts whether they need
79 * to be masked before finally going into suspend
80 * state. That's for hardware which has no wakeup
81 * source configuration facility. The chip
82 * implementation indicates that with
83 * IRQCHIP_MASK_ON_SUSPEND.
84 */
85 if (desc->istate & IRQS_SUSPENDED &&
86 irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
87 mask_irq(desc);
88 }
77 89
78 return 0; 90 return 0;
79} 91}
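A chip opts into this suspend-time masking by setting the flag in its struct irq_chip. A sketch (foo_* callbacks invented):

#include <linux/irq.h>

static void foo_mask(struct irq_data *d)        { /* mask in hardware */ }
static void foo_unmask(struct irq_data *d)      { /* unmask in hardware */ }

static struct irq_chip foo_chip = {
        .name           = "foo",
        .irq_mask       = foo_mask,
        .irq_unmask     = foo_unmask,
        /* no wakeup source configuration register in hardware: */
        .flags          = IRQCHIP_MASK_ON_SUSPEND,
};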
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 09a2ee540bd2..4bd4faa6323a 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -11,6 +11,7 @@
11#include <linux/proc_fs.h> 11#include <linux/proc_fs.h>
12#include <linux/seq_file.h> 12#include <linux/seq_file.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/kernel_stat.h>
14 15
15#include "internals.h" 16#include "internals.h"
16 17
@@ -18,16 +19,19 @@ static struct proc_dir_entry *root_irq_dir;
18 19
19#ifdef CONFIG_SMP 20#ifdef CONFIG_SMP
20 21
21static int irq_affinity_proc_show(struct seq_file *m, void *v) 22static int show_irq_affinity(int type, struct seq_file *m, void *v)
22{ 23{
23 struct irq_desc *desc = irq_to_desc((long)m->private); 24 struct irq_desc *desc = irq_to_desc((long)m->private);
24 const struct cpumask *mask = desc->affinity; 25 const struct cpumask *mask = desc->irq_data.affinity;
25 26
26#ifdef CONFIG_GENERIC_PENDING_IRQ 27#ifdef CONFIG_GENERIC_PENDING_IRQ
27 if (desc->status & IRQ_MOVE_PENDING) 28 if (irqd_is_setaffinity_pending(&desc->irq_data))
28 mask = desc->pending_mask; 29 mask = desc->pending_mask;
29#endif 30#endif
30 seq_cpumask(m, mask); 31 if (type)
32 seq_cpumask_list(m, mask);
33 else
34 seq_cpumask(m, mask);
31 seq_putc(m, '\n'); 35 seq_putc(m, '\n');
32 return 0; 36 return 0;
33} 37}
@@ -58,21 +62,34 @@ static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
58#endif 62#endif
59 63
60int no_irq_affinity; 64int no_irq_affinity;
61static ssize_t irq_affinity_proc_write(struct file *file, 65static int irq_affinity_proc_show(struct seq_file *m, void *v)
66{
67 return show_irq_affinity(0, m, v);
68}
69
70static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
71{
72 return show_irq_affinity(1, m, v);
73}
74
75
76static ssize_t write_irq_affinity(int type, struct file *file,
62 const char __user *buffer, size_t count, loff_t *pos) 77 const char __user *buffer, size_t count, loff_t *pos)
63{ 78{
64 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; 79 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
65 cpumask_var_t new_value; 80 cpumask_var_t new_value;
66 int err; 81 int err;
67 82
68 if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || 83 if (!irq_can_set_affinity(irq) || no_irq_affinity)
69 irq_balancing_disabled(irq))
70 return -EIO; 84 return -EIO;
71 85
72 if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) 86 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
73 return -ENOMEM; 87 return -ENOMEM;
74 88
75 err = cpumask_parse_user(buffer, count, new_value); 89 if (type)
90 err = cpumask_parselist_user(buffer, count, new_value);
91 else
92 err = cpumask_parse_user(buffer, count, new_value);
76 if (err) 93 if (err)
77 goto free_cpumask; 94 goto free_cpumask;
78 95
@@ -89,7 +106,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
89 if (!cpumask_intersects(new_value, cpu_online_mask)) { 106 if (!cpumask_intersects(new_value, cpu_online_mask)) {
90 /* Special case for empty set - allow the architecture 107 /* Special case for empty set - allow the architecture
91 code to set default SMP affinity. */ 108 code to set default SMP affinity. */
92 err = irq_select_affinity_usr(irq) ? -EINVAL : count; 109 err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
93 } else { 110 } else {
94 irq_set_affinity(irq, new_value); 111 irq_set_affinity(irq, new_value);
95 err = count; 112 err = count;
@@ -100,11 +117,28 @@ free_cpumask:
100 return err; 117 return err;
101} 118}
102 119
120static ssize_t irq_affinity_proc_write(struct file *file,
121 const char __user *buffer, size_t count, loff_t *pos)
122{
123 return write_irq_affinity(0, file, buffer, count, pos);
124}
125
126static ssize_t irq_affinity_list_proc_write(struct file *file,
127 const char __user *buffer, size_t count, loff_t *pos)
128{
129 return write_irq_affinity(1, file, buffer, count, pos);
130}
131
103static int irq_affinity_proc_open(struct inode *inode, struct file *file) 132static int irq_affinity_proc_open(struct inode *inode, struct file *file)
104{ 133{
105 return single_open(file, irq_affinity_proc_show, PDE(inode)->data); 134 return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
106} 135}
107 136
137static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
138{
139 return single_open(file, irq_affinity_list_proc_show, PDE(inode)->data);
140}
141
108static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file) 142static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
109{ 143{
110 return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data); 144 return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
@@ -125,6 +159,14 @@ static const struct file_operations irq_affinity_hint_proc_fops = {
125 .release = single_release, 159 .release = single_release,
126}; 160};
127 161
162static const struct file_operations irq_affinity_list_proc_fops = {
163 .open = irq_affinity_list_proc_open,
164 .read = seq_read,
165 .llseek = seq_lseek,
166 .release = single_release,
167 .write = irq_affinity_list_proc_write,
168};
169
128static int default_affinity_show(struct seq_file *m, void *v) 170static int default_affinity_show(struct seq_file *m, void *v)
129{ 171{
130 seq_cpumask(m, irq_default_affinity); 172 seq_cpumask(m, irq_default_affinity);
@@ -185,7 +227,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v)
185{ 227{
186 struct irq_desc *desc = irq_to_desc((long) m->private); 228 struct irq_desc *desc = irq_to_desc((long) m->private);
187 229
188 seq_printf(m, "%d\n", desc->node); 230 seq_printf(m, "%d\n", desc->irq_data.node);
189 return 0; 231 return 0;
190} 232}
191 233
@@ -214,7 +256,7 @@ static int irq_spurious_proc_show(struct seq_file *m, void *v)
214 256
215static int irq_spurious_proc_open(struct inode *inode, struct file *file) 257static int irq_spurious_proc_open(struct inode *inode, struct file *file)
216{ 258{
217 return single_open(file, irq_spurious_proc_show, NULL); 259 return single_open(file, irq_spurious_proc_show, PDE(inode)->data);
218} 260}
219 261
220static const struct file_operations irq_spurious_proc_fops = { 262static const struct file_operations irq_spurious_proc_fops = {
@@ -269,7 +311,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
269{ 311{
270 char name [MAX_NAMELEN]; 312 char name [MAX_NAMELEN];
271 313
272 if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) 314 if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
273 return; 315 return;
274 316
275 memset(name, 0, MAX_NAMELEN); 317 memset(name, 0, MAX_NAMELEN);
@@ -289,6 +331,10 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
289 proc_create_data("affinity_hint", 0400, desc->dir, 331 proc_create_data("affinity_hint", 0400, desc->dir,
290 &irq_affinity_hint_proc_fops, (void *)(long)irq); 332 &irq_affinity_hint_proc_fops, (void *)(long)irq);
291 333
334 /* create /proc/irq/<irq>/smp_affinity_list */
335 proc_create_data("smp_affinity_list", 0600, desc->dir,
336 &irq_affinity_list_proc_fops, (void *)(long)irq);
337
292 proc_create_data("node", 0444, desc->dir, 338 proc_create_data("node", 0444, desc->dir,
293 &irq_node_proc_fops, (void *)(long)irq); 339 &irq_node_proc_fops, (void *)(long)irq);
294#endif 340#endif
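From userspace the new file takes a human readable CPU list, while the older smp_affinity takes a hex mask; writing "1-3,5" below is equivalent to writing 2e to smp_affinity. An illustrative program (the irq number 30 is arbitrary):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/irq/30/smp_affinity_list", "w");

        if (!f) {
                perror("open");
                return 1;
        }
        fprintf(f, "1-3,5\n");  /* CPUs 1, 2, 3 and 5 */
        return fclose(f) ? 1 : 0;
}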
@@ -297,6 +343,25 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
297 &irq_spurious_proc_fops, (void *)(long)irq); 343 &irq_spurious_proc_fops, (void *)(long)irq);
298} 344}
299 345
346void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
347{
348 char name [MAX_NAMELEN];
349
350 if (!root_irq_dir || !desc->dir)
351 return;
352#ifdef CONFIG_SMP
353 remove_proc_entry("smp_affinity", desc->dir);
354 remove_proc_entry("affinity_hint", desc->dir);
355 remove_proc_entry("smp_affinity_list", desc->dir);
356 remove_proc_entry("node", desc->dir);
357#endif
358 remove_proc_entry("spurious", desc->dir);
359
360 memset(name, 0, MAX_NAMELEN);
361 sprintf(name, "%u", irq);
362 remove_proc_entry(name, root_irq_dir);
363}
364
300#undef MAX_NAMELEN 365#undef MAX_NAMELEN
301 366
302void unregister_handler_proc(unsigned int irq, struct irqaction *action) 367void unregister_handler_proc(unsigned int irq, struct irqaction *action)
@@ -339,3 +404,83 @@ void init_irq_proc(void)
339 } 404 }
340} 405}
341 406
407#ifdef CONFIG_GENERIC_IRQ_SHOW
408
409int __weak arch_show_interrupts(struct seq_file *p, int prec)
410{
411 return 0;
412}
413
414#ifndef ACTUAL_NR_IRQS
415# define ACTUAL_NR_IRQS nr_irqs
416#endif
417
418int show_interrupts(struct seq_file *p, void *v)
419{
420 static int prec;
421
422 unsigned long flags, any_count = 0;
423 int i = *(loff_t *) v, j;
424 struct irqaction *action;
425 struct irq_desc *desc;
426
427 if (i > ACTUAL_NR_IRQS)
428 return 0;
429
430 if (i == ACTUAL_NR_IRQS)
431 return arch_show_interrupts(p, prec);
432
433 /* print header and calculate the width of the first column */
434 if (i == 0) {
435 for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
436 j *= 10;
437
438 seq_printf(p, "%*s", prec + 8, "");
439 for_each_online_cpu(j)
440 seq_printf(p, "CPU%-8d", j);
441 seq_putc(p, '\n');
442 }
443
444 desc = irq_to_desc(i);
445 if (!desc)
446 return 0;
447
448 raw_spin_lock_irqsave(&desc->lock, flags);
449 for_each_online_cpu(j)
450 any_count |= kstat_irqs_cpu(i, j);
451 action = desc->action;
452 if (!action && !any_count)
453 goto out;
454
455 seq_printf(p, "%*d: ", prec, i);
456 for_each_online_cpu(j)
457 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
458
459 if (desc->irq_data.chip) {
460 if (desc->irq_data.chip->irq_print_chip)
461 desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
462 else if (desc->irq_data.chip->name)
463 seq_printf(p, " %8s", desc->irq_data.chip->name);
464 else
465 seq_printf(p, " %8s", "-");
466 } else {
467 seq_printf(p, " %8s", "None");
468 }
469#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
470 seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
471#endif
472 if (desc->name)
473 seq_printf(p, "-%-8s", desc->name);
474
475 if (action) {
476 seq_printf(p, " %s", action->name);
477 while ((action = action->next) != NULL)
478 seq_printf(p, ", %s", action->name);
479 }
480
481 seq_putc(p, '\n');
482out:
483 raw_spin_unlock_irqrestore(&desc->lock, flags);
484 return 0;
485}
486#endif
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 090c3763f3a2..14dd5761e8c9 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -23,7 +23,7 @@
23#ifdef CONFIG_HARDIRQS_SW_RESEND 23#ifdef CONFIG_HARDIRQS_SW_RESEND
24 24
25/* Bitmap to handle software resend of interrupts: */ 25/* Bitmap to handle software resend of interrupts: */
26static DECLARE_BITMAP(irqs_resend, NR_IRQS); 26static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
27 27
28/* 28/*
29 * Run software resends of IRQ's 29 * Run software resends of IRQ's
@@ -55,22 +55,21 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
55 */ 55 */
56void check_irq_resend(struct irq_desc *desc, unsigned int irq) 56void check_irq_resend(struct irq_desc *desc, unsigned int irq)
57{ 57{
58 unsigned int status = desc->status;
59
60 /*
61 * Make sure the interrupt is enabled, before resending it:
62 */
63 desc->chip->enable(irq);
64
65 /* 58 /*
66 * We do not resend level type interrupts. Level type 59 * We do not resend level type interrupts. Level type
67 * interrupts are resent by hardware when they are still 60 * interrupts are resent by hardware when they are still
68 * active. 61 * active.
69 */ 62 */
70 if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { 63 if (irq_settings_is_level(desc))
71 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; 64 return;
65 if (desc->istate & IRQS_REPLAY)
66 return;
67 if (desc->istate & IRQS_PENDING) {
68 desc->istate &= ~IRQS_PENDING;
69 desc->istate |= IRQS_REPLAY;
72 70
73 if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { 71 if (!desc->irq_data.chip->irq_retrigger ||
72 !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
74#ifdef CONFIG_HARDIRQS_SW_RESEND 73#ifdef CONFIG_HARDIRQS_SW_RESEND
75 /* Set it pending and activate the softirq: */ 74 /* Set it pending and activate the softirq: */
76 set_bit(irq, irqs_resend); 75 set_bit(irq, irqs_resend);
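check_irq_resend() prefers a hardware replay and only falls back to the tasklet when the chip has no irq_retrigger() hook or the hook reports failure. A sketch of such a hook for a hypothetical chip (foo_base and the register semantics are invented):

#include <linux/io.h>
#include <linux/irq.h>

static void __iomem *foo_base;  /* invented soft-interrupt trigger register */

static int foo_irq_retrigger(struct irq_data *d)
{
        writel(1U << (d->irq & 31), foo_base);  /* force the line active again */
        return 1;       /* non-zero: hardware replays it, no softirq needed */
}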
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
new file mode 100644
index 000000000000..f1667833d444
--- /dev/null
+++ b/kernel/irq/settings.h
@@ -0,0 +1,142 @@
1/*
2 * Internal header to deal with irq_desc->status which will be renamed
3 * to irq_desc->settings.
4 */
5enum {
6 _IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS,
7 _IRQ_PER_CPU = IRQ_PER_CPU,
8 _IRQ_LEVEL = IRQ_LEVEL,
9 _IRQ_NOPROBE = IRQ_NOPROBE,
10 _IRQ_NOREQUEST = IRQ_NOREQUEST,
11 _IRQ_NOTHREAD = IRQ_NOTHREAD,
12 _IRQ_NOAUTOEN = IRQ_NOAUTOEN,
13 _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
14 _IRQ_NO_BALANCING = IRQ_NO_BALANCING,
15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
16 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
17};
18
19#define IRQ_PER_CPU GOT_YOU_MORON
20#define IRQ_NO_BALANCING GOT_YOU_MORON
21#define IRQ_LEVEL GOT_YOU_MORON
22#define IRQ_NOPROBE GOT_YOU_MORON
23#define IRQ_NOREQUEST GOT_YOU_MORON
24#define IRQ_NOTHREAD GOT_YOU_MORON
25#define IRQ_NOAUTOEN GOT_YOU_MORON
26#define IRQ_NESTED_THREAD GOT_YOU_MORON
27#undef IRQF_MODIFY_MASK
28#define IRQF_MODIFY_MASK GOT_YOU_MORON
29
30static inline void
31irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
32{
33 desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK);
34 desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
35}
36
37static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
38{
39 return desc->status_use_accessors & _IRQ_PER_CPU;
40}
41
42static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
43{
44 desc->status_use_accessors |= _IRQ_PER_CPU;
45}
46
47static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
48{
49 desc->status_use_accessors |= _IRQ_NO_BALANCING;
50}
51
52static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
53{
54 return desc->status_use_accessors & _IRQ_NO_BALANCING;
55}
56
57static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
58{
59 return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
60}
61
62static inline void
63irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
64{
65 desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK;
66 desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;
67}
68
69static inline bool irq_settings_is_level(struct irq_desc *desc)
70{
71 return desc->status_use_accessors & _IRQ_LEVEL;
72}
73
74static inline void irq_settings_clr_level(struct irq_desc *desc)
75{
76 desc->status_use_accessors &= ~_IRQ_LEVEL;
77}
78
79static inline void irq_settings_set_level(struct irq_desc *desc)
80{
81 desc->status_use_accessors |= _IRQ_LEVEL;
82}
83
84static inline bool irq_settings_can_request(struct irq_desc *desc)
85{
86 return !(desc->status_use_accessors & _IRQ_NOREQUEST);
87}
88
89static inline void irq_settings_clr_norequest(struct irq_desc *desc)
90{
91 desc->status_use_accessors &= ~_IRQ_NOREQUEST;
92}
93
94static inline void irq_settings_set_norequest(struct irq_desc *desc)
95{
96 desc->status_use_accessors |= _IRQ_NOREQUEST;
97}
98
99static inline bool irq_settings_can_thread(struct irq_desc *desc)
100{
101 return !(desc->status_use_accessors & _IRQ_NOTHREAD);
102}
103
104static inline void irq_settings_clr_nothread(struct irq_desc *desc)
105{
106 desc->status_use_accessors &= ~_IRQ_NOTHREAD;
107}
108
109static inline void irq_settings_set_nothread(struct irq_desc *desc)
110{
111 desc->status_use_accessors |= _IRQ_NOTHREAD;
112}
113
114static inline bool irq_settings_can_probe(struct irq_desc *desc)
115{
116 return !(desc->status_use_accessors & _IRQ_NOPROBE);
117}
118
119static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
120{
121 desc->status_use_accessors &= ~_IRQ_NOPROBE;
122}
123
124static inline void irq_settings_set_noprobe(struct irq_desc *desc)
125{
126 desc->status_use_accessors |= _IRQ_NOPROBE;
127}
128
129static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
130{
131 return desc->status_use_accessors & _IRQ_MOVE_PCNTXT;
132}
133
134static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
135{
136 return !(desc->status_use_accessors & _IRQ_NOAUTOEN);
137}
138
139static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
140{
141 return desc->status_use_accessors & _IRQ_NESTED_THREAD;
142}
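The GOT_YOU_MORON redefinitions poison the raw IRQ_* names: any code that includes this header and still touches the bits directly fails to compile, so every access is funneled through the inline helpers. A standalone model of the pattern (all names invented):

/* The public name is shadowed; only the _-prefixed value may be used. */
enum { _MY_LEVEL = 0x100 };
#define MY_LEVEL PLEASE_USE_THE_ACCESSORS       /* poisoned on purpose */

struct my_desc { unsigned int status_use_accessors; };

static inline int my_settings_is_level(struct my_desc *d)
{
        return !!(d->status_use_accessors & _MY_LEVEL);
}

static inline void my_settings_set_level(struct my_desc *d)
{
        d->status_use_accessors |= _MY_LEVEL;
}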
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 89fb90ae534f..aa57d5da18c1 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -14,75 +14,100 @@
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/timer.h> 15#include <linux/timer.h>
16 16
17#include "internals.h"
18
17static int irqfixup __read_mostly; 19static int irqfixup __read_mostly;
18 20
19#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) 21#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
20static void poll_spurious_irqs(unsigned long dummy); 22static void poll_spurious_irqs(unsigned long dummy);
21static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); 23static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
24static int irq_poll_cpu;
25static atomic_t irq_poll_active;
26
27/*
28 * We wait here for a poller to finish.
29 *
30 * If the poll runs on this CPU, then we yell loudly and return
31 * false. That will leave the interrupt line disabled in the worst
32 * case, but it should never happen.
33 *
34 * We wait until the poller is done and then recheck disabled and
35 * action (about to be disabled). Only if it's still active, we return
36 * true and let the handler run.
37 */
38bool irq_wait_for_poll(struct irq_desc *desc)
39{
40 if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
41 "irq poll in progress on cpu %d for irq %d\n",
42 smp_processor_id(), desc->irq_data.irq))
43 return false;
44
45#ifdef CONFIG_SMP
46 do {
47 raw_spin_unlock(&desc->lock);
48 while (irqd_irq_inprogress(&desc->irq_data))
49 cpu_relax();
50 raw_spin_lock(&desc->lock);
51 } while (irqd_irq_inprogress(&desc->irq_data));
52 /* Might have been disabled in meantime */
53 return !irqd_irq_disabled(&desc->irq_data) && desc->action;
54#else
55 return false;
56#endif
57}
58
22 59
23/* 60/*
24 * Recovery handler for misrouted interrupts. 61 * Recovery handler for misrouted interrupts.
25 */ 62 */
26static int try_one_irq(int irq, struct irq_desc *desc) 63static int try_one_irq(int irq, struct irq_desc *desc, bool force)
27{ 64{
65 irqreturn_t ret = IRQ_NONE;
28 struct irqaction *action; 66 struct irqaction *action;
29 int ok = 0, work = 0;
30 67
31 raw_spin_lock(&desc->lock); 68 raw_spin_lock(&desc->lock);
32 /* Already running on another processor */
33 if (desc->status & IRQ_INPROGRESS) {
34 /*
35 * Already running: If it is shared get the other
36 * CPU to go looking for our mystery interrupt too
37 */
38 if (desc->action && (desc->action->flags & IRQF_SHARED))
39 desc->status |= IRQ_PENDING;
40 raw_spin_unlock(&desc->lock);
41 return ok;
42 }
43 /* Honour the normal IRQ locking */
44 desc->status |= IRQ_INPROGRESS;
45 action = desc->action;
46 raw_spin_unlock(&desc->lock);
47 69
48 while (action) { 70 /* PER_CPU and nested thread interrupts are never polled */
49 /* Only shared IRQ handlers are safe to call */ 71 if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
50 if (action->flags & IRQF_SHARED) { 72 goto out;
51 if (action->handler(irq, action->dev_id) ==
52 IRQ_HANDLED)
53 ok = 1;
54 }
55 action = action->next;
56 }
57 local_irq_disable();
58 /* Now clean up the flags */
59 raw_spin_lock(&desc->lock);
60 action = desc->action;
61 73
62 /* 74 /*
63 * While we were looking for a fixup someone queued a real 75 * Do not poll disabled interrupts unless the spurious
64 * IRQ clashing with our walk: 76 * disabled poller asks explicitly.
65 */ 77 */
66 while ((desc->status & IRQ_PENDING) && action) { 78 if (irqd_irq_disabled(&desc->irq_data) && !force)
79 goto out;
80
81 /*
82 * All handlers must agree on IRQF_SHARED, so we test just the
83 * first. Check for action->next as well.
84 */
85 action = desc->action;
86 if (!action || !(action->flags & IRQF_SHARED) ||
87 (action->flags & __IRQF_TIMER) || !action->next)
88 goto out;
89
90 /* Already running on another processor */
91 if (irqd_irq_inprogress(&desc->irq_data)) {
67 /* 92 /*
68 * Perform real IRQ processing for the IRQ we deferred 93 * Already running: If it is shared get the other
94 * CPU to go looking for our mystery interrupt too
69 */ 95 */
70 work = 1; 96 desc->istate |= IRQS_PENDING;
71 raw_spin_unlock(&desc->lock); 97 goto out;
72 handle_IRQ_event(irq, action);
73 raw_spin_lock(&desc->lock);
74 desc->status &= ~IRQ_PENDING;
75 } 98 }
76 desc->status &= ~IRQ_INPROGRESS;
77 /*
78 * If we did actual work for the real IRQ line we must let the
79 * IRQ controller clean up too
80 */
81 if (work && desc->chip && desc->chip->end)
82 desc->chip->end(irq);
83 raw_spin_unlock(&desc->lock);
84 99
85 return ok; 100 /* Mark it poll in progress */
101 desc->istate |= IRQS_POLL_INPROGRESS;
102 do {
103 if (handle_irq_event(desc) == IRQ_HANDLED)
104 ret = IRQ_HANDLED;
105 action = desc->action;
106 } while ((desc->istate & IRQS_PENDING) && action);
107 desc->istate &= ~IRQS_POLL_INPROGRESS;
108out:
109 raw_spin_unlock(&desc->lock);
110 return ret == IRQ_HANDLED;
86} 111}
87 112
88static int misrouted_irq(int irq) 113static int misrouted_irq(int irq)
@@ -90,6 +115,11 @@ static int misrouted_irq(int irq)
90 struct irq_desc *desc; 115 struct irq_desc *desc;
91 int i, ok = 0; 116 int i, ok = 0;
92 117
118 if (atomic_inc_return(&irq_poll_active) != 1)
119 goto out;
120
121 irq_poll_cpu = smp_processor_id();
122
93 for_each_irq_desc(i, desc) { 123 for_each_irq_desc(i, desc) {
94 if (!i) 124 if (!i)
95 continue; 125 continue;
@@ -97,9 +127,11 @@ static int misrouted_irq(int irq)
 		if (i == irq)	/* Already tried */
 			continue;
 
-		if (try_one_irq(i, desc))
+		if (try_one_irq(i, desc, false))
 			ok = 1;
 	}
+out:
+	atomic_dec(&irq_poll_active);
 	/* So the caller can adjust the irq error counts */
 	return ok;
 }
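misrouted_irq() now serializes pollers through the irq_poll_active counter: only the caller whose increment takes the counter from zero to one may walk the descriptors (recording in irq_poll_cpu which CPU is polling), and every caller decrements on the way out. A rough C11 equivalent of that gate; irq_poll_active here is a plain process-local atomic, and poll_all_irqs is an illustrative stand-in, not a kernel symbol.

/* Sketch of the single-poller gate with C11 atomics; not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int irq_poll_active;

static int poll_all_irqs(void)
{
	int ok = 0;

	/* atomic_inc_return() equivalent: increment, then test the result */
	if (atomic_fetch_add(&irq_poll_active, 1) + 1 != 1)
		goto out;			/* somebody else is polling */

	/* ... walk the descriptors and call try_one_irq() here ... */
	ok = 1;
out:
	atomic_fetch_sub(&irq_poll_active, 1);	/* always drop our reference */
	return ok;
}

int main(void)
{
	printf("polled: %d\n", poll_all_irqs());
	return 0;
}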
@@ -109,27 +141,39 @@ static void poll_spurious_irqs(unsigned long dummy)
 	struct irq_desc *desc;
 	int i;
 
+	if (atomic_inc_return(&irq_poll_active) != 1)
+		goto out;
+	irq_poll_cpu = smp_processor_id();
+
 	for_each_irq_desc(i, desc) {
-		unsigned int status;
+		unsigned int state;
 
 		if (!i)
 			continue;
 
 		/* Racy but it doesn't matter */
-		status = desc->status;
+		state = desc->istate;
 		barrier();
-		if (!(status & IRQ_SPURIOUS_DISABLED))
+		if (!(state & IRQS_SPURIOUS_DISABLED))
 			continue;
 
 		local_irq_disable();
-		try_one_irq(i, desc);
+		try_one_irq(i, desc, true);
 		local_irq_enable();
 	}
-
+out:
+	atomic_dec(&irq_poll_active);
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+	if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+		return 0;
+	return 1;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
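The bad_action_ret() helper added above leans on irqreturn_t being a small bitmask: IRQ_NONE is 0, IRQ_HANDLED is bit 0 and IRQ_WAKE_THREAD is bit 1, so every legitimate handler return is at most IRQ_HANDLED | IRQ_WAKE_THREAD (3) and anything larger must be bogus. The check can be exercised in isolation; the enum values below mirror the kernel's, while the test program itself is illustrative.

/* Sketch of the range check behind bad_action_ret(); not kernel code. */
#include <stdio.h>

enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 << 0, IRQ_WAKE_THREAD = 1 << 1 };

static int bad_action_ret(int action_ret)
{
	/* anything above the OR of all valid bits is out of range */
	return action_ret > (IRQ_HANDLED | IRQ_WAKE_THREAD);
}

int main(void)
{
	/* prints "0 0 0 1": only the out-of-range value is flagged */
	printf("%d %d %d %d\n",
	       bad_action_ret(IRQ_NONE),
	       bad_action_ret(IRQ_HANDLED),
	       bad_action_ret(IRQ_HANDLED | IRQ_WAKE_THREAD),
	       bad_action_ret(0xdead));
	return 0;
}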
@@ -137,17 +181,15 @@ static void poll_spurious_irqs(unsigned long dummy)
  *
  * (The other 100-of-100,000 interrupts may have been a correctly
  * functioning device sharing an IRQ with the failing one)
- *
- * Called under desc->lock
  */
-
 static void
 __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 		 irqreturn_t action_ret)
 {
 	struct irqaction *action;
+	unsigned long flags;
 
-	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+	if (bad_action_ret(action_ret)) {
 		printk(KERN_ERR "irq event %d: bogus return value %x\n",
 				irq, action_ret);
 	} else {
@@ -157,14 +199,23 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 	dump_stack();
 	printk(KERN_ERR "handlers:\n");
 
+	/*
+	 * We need to take desc->lock here. note_interrupt() is called
+	 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
+	 * with something else removing an action. It's ok to take
+	 * desc->lock here. See synchronize_irq().
+	 */
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	while (action) {
-		printk(KERN_ERR "[<%p>]", action->handler);
-		print_symbol(" (%s)",
-			(unsigned long)action->handler);
-		printk("\n");
+		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
+		if (action->thread_fn)
+			printk(KERN_CONT " threaded [<%p>] %pf",
+					action->thread_fn, action->thread_fn);
+		printk(KERN_CONT "\n");
 		action = action->next;
 	}
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 static void
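__report_bad_irq() now walks the action chain under desc->lock and prints each primary handler plus, for threaded interrupts, the thread function; the %pf printk format resolves a function pointer to its symbol name. A userspace sketch of the same walk, using plain %p since symbol resolution is kernel-side; struct fake_action is an illustrative stand-in for struct irqaction.

/* Sketch of the handler dump loop; not kernel code. */
#include <stddef.h>
#include <stdio.h>

struct fake_action {
	void *handler;
	void *thread_fn;		/* NULL unless the IRQ is threaded */
	struct fake_action *next;
};

static void report_handlers(const struct fake_action *action)
{
	while (action) {
		printf("[<%p>]", action->handler);
		if (action->thread_fn)
			printf(" threaded [<%p>]", action->thread_fn);
		printf("\n");
		action = action->next;
	}
}

int main(void)
{
	struct fake_action b = { (void *)0x2000, (void *)0x3000, NULL };
	struct fake_action a = { (void *)0x1000, NULL, &b };

	report_handlers(&a);
	return 0;
}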
@@ -216,7 +267,19 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		    irqreturn_t action_ret)
 {
-	if (unlikely(action_ret != IRQ_HANDLED)) {
+	if (desc->istate & IRQS_POLL_INPROGRESS)
+		return;
+
+	/* we get here again via the threaded handler */
+	if (action_ret == IRQ_WAKE_THREAD)
+		return;
+
+	if (bad_action_ret(action_ret)) {
+		report_bad_irq(irq, desc, action_ret);
+		return;
+	}
+
+	if (unlikely(action_ret == IRQ_NONE)) {
 		/*
 		 * If we are seeing only the odd spurious IRQ caused by
 		 * bus asynchronicity then don't eventually trigger an error,
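The new early returns give note_interrupt() a strict filtering order before any statistics are touched: results produced while polling are ignored, IRQ_WAKE_THREAD is ignored because the threaded handler will report its own result later, bogus values are reported and dropped, and only a genuine IRQ_NONE feeds the unhandled counters. A simplified model of that ordering follows; fake_desc and fake_note_interrupt are illustrative, and the real code additionally rate-limits the IRQ_NONE path via last_unhandled.

/* Simplified sketch of the filtering order; not kernel code. */
#include <stdio.h>

enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 << 0, IRQ_WAKE_THREAD = 1 << 1 };

struct fake_desc {
	int poll_in_progress;		/* models IRQS_POLL_INPROGRESS */
	unsigned int irq_count;
	unsigned int irqs_unhandled;
};

static void fake_note_interrupt(struct fake_desc *desc, int action_ret)
{
	if (desc->poll_in_progress)
		return;			/* poller results must not count */

	if (action_ret == IRQ_WAKE_THREAD)
		return;			/* the irq thread reports later */

	if (action_ret > (IRQ_HANDLED | IRQ_WAKE_THREAD)) {
		fprintf(stderr, "bogus return value %x\n", action_ret);
		return;
	}

	if (action_ret == IRQ_NONE)
		desc->irqs_unhandled++;
	desc->irq_count++;
}

int main(void)
{
	struct fake_desc desc = { 0, 0, 0 };

	fake_note_interrupt(&desc, IRQ_NONE);
	printf("count=%u unhandled=%u\n", desc.irq_count, desc.irqs_unhandled);
	return 0;
}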
@@ -228,8 +291,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		else
 			desc->irqs_unhandled++;
 		desc->last_unhandled = jiffies;
-		if (unlikely(action_ret != IRQ_NONE))
-			report_bad_irq(irq, desc, action_ret);
 	}
 
 	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
@@ -252,9 +313,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 	 * Now kill the IRQ
 	 */
 	printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
-	desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+	desc->istate |= IRQS_SPURIOUS_DISABLED;
 	desc->depth++;
-	desc->chip->disable(irq);
+	irq_disable(desc);
 
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
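The hunk above is the tail of the stuck-IRQ heuristic described earlier: every 100,000 observed interrupts the counters are reset, and if more than 99,900 of them went unhandled the line is marked IRQS_SPURIOUS_DISABLED, disabled via irq_disable(), and handed over to the poll timer. A compressed model of just the threshold arithmetic; maybe_disable and fake_desc are illustrative names, and the timer rearm is only hinted at in a comment.

/* Sketch of the 99,900-of-100,000 threshold; not kernel code. */
#include <stdio.h>

struct fake_desc {
	unsigned int irq_count;
	unsigned int irqs_unhandled;
	int disabled;
};

static void maybe_disable(struct fake_desc *desc, int irq)
{
	if (desc->irq_count < 100000)
		return;			/* not enough samples yet */

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		printf("Disabling IRQ #%d\n", irq);
		desc->disabled = 1;	/* istate |= IRQS_SPURIOUS_DISABLED */
		/* the kernel also rearms poll_spurious_irq_timer here */
	}
	desc->irqs_unhandled = 0;
}

int main(void)
{
	struct fake_desc desc = { .irq_count = 100000, .irqs_unhandled = 99901 };

	maybe_disable(&desc, 10);
	printf("disabled: %d\n", desc.disabled);
	return 0;
}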